Pull drm fixes from Simona Vetter:
 "Looks like stochastics conspired to make this one a bit bigger, but
  nothing scary at all. Also first examples of the new Link: tags, yay!

  Next week Dave should be back.

  Drivers:
   - mediatek: uaf in unbind, fixes -rc2 boot regression
   - radeon: devm conversion fixes
   - amdgpu: VPE idle handler, re-enable DM idle optimization, DCN3,
     SMU, vblank, HDR eDP, powerplay fixes for fiji/iceland
   - msm: bunch of gem error path fixes, gmu fw parsing fix, dpu fixes
   - intel: fix dmc/dc6 asserts on ADL-S
   - xe: fix xe_validation_guard(), wake device handling around gt reset
   - ast: fix display output on AST2300
   - etnaviv: fix gpu flush
   - imx: fix parallel bridge handling
   - nouveau: scheduler locking fix
   - panel: fixes for kingdisplay-kd097d04 and sitronix-st7789v

  Core Changes:
   - CI: disable broken sanity job
   - sysfb: fix NULL pointer access
   - sched: fix SIGKILL handling, locking for race condition
   - dma_fence: better timeline name for signalled fences"

* tag 'drm-fixes-2025-10-31' of https://gitlab.freedesktop.org/drm/kernel: (44 commits)
  drm/ast: Clear preserved bits from register output value
  drm/imx: parallel-display: add the bridge before attaching it
  drm/imx: parallel-display: convert to devm_drm_bridge_alloc() API
  drm/panel: kingdisplay-kd097d04: Disable EoTp
  drm/panel: sitronix-st7789v: fix sync flags for t28cp45tn89
  drm/xe: Do not wake device during a GT reset
  drm/xe: Fix uninitialized return value from xe_validation_guard()
  drm/msm/dpu: Fix adjusted mode clock check for 3d merge
  drm/msm/dpu: Disable broken YUV on QSEED2 hardware
  drm/msm/dpu: Require linear modifier for writeback framebuffers
  drm/msm/dpu: Fix pixel extension sub-sampling
  drm/msm/dpu: Disable scaling for unsupported scaler types
  drm/msm/dpu: Propagate error from dpu_assign_plane_resources
  drm/msm/dpu: Fix allocation of RGB SSPPs without scaling
  drm/msm: dsi: fix PLL init in bonded mode
  drm/i915/dmc: Clear HRR EVT_CTL/HTP to zero on ADL-S
  drm/amd/display: Fix incorrect return of vblank enable on unconfigured crtc
  drm/amd/display: Add HDR workaround for a specific eDP
  drm/amdgpu: fix SPDX header on cyan_skillfish_reg_init.c
  drm/amdgpu: fix SPDX header on irqsrcs_vcn_5_0.h
  ...
Linus Torvalds
2025-10-31 14:47:02 -07:00
42 changed files with 234 additions and 128 deletions


@@ -1141,7 +1141,7 @@ const char __rcu *dma_fence_timeline_name(struct dma_fence *fence)
 			 "RCU protection is required for safe access to returned string");
 
 	if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
-		return fence->ops->get_driver_name(fence);
+		return fence->ops->get_timeline_name(fence);
 	else
 		return "signaled-timeline";
 }


@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+// SPDX-License-Identifier: MIT
 /*
  * Copyright 2025 Advanced Micro Devices, Inc.
  *


@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
 /*
  * Copyright 2025 Advanced Micro Devices, Inc.
  *


@@ -322,6 +322,26 @@ static int vpe_early_init(struct amdgpu_ip_block *ip_block)
 	return 0;
 }
 
+static bool vpe_need_dpm0_at_power_down(struct amdgpu_device *adev)
+{
+	switch (amdgpu_ip_version(adev, VPE_HWIP, 0)) {
+	case IP_VERSION(6, 1, 1):
+		return adev->pm.fw_version < 0x0a640500;
+	default:
+		return false;
+	}
+}
+
+static int vpe_get_dpm_level(struct amdgpu_device *adev)
+{
+	struct amdgpu_vpe *vpe = &adev->vpe;
+
+	if (!adev->pm.dpm_enabled)
+		return 0;
+
+	return RREG32(vpe_get_reg_offset(vpe, 0, vpe->regs.dpm_request_lv));
+}
+
 static void vpe_idle_work_handler(struct work_struct *work)
 {
 	struct amdgpu_device *adev =
@@ -329,11 +349,17 @@ static void vpe_idle_work_handler(struct work_struct *work)
 	unsigned int fences = 0;
 
 	fences += amdgpu_fence_count_emitted(&adev->vpe.ring);
-	if (fences == 0)
-		amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VPE, AMD_PG_STATE_GATE);
-	else
-		schedule_delayed_work(&adev->vpe.idle_work, VPE_IDLE_TIMEOUT);
+	if (fences)
+		goto reschedule;
+
+	if (vpe_need_dpm0_at_power_down(adev) && vpe_get_dpm_level(adev) != 0)
+		goto reschedule;
+
+	amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VPE, AMD_PG_STATE_GATE);
+	return;
+
+reschedule:
+	schedule_delayed_work(&adev->vpe.idle_work, VPE_IDLE_TIMEOUT);
 }
 
 static int vpe_common_init(struct amdgpu_vpe *vpe)


@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+// SPDX-License-Identifier: MIT
 /*
  * Copyright 2018 Advanced Micro Devices, Inc.
  *


@@ -248,6 +248,8 @@ static void amdgpu_dm_crtc_vblank_control_worker(struct work_struct *work)
 	struct vblank_control_work *vblank_work =
 		container_of(work, struct vblank_control_work, work);
 	struct amdgpu_display_manager *dm = vblank_work->dm;
+	struct amdgpu_device *adev = drm_to_adev(dm->ddev);
+	int r;
 
 	mutex_lock(&dm->dc_lock);
@@ -277,7 +279,16 @@ static void amdgpu_dm_crtc_vblank_control_worker(struct work_struct *work)
 	if (dm->active_vblank_irq_count == 0) {
 		dc_post_update_surfaces_to_stream(dm->dc);
+
+		r = amdgpu_dpm_pause_power_profile(adev, true);
+		if (r)
+			dev_warn(adev->dev, "failed to set default power profile mode\n");
+
 		dc_allow_idle_optimizations(dm->dc, true);
+
+		r = amdgpu_dpm_pause_power_profile(adev, false);
+		if (r)
+			dev_warn(adev->dev, "failed to restore the power profile mode\n");
 	}
 
 	mutex_unlock(&dm->dc_lock);
@@ -297,8 +308,12 @@ static inline int amdgpu_dm_crtc_set_vblank(struct drm_crtc *crtc, bool enable)
 	int irq_type;
 	int rc = 0;
 
-	if (acrtc->otg_inst == -1)
-		goto skip;
+	if (enable && !acrtc->base.enabled) {
+		drm_dbg_vbl(crtc->dev,
+			    "Reject vblank enable on unconfigured CRTC %d (enabled=%d)\n",
+			    acrtc->crtc_id, acrtc->base.enabled);
+		return -EINVAL;
+	}
 
 	irq_type = amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);
@@ -383,7 +398,7 @@ static inline int amdgpu_dm_crtc_set_vblank(struct drm_crtc *crtc, bool enable)
 		return rc;
 	}
 #endif
-skip:
+
 	if (amdgpu_in_reset(adev))
 		return 0;


@@ -83,6 +83,7 @@ static void apply_edid_quirks(struct drm_device *dev, struct edid *edid, struct
 		edid_caps->panel_patch.remove_sink_ext_caps = true;
 		break;
 	case drm_edid_encode_panel_id('S', 'D', 'C', 0x4154):
+	case drm_edid_encode_panel_id('S', 'D', 'C', 0x4171):
 		drm_dbg_driver(dev, "Disabling VSC on monitor with panel id %X\n", panel_id);
 		edid_caps->panel_patch.disable_colorimetry = true;
 		break;


@@ -578,9 +578,6 @@ static void dpp3_power_on_blnd_lut(
 			dpp_base->ctx->dc->optimized_required = true;
 			dpp_base->deferred_reg_writes.bits.disable_blnd_lut = true;
 		}
-	} else {
-		REG_SET(CM_MEM_PWR_CTRL, 0,
-			BLNDGAM_MEM_PWR_FORCE, power_on == true ? 0 : 1);
 	}
 }


@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
 /*
  * Copyright 2025 Advanced Micro Devices, Inc.
  *


@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
 /*
  * Copyright 2024 Advanced Micro Devices, Inc. All rights reserved.


@@ -2024,7 +2024,7 @@ static int fiji_init_smc_table(struct pp_hwmgr *hwmgr)
 	table->VoltageResponseTime = 0;
 	table->PhaseResponseTime = 0;
 	table->MemoryThermThrottleEnable = 1;
-	table->PCIeBootLinkLevel = 0;      /* 0:Gen1 1:Gen2 2:Gen3*/
+	table->PCIeBootLinkLevel = (uint8_t) (data->dpm_table.pcie_speed_table.count);
 	table->PCIeGenInterval = 1;
 	table->VRConfig = 0;


@@ -2028,7 +2028,7 @@ static int iceland_init_smc_table(struct pp_hwmgr *hwmgr)
 	table->VoltageResponseTime = 0;
 	table->PhaseResponseTime = 0;
 	table->MemoryThermThrottleEnable = 1;
-	table->PCIeBootLinkLevel = 0;
+	table->PCIeBootLinkLevel = (uint8_t) (data->dpm_table.pcie_speed_table.count);
 	table->PCIeGenInterval = 1;
 
 	result = iceland_populate_smc_svi2_config(hwmgr, table);


@@ -969,7 +969,7 @@ int smu_cmn_update_table(struct smu_context *smu,
 						       table_index);
 	uint32_t table_size;
 	int ret = 0;
 
-	if (!table_data || table_id >= SMU_TABLE_COUNT || table_id < 0)
+	if (!table_data || table_index >= SMU_TABLE_COUNT || table_id < 0)
 		return -EINVAL;
 
 	table_size = smu_table->tables[table_index].size;


@@ -282,13 +282,13 @@ static inline void __ast_write8_i(void __iomem *addr, u32 reg, u8 index, u8 val)
 	__ast_write8(addr, reg + 1, val);
 }
 
-static inline void __ast_write8_i_masked(void __iomem *addr, u32 reg, u8 index, u8 read_mask,
+static inline void __ast_write8_i_masked(void __iomem *addr, u32 reg, u8 index, u8 preserve_mask,
 					 u8 val)
 {
-	u8 tmp = __ast_read8_i_masked(addr, reg, index, read_mask);
+	u8 tmp = __ast_read8_i_masked(addr, reg, index, preserve_mask);
 
-	tmp |= val;
-	__ast_write8_i(addr, reg, index, tmp);
+	val &= ~preserve_mask;
+	__ast_write8_i(addr, reg, index, tmp | val);
 }
 
 static inline u32 ast_read32(struct ast_device *ast, u32 reg)
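
The __ast_write8_i_masked() change above is easiest to read as plain bit arithmetic: the old helper OR'ed the caller's value on top of the preserved register bits, so any stray bits of val that fell inside the preserve mask leaked into the register; the fix clears those bits from val first. A self-contained userspace sketch of the before/after behaviour (stand-in helpers and values, not the real MMIO accessors):

    #include <stdint.h>
    #include <stdio.h>

    static uint8_t fake_reg = 0xA5;               /* current register content */

    static uint8_t read_masked(uint8_t preserve_mask)
    {
        return fake_reg & preserve_mask;          /* bits the caller wants to keep */
    }

    /* Old behaviour: val is OR'ed in unmodified, so bits of val that overlap
     * the preserved region leak into the written value. */
    static uint8_t write_masked_old(uint8_t preserve_mask, uint8_t val)
    {
        uint8_t tmp = read_masked(preserve_mask);
        tmp |= val;
        return tmp;
    }

    /* Fixed behaviour: clear the preserved bits from val, so that region is
     * taken only from the register. */
    static uint8_t write_masked_new(uint8_t preserve_mask, uint8_t val)
    {
        uint8_t tmp = read_masked(preserve_mask);
        val &= (uint8_t)~preserve_mask;
        return tmp | val;
    }

    int main(void)
    {
        uint8_t preserve_mask = 0xF0;   /* keep the register's high nibble */
        uint8_t val = 0x3C;             /* caller value overlaps that nibble */

        printf("old: 0x%02X\n", write_masked_old(preserve_mask, val)); /* 0xBC: bits 5/4 leaked in */
        printf("new: 0x%02X\n", write_masked_new(preserve_mask, val)); /* 0xAC: high nibble preserved */
        return 0;
    }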


@@ -280,7 +280,7 @@ sanity:
   GIT_STRATEGY: none
   script:
     # ci-fairy check-commits --junit-xml=check-commits.xml
-    - ci-fairy check-merge-request --require-allow-collaboration --junit-xml=check-merge-request.xml
+    # - ci-fairy check-merge-request --require-allow-collaboration --junit-xml=check-merge-request.xml
     - |
       set -eu
       image_tags=(


@@ -310,8 +310,12 @@ EXPORT_SYMBOL(drm_gem_destroy_shadow_plane_state);
 void __drm_gem_reset_shadow_plane(struct drm_plane *plane,
 				  struct drm_shadow_plane_state *shadow_plane_state)
 {
-	__drm_atomic_helper_plane_reset(plane, &shadow_plane_state->base);
-	drm_format_conv_state_init(&shadow_plane_state->fmtcnv_state);
+	if (shadow_plane_state) {
+		__drm_atomic_helper_plane_reset(plane, &shadow_plane_state->base);
+		drm_format_conv_state_init(&shadow_plane_state->fmtcnv_state);
+	} else {
+		__drm_atomic_helper_plane_reset(plane, NULL);
+	}
 }
 EXPORT_SYMBOL(__drm_gem_reset_shadow_plane);


@@ -347,7 +347,7 @@ void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, u32 exec_state,
 	u32 link_target, link_dwords;
 	bool switch_context = gpu->exec_state != exec_state;
 	bool switch_mmu_context = gpu->mmu_context != mmu_context;
-	unsigned int new_flush_seq = READ_ONCE(gpu->mmu_context->flush_seq);
+	unsigned int new_flush_seq = READ_ONCE(mmu_context->flush_seq);
 	bool need_flush = switch_mmu_context || gpu->flush_seq != new_flush_seq;
 	bool has_blt = !!(gpu->identity.minor_features5 &
 		chipMinorFeatures5_BLT_ENGINE);


@@ -546,6 +546,36 @@ static bool is_event_handler(struct intel_display *display,
 		REG_FIELD_GET(DMC_EVT_CTL_EVENT_ID_MASK, data) == event_id;
 }
 
+static bool fixup_dmc_evt(struct intel_display *display,
+			  enum intel_dmc_id dmc_id,
+			  i915_reg_t reg_ctl, u32 *data_ctl,
+			  i915_reg_t reg_htp, u32 *data_htp)
+{
+	if (!is_dmc_evt_ctl_reg(display, dmc_id, reg_ctl))
+		return false;
+
+	if (!is_dmc_evt_htp_reg(display, dmc_id, reg_htp))
+		return false;
+
+	/* make sure reg_ctl and reg_htp are for the same event */
+	if (i915_mmio_reg_offset(reg_ctl) - i915_mmio_reg_offset(DMC_EVT_CTL(display, dmc_id, 0)) !=
+	    i915_mmio_reg_offset(reg_htp) - i915_mmio_reg_offset(DMC_EVT_HTP(display, dmc_id, 0)))
+		return false;
+
+	/*
+	 * On ADL-S the HRR event handler is not restored after DC6.
+	 * Clear it to zero from the beginning to avoid mismatches later.
+	 */
+	if (display->platform.alderlake_s && dmc_id == DMC_FW_MAIN &&
+	    is_event_handler(display, dmc_id, MAINDMC_EVENT_VBLANK_A, reg_ctl, *data_ctl)) {
+		*data_ctl = 0;
+		*data_htp = 0;
+		return true;
+	}
+
+	return false;
+}
+
 static bool disable_dmc_evt(struct intel_display *display,
 			    enum intel_dmc_id dmc_id,
 			    i915_reg_t reg, u32 data)
@@ -1064,9 +1094,32 @@ static u32 parse_dmc_fw_header(struct intel_dmc *dmc,
 	for (i = 0; i < mmio_count; i++) {
 		dmc_info->mmioaddr[i] = _MMIO(mmioaddr[i]);
 		dmc_info->mmiodata[i] = mmiodata[i];
+	}
+
+	for (i = 0; i < mmio_count - 1; i++) {
+		u32 orig_mmiodata[2] = {
+			dmc_info->mmiodata[i],
+			dmc_info->mmiodata[i+1],
+		};
+
+		if (!fixup_dmc_evt(display, dmc_id,
+				   dmc_info->mmioaddr[i], &dmc_info->mmiodata[i],
+				   dmc_info->mmioaddr[i+1], &dmc_info->mmiodata[i+1]))
+			continue;
+
+		drm_dbg_kms(display->drm,
+			    " mmio[%d]: 0x%x = 0x%x->0x%x (EVT_CTL)\n",
+			    i, i915_mmio_reg_offset(dmc_info->mmioaddr[i]),
+			    orig_mmiodata[0], dmc_info->mmiodata[i]);
+		drm_dbg_kms(display->drm,
+			    " mmio[%d]: 0x%x = 0x%x->0x%x (EVT_HTP)\n",
+			    i+1, i915_mmio_reg_offset(dmc_info->mmioaddr[i+1]),
+			    orig_mmiodata[1], dmc_info->mmiodata[i+1]);
+	}
+
+	for (i = 0; i < mmio_count; i++) {
 		drm_dbg_kms(display->drm, " mmio[%d]: 0x%x = 0x%x%s%s\n",
-			    i, mmioaddr[i], mmiodata[i],
+			    i, i915_mmio_reg_offset(dmc_info->mmioaddr[i]), dmc_info->mmiodata[i],
 			    is_dmc_evt_ctl_reg(display, dmc_id, dmc_info->mmioaddr[i]) ? " (EVT_CTL)" :
 			    is_dmc_evt_htp_reg(display, dmc_id, dmc_info->mmioaddr[i]) ? " (EVT_HTP)" : "",
 			    disable_dmc_evt(display, dmc_id, dmc_info->mmioaddr[i],


@@ -25,19 +25,18 @@
 struct imx_parallel_display_encoder {
 	struct drm_encoder encoder;
-	struct drm_bridge bridge;
-	struct imx_parallel_display *pd;
 };
 
 struct imx_parallel_display {
 	struct device *dev;
 	u32 bus_format;
 	struct drm_bridge *next_bridge;
+	struct drm_bridge bridge;
 };
 
 static inline struct imx_parallel_display *bridge_to_imxpd(struct drm_bridge *b)
 {
-	return container_of(b, struct imx_parallel_display_encoder, bridge)->pd;
+	return container_of(b, struct imx_parallel_display, bridge);
 }
 
 static const u32 imx_pd_bus_fmts[] = {
@@ -195,15 +194,13 @@ static int imx_pd_bind(struct device *dev, struct device *master, void *data)
 	if (IS_ERR(imxpd_encoder))
 		return PTR_ERR(imxpd_encoder);
 
-	imxpd_encoder->pd = imxpd;
 	encoder = &imxpd_encoder->encoder;
-	bridge = &imxpd_encoder->bridge;
+	bridge = &imxpd->bridge;
 
 	ret = imx_drm_encoder_parse_of(drm, encoder, imxpd->dev->of_node);
 	if (ret)
 		return ret;
 
-	bridge->funcs = &imx_pd_bridge_funcs;
 	drm_bridge_attach(encoder, bridge, NULL, DRM_BRIDGE_ATTACH_NO_CONNECTOR);
 
 	connector = drm_bridge_connector_init(drm, encoder);
@@ -228,9 +225,10 @@ static int imx_pd_probe(struct platform_device *pdev)
 	u32 bus_format = 0;
 	const char *fmt;
 
-	imxpd = devm_kzalloc(dev, sizeof(*imxpd), GFP_KERNEL);
-	if (!imxpd)
-		return -ENOMEM;
+	imxpd = devm_drm_bridge_alloc(dev, struct imx_parallel_display, bridge,
+				      &imx_pd_bridge_funcs);
+	if (IS_ERR(imxpd))
+		return PTR_ERR(imxpd);
 
 	/* port@1 is the output port */
 	imxpd->next_bridge = devm_drm_of_get_bridge(dev, np, 1, 0);
@@ -258,6 +256,8 @@ static int imx_pd_probe(struct platform_device *pdev)
 	platform_set_drvdata(pdev, imxpd);
 
+	devm_drm_bridge_add(dev, &imxpd->bridge);
+
 	return component_add(dev, &imx_pd_ops);
 }


@@ -686,10 +686,6 @@ err_free:
 	for (i = 0; i < private->data->mmsys_dev_num; i++)
 		private->all_drm_private[i]->drm = NULL;
 err_put_dev:
-	for (i = 0; i < private->data->mmsys_dev_num; i++) {
-		/* For device_find_child in mtk_drm_get_all_priv() */
-		put_device(private->all_drm_private[i]->dev);
-	}
 	put_device(private->mutex_dev);
 	return ret;
 }
@@ -697,18 +693,12 @@
 static void mtk_drm_unbind(struct device *dev)
 {
 	struct mtk_drm_private *private = dev_get_drvdata(dev);
-	int i;
 
 	/* for multi mmsys dev, unregister drm dev in mmsys master */
 	if (private->drm_master) {
 		drm_dev_unregister(private->drm);
 		mtk_drm_kms_deinit(private->drm);
 		drm_dev_put(private->drm);
-
-		for (i = 0; i < private->data->mmsys_dev_num; i++) {
-			/* For device_find_child in mtk_drm_get_all_priv() */
-			put_device(private->all_drm_private[i]->dev);
-		}
-		put_device(private->mutex_dev);
 	}
 	private->mtk_drm_bound = false;


@@ -780,6 +780,9 @@ static bool fw_block_mem(struct a6xx_gmu_bo *bo, const struct block_header *blk)
 	return true;
 }
 
+#define NEXT_BLK(blk) \
+	((const struct block_header *)((const char *)(blk) + sizeof(*(blk)) + (blk)->size))
+
 static int a6xx_gmu_fw_load(struct a6xx_gmu *gmu)
 {
 	struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
@@ -811,7 +814,7 @@ static int a6xx_gmu_fw_load(struct a6xx_gmu *gmu)
 	for (blk = (const struct block_header *) fw_image->data;
 	     (const u8*) blk < fw_image->data + fw_image->size;
-	     blk = (const struct block_header *) &blk->data[blk->size >> 2]) {
+	     blk = NEXT_BLK(blk)) {
 
 		if (blk->size == 0)
 			continue;
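
NEXT_BLK() above walks a firmware image laid out as variable-length blocks — a fixed header followed by `size` payload bytes — so the next block starts sizeof(header) + size bytes further on. A minimal userspace sketch of that walking pattern, using a made-up block layout rather than the real GMU firmware format:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Hypothetical layout: fixed header, then `size` payload bytes. */
    struct block_header {
        uint32_t type;
        uint32_t size;      /* payload bytes, not counting the header */
        uint8_t  data[];
    };

    #define NEXT_BLK(blk) \
        ((const struct block_header *)((const char *)(blk) + sizeof(*(blk)) + (blk)->size))

    int main(void)
    {
        /* Tiny "image": two blocks with 4 and 2 payload bytes. */
        _Alignas(uint32_t) uint8_t image[sizeof(struct block_header) * 2 + 6];
        struct block_header *b = (struct block_header *)image;

        b->type = 1; b->size = 4;
        memcpy(b->data, "\xde\xad\xbe\xef", 4);
        b = (struct block_header *)((char *)b + sizeof(*b) + b->size);
        b->type = 2; b->size = 2;
        memcpy(b->data, "\x12\x34", 2);

        /* Walk the image the same way the loop above does. */
        for (const struct block_header *blk = (const struct block_header *)image;
             (const uint8_t *)blk < image + sizeof(image);
             blk = NEXT_BLK(blk)) {
            if (blk->size == 0)
                continue;
            printf("block type %u, %u payload bytes\n", blk->type, blk->size);
        }
        return 0;
    }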


@@ -348,13 +348,6 @@ int adreno_fault_handler(struct msm_gpu *gpu, unsigned long iova, int flags,
 	return 0;
 }
 
-static bool
-adreno_smmu_has_prr(struct msm_gpu *gpu)
-{
-	struct adreno_smmu_priv *adreno_smmu = dev_get_drvdata(&gpu->pdev->dev);
-	return adreno_smmu && adreno_smmu->set_prr_addr;
-}
-
 int adreno_get_param(struct msm_gpu *gpu, struct msm_context *ctx,
 		     uint32_t param, uint64_t *value, uint32_t *len)
 {


@@ -1545,6 +1545,9 @@ static enum drm_mode_status dpu_crtc_mode_valid(struct drm_crtc *crtc,
 	adjusted_mode_clk = dpu_core_perf_adjusted_mode_clk(mode->clock,
 							    dpu_kms->perf.perf_cfg);
 
+	if (dpu_kms->catalog->caps->has_3d_merge)
+		adjusted_mode_clk /= 2;
+
 	/*
 	 * The given mode, adjusted for the perf clock factor, should not exceed
 	 * the max core clock rate


@@ -267,8 +267,8 @@ static const u32 wb2_formats_rgb_yuv[] = {
 		.base = 0x200, .len = 0xa0,}, \
 	.csc_blk = {.name = "csc", \
 		.base = 0x320, .len = 0x100,}, \
-	.format_list = plane_formats_yuv, \
-	.num_formats = ARRAY_SIZE(plane_formats_yuv), \
+	.format_list = plane_formats, \
+	.num_formats = ARRAY_SIZE(plane_formats), \
 	.rotation_cfg = NULL, \
 }


@@ -500,13 +500,15 @@ static void _dpu_plane_setup_pixel_ext(struct dpu_hw_scaler3_cfg *scale_cfg,
 	int i;
 
 	for (i = 0; i < DPU_MAX_PLANES; i++) {
+		uint32_t w = src_w, h = src_h;
+
 		if (i == DPU_SSPP_COMP_1_2 || i == DPU_SSPP_COMP_2) {
-			src_w /= chroma_subsmpl_h;
-			src_h /= chroma_subsmpl_v;
+			w /= chroma_subsmpl_h;
+			h /= chroma_subsmpl_v;
 		}
 
-		pixel_ext->num_ext_pxls_top[i] = src_h;
-		pixel_ext->num_ext_pxls_left[i] = src_w;
+		pixel_ext->num_ext_pxls_top[i] = h;
+		pixel_ext->num_ext_pxls_left[i] = w;
 	}
 }
@@ -740,7 +742,7 @@ static int dpu_plane_atomic_check_pipe(struct dpu_plane *pdpu,
 	 * We already have verified scaling against platform limitations.
 	 * Now check if the SSPP supports scaling at all.
 	 */
-	if (!sblk->scaler_blk.len &&
+	if (!(sblk->scaler_blk.len && pipe->sspp->ops.setup_scaler) &&
 	    ((drm_rect_width(&new_plane_state->src) >> 16 !=
 	      drm_rect_width(&new_plane_state->dst)) ||
 	     (drm_rect_height(&new_plane_state->src) >> 16 !=
@@ -1278,7 +1280,7 @@ int dpu_assign_plane_resources(struct dpu_global_state *global_state,
 						     state, plane_state,
 						     prev_adjacent_plane_state);
 		if (ret)
-			break;
+			return ret;
 
 		prev_adjacent_plane_state = plane_state;
 	}
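
The pixel-extension hunk above fixes an in-place-update bug: src_w and src_h were divided inside the loop, so the chroma subsampling compounded across iterations and leaked into planes processed later. A small standalone illustration of the difference (plane indices and subsampling factors are made up, not the DPU's):

    #include <stdio.h>

    #define NUM_PLANES 4
    #define CHROMA(i) ((i) == 1 || (i) == 2)   /* pretend planes 1 and 2 are chroma */

    int main(void)
    {
        unsigned int src_w = 1920, src_h = 1080;
        unsigned int subsmpl_h = 2, subsmpl_v = 2;
        unsigned int i, w, h;

        /* Buggy pattern: dividing the shared variables in place. */
        unsigned int bw = src_w, bh = src_h;
        for (i = 0; i < NUM_PLANES; i++) {
            if (CHROMA(i)) {
                bw /= subsmpl_h;
                bh /= subsmpl_v;
            }
            /* plane 2 ends up 480x270 and plane 3 inherits it, both wrong */
            printf("buggy plane %u: %ux%u\n", i, bw, bh);
        }

        /* Fixed pattern: per-iteration copies, as in the hunk above. */
        for (i = 0; i < NUM_PLANES; i++) {
            w = src_w;
            h = src_h;
            if (CHROMA(i)) {
                w /= subsmpl_h;
                h /= subsmpl_v;
            }
            /* luma planes stay 1920x1080, chroma planes are 960x540 */
            printf("fixed plane %u: %ux%u\n", i, w, h);
        }
        return 0;
    }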


@@ -842,7 +842,7 @@ struct dpu_hw_sspp *dpu_rm_reserve_sspp(struct dpu_rm *rm,
 	if (!reqs->scale && !reqs->yuv)
 		hw_sspp = dpu_rm_try_sspp(rm, global_state, crtc, reqs, SSPP_TYPE_DMA);
-	if (!hw_sspp && reqs->scale)
+	if (!hw_sspp && !reqs->yuv)
 		hw_sspp = dpu_rm_try_sspp(rm, global_state, crtc, reqs, SSPP_TYPE_RGB);
 	if (!hw_sspp)
 		hw_sspp = dpu_rm_try_sspp(rm, global_state, crtc, reqs, SSPP_TYPE_VIG);


@@ -72,6 +72,9 @@ static int dpu_wb_conn_atomic_check(struct drm_connector *connector,
 		DPU_ERROR("invalid fb w=%d, maxlinewidth=%u\n",
 			  fb->width, dpu_wb_conn->maxlinewidth);
 		return -EINVAL;
+	} else if (fb->modifier != DRM_FORMAT_MOD_LINEAR) {
+		DPU_ERROR("unsupported fb modifier:%#llx\n", fb->modifier);
+		return -EINVAL;
 	}
 
 	return drm_atomic_helper_check_wb_connector_state(conn_state->connector, conn_state->state);


@@ -109,7 +109,6 @@ struct msm_dsi_phy {
 	struct msm_dsi_dphy_timing timing;
 	const struct msm_dsi_phy_cfg *cfg;
 	void *tuning_cfg;
-	void *pll_data;
 
 	enum msm_dsi_phy_usecase usecase;
 	bool regulator_ldo_mode;


@@ -426,11 +426,8 @@ static void dsi_pll_enable_pll_bias(struct dsi_pll_7nm *pll)
 	u32 data;
 
 	spin_lock_irqsave(&pll->pll_enable_lock, flags);
-	if (pll->pll_enable_cnt++) {
-		spin_unlock_irqrestore(&pll->pll_enable_lock, flags);
-		WARN_ON(pll->pll_enable_cnt == INT_MAX);
-		return;
-	}
+	pll->pll_enable_cnt++;
+	WARN_ON(pll->pll_enable_cnt == INT_MAX);
 
 	data = readl(pll->phy->base + REG_DSI_7nm_PHY_CMN_CTRL_0);
 	data |= DSI_7nm_PHY_CMN_CTRL_0_PLL_SHUTDOWNB;
@@ -876,7 +873,6 @@ static int dsi_pll_7nm_init(struct msm_dsi_phy *phy)
 	spin_lock_init(&pll_7nm->pll_enable_lock);
 	pll_7nm->phy = phy;
-	phy->pll_data = pll_7nm;
 
 	ret = pll_7nm_register(pll_7nm, phy->provided_clocks->hws);
 	if (ret) {
@@ -965,10 +961,8 @@ static int dsi_7nm_phy_enable(struct msm_dsi_phy *phy,
 	u32 const delay_us = 5;
 	u32 const timeout_us = 1000;
 	struct msm_dsi_dphy_timing *timing = &phy->timing;
-	struct dsi_pll_7nm *pll = phy->pll_data;
 	void __iomem *base = phy->base;
 	bool less_than_1500_mhz;
-	unsigned long flags;
 	u32 vreg_ctrl_0, vreg_ctrl_1, lane_ctrl0;
 	u32 glbl_pemph_ctrl_0;
 	u32 glbl_str_swi_cal_sel_ctrl, glbl_hstx_str_ctrl_0;
@@ -1090,13 +1084,10 @@ static int dsi_7nm_phy_enable(struct msm_dsi_phy *phy,
 		glbl_rescode_bot_ctrl = 0x3c;
 	}
 
-	spin_lock_irqsave(&pll->pll_enable_lock, flags);
-	pll->pll_enable_cnt = 1;
 	/* de-assert digital and pll power down */
 	data = DSI_7nm_PHY_CMN_CTRL_0_DIGTOP_PWRDN_B |
 	       DSI_7nm_PHY_CMN_CTRL_0_PLL_SHUTDOWNB;
 	writel(data, base + REG_DSI_7nm_PHY_CMN_CTRL_0);
-	spin_unlock_irqrestore(&pll->pll_enable_lock, flags);
 
 	/* Assert PLL core reset */
 	writel(0x00, base + REG_DSI_7nm_PHY_CMN_PLL_CNTRL);
@@ -1209,9 +1200,7 @@ static bool dsi_7nm_set_continuous_clock(struct msm_dsi_phy *phy, bool enable)
 static void dsi_7nm_phy_disable(struct msm_dsi_phy *phy)
 {
-	struct dsi_pll_7nm *pll = phy->pll_data;
 	void __iomem *base = phy->base;
-	unsigned long flags;
 	u32 data;
 
 	DBG("");
@@ -1238,11 +1227,8 @@ static void dsi_7nm_phy_disable(struct msm_dsi_phy *phy)
 	writel(data, base + REG_DSI_7nm_PHY_CMN_CTRL_0);
 	writel(0, base + REG_DSI_7nm_PHY_CMN_LANE_CTRL0);
 
-	spin_lock_irqsave(&pll->pll_enable_lock, flags);
-	pll->pll_enable_cnt = 0;
 	/* Turn off all PHY blocks */
 	writel(0x00, base + REG_DSI_7nm_PHY_CMN_CTRL_0);
-	spin_unlock_irqrestore(&pll->pll_enable_lock, flags);
 	/* make sure phy is turned off */
 	wmb();


@@ -1120,12 +1120,16 @@ static void msm_gem_free_object(struct drm_gem_object *obj)
 		put_pages(obj);
 	}
 
-	if (obj->resv != &obj->_resv) {
+	/*
+	 * In error paths, we could end up here before msm_gem_new_handle()
+	 * has changed obj->resv to point to the shared resv.  In this case,
+	 * we don't want to drop a ref to the shared r_obj that we haven't
+	 * taken yet.
+	 */
+	if ((msm_obj->flags & MSM_BO_NO_SHARE) && (obj->resv != &obj->_resv)) {
 		struct drm_gem_object *r_obj =
 			container_of(obj->resv, struct drm_gem_object, _resv);
 
-		WARN_ON(!(msm_obj->flags & MSM_BO_NO_SHARE));
-
 		/* Drop reference we hold to shared resv obj: */
 		drm_gem_object_put(r_obj);
 	}


@@ -414,6 +414,11 @@ static void submit_attach_object_fences(struct msm_gem_submit *submit)
 				       submit->user_fence,
 				       DMA_RESV_USAGE_BOOKKEEP,
 				       DMA_RESV_USAGE_BOOKKEEP);
+
+		last_fence = vm->last_fence;
+		vm->last_fence = dma_fence_unwrap_merge(submit->user_fence, last_fence);
+		dma_fence_put(last_fence);
+
 		return;
 	}
@@ -427,10 +432,6 @@ static void submit_attach_object_fences(struct msm_gem_submit *submit)
 		dma_resv_add_fence(obj->resv, submit->user_fence,
 				   DMA_RESV_USAGE_READ);
 	}
-
-	last_fence = vm->last_fence;
-	vm->last_fence = dma_fence_unwrap_merge(submit->user_fence, last_fence);
-	dma_fence_put(last_fence);
 }
 
 static int submit_bo(struct msm_gem_submit *submit, uint32_t idx,


@@ -971,6 +971,7 @@ static int
 lookup_op(struct msm_vm_bind_job *job, const struct drm_msm_vm_bind_op *op)
 {
 	struct drm_device *dev = job->vm->drm;
+	struct msm_drm_private *priv = dev->dev_private;
 	int i = job->nr_ops++;
 	int ret = 0;
@@ -1017,6 +1018,11 @@ lookup_op(struct msm_vm_bind_job *job, const struct drm_msm_vm_bind_op *op)
 		break;
 	}
 
+	if ((op->op == MSM_VM_BIND_OP_MAP_NULL) &&
+	    !adreno_smmu_has_prr(priv->gpu)) {
+		ret = UERR(EINVAL, dev, "PRR not supported\n");
+	}
+
 	return ret;
 }
@@ -1421,7 +1427,7 @@ msm_ioctl_vm_bind(struct drm_device *dev, void *data, struct drm_file *file)
 	 * Maybe we could allow just UNMAP ops?  OTOH userspace should just
 	 * immediately close the device file and all will be torn down.
 	 */
-	if (to_msm_vm(ctx->vm)->unusable)
+	if (to_msm_vm(msm_context_vm(dev, ctx))->unusable)
 		return UERR(EPIPE, dev, "context is unusable");
 
 	/*


@@ -299,6 +299,17 @@ static inline struct msm_gpu *dev_to_gpu(struct device *dev)
 	return container_of(adreno_smmu, struct msm_gpu, adreno_smmu);
 }
 
+static inline bool
+adreno_smmu_has_prr(struct msm_gpu *gpu)
+{
+	struct adreno_smmu_priv *adreno_smmu = dev_get_drvdata(&gpu->pdev->dev);
+
+	if (!adreno_smmu)
+		return false;
+
+	return adreno_smmu && adreno_smmu->set_prr_addr;
+}
+
 /* It turns out that all targets use the same ringbuffer size */
 #define MSM_GPU_RINGBUFFER_SZ SZ_32K
 #define MSM_GPU_RINGBUFFER_BLKSIZE 32


@@ -338,6 +338,8 @@ msm_iommu_pagetable_prealloc_allocate(struct msm_mmu *mmu, struct msm_mmu_preall
 	ret = kmem_cache_alloc_bulk(pt_cache, GFP_KERNEL, p->count, p->pages);
 	if (ret != p->count) {
 		kfree(p->pages);
+		p->pages = NULL;
+		p->count = ret;
 		return -ENOMEM;
 	}
@@ -351,6 +353,9 @@ msm_iommu_pagetable_prealloc_cleanup(struct msm_mmu *mmu, struct msm_mmu_preallo
 	struct kmem_cache *pt_cache = get_pt_cache(mmu);
 	uint32_t remaining_pt_count = p->count - p->ptr;
 
+	if (!p->pages)
+		return;
+
 	if (p->count > 0)
 		trace_msm_mmu_prealloc_cleanup(p->count, remaining_pt_count);
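
The two msm_iommu hunks above are the usual free-then-NULL pattern: the error path releases the array and clears the pointer, and the later cleanup callback bails out on NULL so the same buffer can never be handled twice. A generic sketch of the idea (plain malloc/free, not the msm_mmu API):

    #include <stdio.h>
    #include <stdlib.h>

    struct prealloc {
        void **pages;
        unsigned int count;
    };

    /* Failure path: release the array and mark it gone. */
    static int alloc_fail(struct prealloc *p)
    {
        free(p->pages);
        p->pages = NULL;     /* lets cleanup() recognise there is nothing to do */
        p->count = 0;
        return -1;
    }

    /* Cleanup runs unconditionally later; the NULL check makes it idempotent. */
    static void cleanup(struct prealloc *p)
    {
        if (!p->pages)
            return;
        free(p->pages);
        p->pages = NULL;
    }

    int main(void)
    {
        struct prealloc p = { .pages = calloc(4, sizeof(void *)), .count = 4 };

        alloc_fail(&p);      /* error path frees the array once       */
        cleanup(&p);         /* later cleanup is now a harmless no-op */
        printf("no double free\n");
        return 0;
    }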


@@ -482,6 +482,17 @@ nouveau_sched_create(struct nouveau_sched **psched, struct nouveau_drm *drm,
 	return 0;
 }
 
+static bool
+nouveau_sched_job_list_empty(struct nouveau_sched *sched)
+{
+	bool empty;
+
+	spin_lock(&sched->job.list.lock);
+	empty = list_empty(&sched->job.list.head);
+	spin_unlock(&sched->job.list.lock);
+
+	return empty;
+}
+
 static void
 nouveau_sched_fini(struct nouveau_sched *sched)
@@ -489,8 +500,7 @@ nouveau_sched_fini(struct nouveau_sched *sched)
 	struct drm_gpu_scheduler *drm_sched = &sched->base;
 	struct drm_sched_entity *entity = &sched->entity;
 
-	rmb(); /* for list_empty to work without lock */
-	wait_event(sched->job.wq, list_empty(&sched->job.list.head));
+	wait_event(sched->job.wq, nouveau_sched_job_list_empty(sched));
 
 	drm_sched_entity_fini(entity);
 	drm_sched_fini(drm_sched);


@@ -359,7 +359,7 @@ static int kingdisplay_panel_probe(struct mipi_dsi_device *dsi)
 	dsi->lanes = 4;
 	dsi->format = MIPI_DSI_FMT_RGB888;
 	dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST |
-			  MIPI_DSI_MODE_LPM;
+			  MIPI_DSI_MODE_LPM | MIPI_DSI_MODE_NO_EOT_PACKET;
 
 	kingdisplay = devm_drm_panel_alloc(&dsi->dev, __typeof(*kingdisplay), base,
 					   &kingdisplay_panel_funcs,


@@ -249,6 +249,11 @@ static const struct drm_display_mode default_mode = {
 	.flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC,
 };
 
+/*
+ * The mode data for this panel has been reverse engineered without access
+ * to the panel datasheet / manual. Using DRM_MODE_FLAG_PHSYNC like all
+ * other panels results in garbage data on the display.
+ */
 static const struct drm_display_mode t28cp45tn89_mode = {
 	.clock = 6008,
 	.hdisplay = 240,
@@ -261,7 +266,7 @@ static const struct drm_display_mode t28cp45tn89_mode = {
 	.vtotal = 320 + 8 + 4 + 4,
 	.width_mm = 43,
 	.height_mm = 57,
-	.flags = DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC,
+	.flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC,
 };
 
 static const struct drm_display_mode et028013dma_mode = {


@@ -314,17 +314,17 @@ static int radeon_pci_probe(struct pci_dev *pdev,
 	ret = pci_enable_device(pdev);
 	if (ret)
-		goto err_free;
+		return ret;
 
 	pci_set_drvdata(pdev, ddev);
 
 	ret = radeon_driver_load_kms(ddev, flags);
 	if (ret)
-		goto err_agp;
+		goto err;
 
 	ret = drm_dev_register(ddev, flags);
 	if (ret)
-		goto err_agp;
+		goto err;
 
 	if (rdev->mc.real_vram_size <= (8 * 1024 * 1024))
 		format = drm_format_info(DRM_FORMAT_C8);
@@ -337,30 +337,14 @@ static int radeon_pci_probe(struct pci_dev *pdev,
 	return 0;
 
-err_agp:
+err:
 	pci_disable_device(pdev);
-err_free:
-	drm_dev_put(ddev);
 	return ret;
 }
 
-static void
-radeon_pci_remove(struct pci_dev *pdev)
-{
-	struct drm_device *dev = pci_get_drvdata(pdev);
-
-	drm_put_dev(dev);
-}
-
 static void
 radeon_pci_shutdown(struct pci_dev *pdev)
 {
-	/* if we are running in a VM, make sure the device
-	 * torn down properly on reboot/shutdown
-	 */
-	if (radeon_device_is_virtual())
-		radeon_pci_remove(pdev);
-
 #if defined(CONFIG_PPC64) || defined(CONFIG_MACH_LOONGSON64)
 	/*
 	 * Some adapters need to be suspended before a
@@ -613,7 +597,6 @@ static struct pci_driver radeon_kms_pci_driver = {
 	.name = DRIVER_NAME,
 	.id_table = pciidlist,
 	.probe = radeon_pci_probe,
-	.remove = radeon_pci_remove,
 	.shutdown = radeon_pci_shutdown,
 	.driver.pm = &radeon_pm_ops,
 };


@@ -84,7 +84,6 @@ void radeon_driver_unload_kms(struct drm_device *dev)
 	rdev->agp = NULL;
 
 done_free:
 	kfree(rdev);
-	dev->dev_private = NULL;
 }


@@ -70,6 +70,7 @@ int drm_sched_entity_init(struct drm_sched_entity *entity,
 	entity->guilty = guilty;
 	entity->num_sched_list = num_sched_list;
 	entity->priority = priority;
+	entity->last_user = current->group_leader;
 	/*
 	 * It's perfectly valid to initialize an entity without having a valid
 	 * scheduler attached. It's just not valid to use the scheduler before it
@@ -302,7 +303,7 @@ long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout)
 	/* For a killed process disallow further enqueueing of jobs. */
 	last_user = cmpxchg(&entity->last_user, current->group_leader, NULL);
-	if ((!last_user || last_user == current->group_leader) &&
+	if (last_user == current->group_leader &&
 	    (current->flags & PF_EXITING) && (current->exit_code == SIGKILL))
 		drm_sched_entity_kill(entity);
@@ -552,10 +553,11 @@ void drm_sched_entity_select_rq(struct drm_sched_entity *entity)
 		drm_sched_rq_remove_entity(entity->rq, entity);
 		entity->rq = rq;
 	}
-	spin_unlock(&entity->lock);
 
 	if (entity->num_sched_list == 1)
 		entity->sched_list = NULL;
+
+	spin_unlock(&entity->lock);
 }
 
 /**
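
The SIGKILL-handling change above hinges on cmpxchg() semantics: only the task recorded as the entity's last user succeeds in swapping the field to NULL and may kill the entity, and with last_user now initialised at entity creation the "field was never set" case no longer counts as a match. A tiny userspace sketch of that compare-and-swap ownership test (GCC/Clang __sync builtin, illustrative types):

    #include <stdio.h>

    struct task { const char *name; };

    static struct task creator = { "creator" };
    static struct task other   = { "other"   };

    /* entity->last_user equivalent, initialised to the creating task */
    static struct task *last_user = &creator;

    /* Returns 1 if `me` was the recorded last user (and atomically clears it). */
    static int may_kill_entity(struct task *me)
    {
        struct task *old = __sync_val_compare_and_swap(&last_user, me, (struct task *)0);
        return old == me;
    }

    int main(void)
    {
        printf("other:   %d\n", may_kill_entity(&other));   /* 0: not the last user */
        printf("creator: %d\n", may_kill_entity(&creator)); /* 1: swap succeeded    */
        printf("creator: %d\n", may_kill_entity(&creator)); /* 0: already cleared   */
        return 0;
    }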


@@ -813,12 +813,16 @@ static int gt_reset(struct xe_gt *gt)
 	unsigned int fw_ref;
 	int err;
 
-	if (xe_device_wedged(gt_to_xe(gt)))
-		return -ECANCELED;
+	if (xe_device_wedged(gt_to_xe(gt))) {
+		err = -ECANCELED;
+		goto err_pm_put;
+	}
 
 	/* We only support GT resets with GuC submission */
-	if (!xe_device_uc_enabled(gt_to_xe(gt)))
-		return -ENODEV;
+	if (!xe_device_uc_enabled(gt_to_xe(gt))) {
+		err = -ENODEV;
+		goto err_pm_put;
+	}
 
 	xe_gt_info(gt, "reset started\n");
@@ -826,8 +830,6 @@ static int gt_reset(struct xe_gt *gt)
 	if (!err)
 		xe_gt_warn(gt, "reset block failed to get lifted");
 
-	xe_pm_runtime_get(gt_to_xe(gt));
-
 	if (xe_fault_inject_gt_reset()) {
 		err = -ECANCELED;
 		goto err_fail;
@@ -874,6 +876,7 @@ err_fail:
 	xe_gt_err(gt, "reset failed (%pe)\n", ERR_PTR(err));
 
 	xe_device_declare_wedged(gt_to_xe(gt));
+err_pm_put:
 	xe_pm_runtime_put(gt_to_xe(gt));
 
 	return err;
@@ -895,7 +898,9 @@ void xe_gt_reset_async(struct xe_gt *gt)
 		return;
 
 	xe_gt_info(gt, "reset queued\n");
-	queue_work(gt->ordered_wq, &gt->reset.worker);
+	xe_pm_runtime_get_noresume(gt_to_xe(gt));
+	if (!queue_work(gt->ordered_wq, &gt->reset.worker))
+		xe_pm_runtime_put(gt_to_xe(gt));
 }
 
 void xe_gt_suspend_prepare(struct xe_gt *gt)
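
The xe_gt_reset_async() change above follows the usual rule for references handed to deferred work: take the reference before queueing, let the worker drop it on every exit path (including the new err_pm_put label), and give it back immediately when queue_work() reports the item was already pending. A stripped-down sketch with a plain counter standing in for the runtime-PM reference (not the xe API):

    #include <stdbool.h>
    #include <stdio.h>

    static int pm_refcount;

    static void pm_get(void) { pm_refcount++; }
    static void pm_put(void) { pm_refcount--; }

    /* Pretend work queue: returns false if the item was already pending. */
    static bool queue_work_once(bool *pending)
    {
        if (*pending)
            return false;
        *pending = true;
        return true;
    }

    static void reset_async(bool *pending)
    {
        pm_get();                        /* reference travels with the work item */
        if (!queue_work_once(pending))
            pm_put();                    /* not queued: nothing will consume it  */
    }

    static void reset_worker(bool *pending)
    {
        *pending = false;
        /* ... do the reset; every exit path ends here ... */
        pm_put();                        /* consume the reference taken at queue time */
    }

    int main(void)
    {
        bool pending = false;

        reset_async(&pending);           /* queued, refcount goes to 1        */
        reset_async(&pending);           /* already pending, refcount stays 1 */
        reset_worker(&pending);          /* refcount back to 0                */
        printf("pm_refcount = %d\n", pm_refcount);
        return 0;
    }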


@@ -166,10 +166,10 @@ xe_validation_device_init(struct xe_validation_device *val)
  */
 DEFINE_CLASS(xe_validation, struct xe_validation_ctx *,
 	     if (_T) xe_validation_ctx_fini(_T);,
-	     ({_ret = xe_validation_ctx_init(_ctx, _val, _exec, _flags);
-	       _ret ? NULL : _ctx; }),
+	     ({*_ret = xe_validation_ctx_init(_ctx, _val, _exec, _flags);
+	       *_ret ? NULL : _ctx; }),
 	     struct xe_validation_ctx *_ctx, struct xe_validation_device *_val,
-	     struct drm_exec *_exec, const struct xe_val_flags _flags, int _ret);
+	     struct drm_exec *_exec, const struct xe_val_flags _flags, int *_ret);
 
 static inline void *class_xe_validation_lock_ptr(class_xe_validation_t *_T)
 {return *_T; }
 #define class_xe_validation_is_conditional true
@@ -186,7 +186,7 @@ static inline void *class_xe_validation_lock_ptr(class_xe_validation_t *_T)
  * exhaustive eviction.
  */
 #define xe_validation_guard(_ctx, _val, _exec, _flags, _ret)		\
-	scoped_guard(xe_validation, _ctx, _val, _exec, _flags, _ret)	\
+	scoped_guard(xe_validation, _ctx, _val, _exec, _flags, &_ret)	\
	drm_exec_until_all_locked(_exec)
 
 #endif
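
The xe_validation_guard() fix above comes down to C calling convention: the DEFINE_CLASS() constructor received `_ret` by value, so its assignment never reached the caller's variable and the guarded block could go on to test an uninitialised value; passing `int *_ret` (with the guard macro supplying `&_ret`) makes the write land in the caller's storage. A tiny illustration of the difference, independent of the DRM helpers:

    #include <stdio.h>

    static int do_init(void) { return -5; }   /* pretend init fails with -5 */

    /* Broken shape: the "constructor" gets ret by value, caller never sees -5. */
    static void ctor_by_value(int ret)   { ret = do_init(); (void)ret; }

    /* Fixed shape: the constructor writes through a pointer. */
    static void ctor_by_pointer(int *ret) { *ret = do_init(); }

    #define guard_by_value(_ret)    ctor_by_value(_ret)
    #define guard_by_pointer(_ret)  ctor_by_pointer(&_ret)   /* note the added &, as in the fix */

    int main(void)
    {
        int ret1 = 1234;   /* stands in for the caller's uninitialised variable */
        int ret2 = 1234;

        guard_by_value(ret1);
        guard_by_pointer(ret2);

        printf("by value:   ret = %d (stale)\n", ret1);   /* still 1234 */
        printf("by pointer: ret = %d\n", ret2);           /* -5 from the constructor */
        return 0;
    }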