Mirror of https://github.com/torvalds/linux.git (synced 2025-12-07 20:06:24 +00:00)
Merge tag 'drm-fixes-2023-10-20' of git://anongit.freedesktop.org/drm/drm
Pull drm fixes from Dave Airlie:
 "Regular fixes for the week, amdgpu, i915, nouveau, with some other
  scattered around, nothing major.

  amdgpu:
   - Fix possible NULL pointer dereference
   - Avoid possible BUG_ON in GPUVM updates
   - Disable AMD_CTX_PRIORITY_UNSET

  i915:
   - Fix display issue that was blocking S0ix
   - Retry gtt fault when out of fence registers

  bridge:
   - ti-sn65dsi86: Fix device lifetime

  edid:
   - Add quirk for BenQ GW2765

  ivpu:
   - Extend address range for MMU mmap

  nouveau:
   - DP-connector fixes
   - Documentation fixes

  panel:
   - Move AUX B116XW03 into panel-simple

  scheduler:
   - Eliminate DRM_SCHED_PRIORITY_UNSET

  ttm:
   - Fix possible NULL-ptr deref in cleanup

  mediatek:
   - Correctly free sg_table in gem prime vmap"

* tag 'drm-fixes-2023-10-20' of git://anongit.freedesktop.org/drm/drm:
  drm/amdgpu: Reserve fences for VM update
  drm/amdgpu: Fix possible null pointer dereference
  accel/ivpu: Extend address range for MMU mmap
  Revert "accel/ivpu: Use cached buffers for FW loading"
  accel/ivpu: Don't enter d0i3 during FLR
  drm/i915: Retry gtt fault when out of fence registers
  drm/i915/cx0: Only clear/set the Pipe Reset bit of the PHY Lanes Owned
  gpu/drm: Eliminate DRM_SCHED_PRIORITY_UNSET
  drm/amdgpu: Unset context priority is now invalid
  drm/mediatek: Correctly free sg_table in gem prime vmap
  drm/edid: add 8 bpc quirk to the BenQ GW2765
  drm/ttm: Reorder sys manager cleanup step
  drm/nouveau/disp: fix DP capable DSM connectors
  drm/nouveau: exec: fix ioctl kernel-doc warning
  drm/panel: Move AUX B116XW03 out of panel-edp back to panel-simple
  drm/bridge: ti-sn65dsi86: Associate DSI device lifetime with auxiliary device
@@ -367,14 +367,19 @@ int ivpu_boot(struct ivpu_device *vdev)
         return 0;
 }
 
-int ivpu_shutdown(struct ivpu_device *vdev)
+void ivpu_prepare_for_reset(struct ivpu_device *vdev)
 {
-        int ret;
-
         ivpu_hw_irq_disable(vdev);
         disable_irq(vdev->irq);
         ivpu_ipc_disable(vdev);
         ivpu_mmu_disable(vdev);
+}
+
+int ivpu_shutdown(struct ivpu_device *vdev)
+{
+        int ret;
+
+        ivpu_prepare_for_reset(vdev);
 
         ret = ivpu_hw_power_down(vdev);
         if (ret)
@@ -151,6 +151,7 @@ void ivpu_file_priv_put(struct ivpu_file_priv **link);
 
 int ivpu_boot(struct ivpu_device *vdev);
 int ivpu_shutdown(struct ivpu_device *vdev);
+void ivpu_prepare_for_reset(struct ivpu_device *vdev);
 
 static inline u8 ivpu_revision(struct ivpu_device *vdev)
 {
@@ -220,8 +220,7 @@ static int ivpu_fw_mem_init(struct ivpu_device *vdev)
         if (ret)
                 return ret;
 
-        fw->mem = ivpu_bo_alloc_internal(vdev, fw->runtime_addr, fw->runtime_size,
-                                         DRM_IVPU_BO_CACHED | DRM_IVPU_BO_NOSNOOP);
+        fw->mem = ivpu_bo_alloc_internal(vdev, fw->runtime_addr, fw->runtime_size, DRM_IVPU_BO_WC);
         if (!fw->mem) {
                 ivpu_err(vdev, "Failed to allocate firmware runtime memory\n");
                 return -ENOMEM;
@@ -331,7 +330,7 @@ int ivpu_fw_load(struct ivpu_device *vdev)
                 memset(start, 0, size);
         }
 
-        clflush_cache_range(fw->mem->kvaddr, fw->mem->base.size);
+        wmb(); /* Flush WC buffers after writing fw->mem */
 
         return 0;
 }
@@ -433,7 +432,7 @@ void ivpu_fw_boot_params_setup(struct ivpu_device *vdev, struct vpu_boot_params
         if (!ivpu_fw_is_cold_boot(vdev)) {
                 boot_params->save_restore_ret_address = 0;
                 vdev->pm->is_warmboot = true;
-                clflush_cache_range(vdev->fw->mem->kvaddr, SZ_4K);
+                wmb(); /* Flush WC buffers after writing save_restore_ret_address */
                 return;
         }
 
@@ -495,7 +494,7 @@ void ivpu_fw_boot_params_setup(struct ivpu_device *vdev, struct vpu_boot_params
         boot_params->punit_telemetry_sram_size = ivpu_hw_reg_telemetry_size_get(vdev);
         boot_params->vpu_telemetry_enable = ivpu_hw_reg_telemetry_enable_get(vdev);
 
-        clflush_cache_range(vdev->fw->mem->kvaddr, SZ_4K);
+        wmb(); /* Flush WC buffers after writing bootparams */
 
         ivpu_fw_boot_params_print(vdev, boot_params);
 }
@@ -8,8 +8,6 @@
 #include <drm/drm_gem.h>
 #include <drm/drm_mm.h>
 
-#define DRM_IVPU_BO_NOSNOOP       0x10000000
-
 struct dma_buf;
 struct ivpu_bo_ops;
 struct ivpu_file_priv;
@@ -85,9 +83,6 @@ static inline u32 ivpu_bo_cache_mode(struct ivpu_bo *bo)
 
 static inline bool ivpu_bo_is_snooped(struct ivpu_bo *bo)
 {
-        if (bo->flags & DRM_IVPU_BO_NOSNOOP)
-                return false;
-
         return ivpu_bo_cache_mode(bo) == DRM_IVPU_BO_CACHED;
 }
 
@@ -13,6 +13,7 @@ struct ivpu_hw_ops {
         int (*power_up)(struct ivpu_device *vdev);
         int (*boot_fw)(struct ivpu_device *vdev);
         int (*power_down)(struct ivpu_device *vdev);
+        int (*reset)(struct ivpu_device *vdev);
         bool (*is_idle)(struct ivpu_device *vdev);
         void (*wdt_disable)(struct ivpu_device *vdev);
         void (*diagnose_failure)(struct ivpu_device *vdev);
@@ -91,6 +92,13 @@ static inline int ivpu_hw_power_down(struct ivpu_device *vdev)
         return vdev->hw->ops->power_down(vdev);
 };
 
+static inline int ivpu_hw_reset(struct ivpu_device *vdev)
+{
+        ivpu_dbg(vdev, PM, "HW reset\n");
+
+        return vdev->hw->ops->reset(vdev);
+};
+
 static inline void ivpu_hw_wdt_disable(struct ivpu_device *vdev)
 {
         vdev->hw->ops->wdt_disable(vdev);
@@ -1029,6 +1029,7 @@ const struct ivpu_hw_ops ivpu_hw_37xx_ops = {
         .power_up = ivpu_hw_37xx_power_up,
         .is_idle = ivpu_hw_37xx_is_idle,
         .power_down = ivpu_hw_37xx_power_down,
+        .reset = ivpu_hw_37xx_reset,
         .boot_fw = ivpu_hw_37xx_boot_fw,
         .wdt_disable = ivpu_hw_37xx_wdt_disable,
         .diagnose_failure = ivpu_hw_37xx_diagnose_failure,
@@ -1179,6 +1179,7 @@ const struct ivpu_hw_ops ivpu_hw_40xx_ops = {
         .power_up = ivpu_hw_40xx_power_up,
         .is_idle = ivpu_hw_40xx_is_idle,
         .power_down = ivpu_hw_40xx_power_down,
+        .reset = ivpu_hw_40xx_reset,
         .boot_fw = ivpu_hw_40xx_boot_fw,
         .wdt_disable = ivpu_hw_40xx_wdt_disable,
         .diagnose_failure = ivpu_hw_40xx_diagnose_failure,
@@ -11,6 +11,7 @@
 #include "ivpu_mmu.h"
 #include "ivpu_mmu_context.h"
 
+#define IVPU_MMU_VPU_ADDRESS_MASK        GENMASK(47, 12)
 #define IVPU_MMU_PGD_INDEX_MASK          GENMASK(47, 39)
 #define IVPU_MMU_PUD_INDEX_MASK          GENMASK(38, 30)
 #define IVPU_MMU_PMD_INDEX_MASK          GENMASK(29, 21)
@@ -328,12 +329,8 @@ ivpu_mmu_context_map_sgt(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
 
         if (!IS_ALIGNED(vpu_addr, IVPU_MMU_PAGE_SIZE))
                 return -EINVAL;
-        /*
-         * VPU is only 32 bit, but DMA engine is 38 bit
-         * Ranges < 2 GB are reserved for VPU internal registers
-         * Limit range to 8 GB
-         */
-        if (vpu_addr < SZ_2G || vpu_addr > SZ_8G)
+
+        if (vpu_addr & ~IVPU_MMU_VPU_ADDRESS_MASK)
                 return -EINVAL;
 
         prot = IVPU_MMU_ENTRY_MAPPED;
@@ -261,7 +261,8 @@ void ivpu_pm_reset_prepare_cb(struct pci_dev *pdev)
         ivpu_dbg(vdev, PM, "Pre-reset..\n");
         atomic_inc(&vdev->pm->reset_counter);
         atomic_set(&vdev->pm->in_reset, 1);
-        ivpu_shutdown(vdev);
+        ivpu_prepare_for_reset(vdev);
+        ivpu_hw_reset(vdev);
         ivpu_pm_prepare_cold_boot(vdev);
         ivpu_jobs_abort_all(vdev);
         ivpu_dbg(vdev, PM, "Pre-reset done.\n");
@@ -47,7 +47,6 @@ const unsigned int amdgpu_ctx_num_entities[AMDGPU_HW_IP_NUM] = {
 bool amdgpu_ctx_priority_is_valid(int32_t ctx_prio)
 {
         switch (ctx_prio) {
-        case AMDGPU_CTX_PRIORITY_UNSET:
         case AMDGPU_CTX_PRIORITY_VERY_LOW:
         case AMDGPU_CTX_PRIORITY_LOW:
         case AMDGPU_CTX_PRIORITY_NORMAL:
@@ -55,6 +54,7 @@ bool amdgpu_ctx_priority_is_valid(int32_t ctx_prio)
         case AMDGPU_CTX_PRIORITY_VERY_HIGH:
                 return true;
         default:
+        case AMDGPU_CTX_PRIORITY_UNSET:
                 return false;
         }
 }
@@ -64,7 +64,8 @@ amdgpu_ctx_to_drm_sched_prio(int32_t ctx_prio)
 {
         switch (ctx_prio) {
         case AMDGPU_CTX_PRIORITY_UNSET:
-                return DRM_SCHED_PRIORITY_UNSET;
+                pr_warn_once("AMD-->DRM context priority value UNSET-->NORMAL");
+                return DRM_SCHED_PRIORITY_NORMAL;
 
         case AMDGPU_CTX_PRIORITY_VERY_LOW:
                 return DRM_SCHED_PRIORITY_MIN;
@@ -403,7 +403,10 @@ amdgpu_dma_buf_move_notify(struct dma_buf_attachment *attach)
                         continue;
                 }
 
-                r = amdgpu_vm_clear_freed(adev, vm, NULL);
+                /* Reserve fences for two SDMA page table updates */
+                r = dma_resv_reserve_fences(resv, 2);
+                if (!r)
+                        r = amdgpu_vm_clear_freed(adev, vm, NULL);
                 if (!r)
                         r = amdgpu_vm_handle_moved(adev, vm);
 
@@ -1090,7 +1090,8 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
                 struct drm_gem_object *gobj = dma_buf->priv;
                 struct amdgpu_bo *abo = gem_to_amdgpu_bo(gobj);
 
-                if (abo->tbo.resource->mem_type == TTM_PL_VRAM)
+                if (abo->tbo.resource &&
+                    abo->tbo.resource->mem_type == TTM_PL_VRAM)
                         bo = gem_to_amdgpu_bo(gobj);
         }
         mem = bo->tbo.resource;
@@ -692,7 +692,7 @@ static struct ti_sn65dsi86 *bridge_to_ti_sn65dsi86(struct drm_bridge *bridge)
         return container_of(bridge, struct ti_sn65dsi86, bridge);
 }
 
-static int ti_sn_attach_host(struct ti_sn65dsi86 *pdata)
+static int ti_sn_attach_host(struct auxiliary_device *adev, struct ti_sn65dsi86 *pdata)
 {
         int val;
         struct mipi_dsi_host *host;
@@ -707,7 +707,7 @@ static int ti_sn_attach_host(struct ti_sn65dsi86 *pdata)
         if (!host)
                 return -EPROBE_DEFER;
 
-        dsi = devm_mipi_dsi_device_register_full(dev, host, &info);
+        dsi = devm_mipi_dsi_device_register_full(&adev->dev, host, &info);
         if (IS_ERR(dsi))
                 return PTR_ERR(dsi);
 
@@ -725,7 +725,7 @@ static int ti_sn_attach_host(struct ti_sn65dsi86 *pdata)
 
         pdata->dsi = dsi;
 
-        return devm_mipi_dsi_attach(dev, dsi);
+        return devm_mipi_dsi_attach(&adev->dev, dsi);
 }
 
 static int ti_sn_bridge_attach(struct drm_bridge *bridge,
@@ -1298,9 +1298,9 @@ static int ti_sn_bridge_probe(struct auxiliary_device *adev,
         struct device_node *np = pdata->dev->of_node;
         int ret;
 
-        pdata->next_bridge = devm_drm_of_get_bridge(pdata->dev, np, 1, 0);
+        pdata->next_bridge = devm_drm_of_get_bridge(&adev->dev, np, 1, 0);
         if (IS_ERR(pdata->next_bridge))
-                return dev_err_probe(pdata->dev, PTR_ERR(pdata->next_bridge),
+                return dev_err_probe(&adev->dev, PTR_ERR(pdata->next_bridge),
                                      "failed to create panel bridge\n");
 
         ti_sn_bridge_parse_lanes(pdata, np);
@@ -1319,9 +1319,9 @@ static int ti_sn_bridge_probe(struct auxiliary_device *adev,
 
         drm_bridge_add(&pdata->bridge);
 
-        ret = ti_sn_attach_host(pdata);
+        ret = ti_sn_attach_host(adev, pdata);
         if (ret) {
-                dev_err_probe(pdata->dev, ret, "failed to attach dsi host\n");
+                dev_err_probe(&adev->dev, ret, "failed to attach dsi host\n");
                 goto err_remove_bridge;
         }
 
@@ -123,6 +123,9 @@ static const struct edid_quirk {
         /* AEO model 0 reports 8 bpc, but is a 6 bpc panel */
         EDID_QUIRK('A', 'E', 'O', 0, EDID_QUIRK_FORCE_6BPC),
 
+        /* BenQ GW2765 */
+        EDID_QUIRK('B', 'N', 'Q', 0x78d6, EDID_QUIRK_FORCE_8BPC),
+
         /* BOE model on HP Pavilion 15-n233sl reports 8 bpc, but is a 6 bpc panel */
         EDID_QUIRK('B', 'O', 'E', 0x78b, EDID_QUIRK_FORCE_6BPC),
 
@@ -2553,8 +2553,7 @@ static void intel_cx0_phy_lane_reset(struct drm_i915_private *i915,
                 drm_warn(&i915->drm, "PHY %c failed to bring out of SOC reset after %dus.\n",
                          phy_name(phy), XELPDP_PORT_BUF_SOC_READY_TIMEOUT_US);
 
-        intel_de_rmw(i915, XELPDP_PORT_BUF_CTL2(port),
-                     XELPDP_LANE_PIPE_RESET(0) | XELPDP_LANE_PIPE_RESET(1),
+        intel_de_rmw(i915, XELPDP_PORT_BUF_CTL2(port), lane_pipe_reset,
                      lane_pipe_reset);
 
         if (__intel_de_wait_for_register(i915, XELPDP_PORT_BUF_CTL2(port),
@@ -235,6 +235,7 @@ static vm_fault_t i915_error_to_vmf_fault(int err)
         case 0:
         case -EAGAIN:
         case -ENOSPC: /* transient failure to evict? */
+        case -ENOBUFS: /* temporarily out of fences? */
         case -ERESTARTSYS:
         case -EINTR:
         case -EBUSY:
@@ -239,6 +239,7 @@ int mtk_drm_gem_prime_vmap(struct drm_gem_object *obj, struct iosys_map *map)
         npages = obj->size >> PAGE_SHIFT;
         mtk_gem->pages = kcalloc(npages, sizeof(*mtk_gem->pages), GFP_KERNEL);
         if (!mtk_gem->pages) {
+                sg_free_table(sgt);
                 kfree(sgt);
                 return -ENOMEM;
         }
@@ -248,12 +249,15 @@ int mtk_drm_gem_prime_vmap(struct drm_gem_object *obj, struct iosys_map *map)
         mtk_gem->kvaddr = vmap(mtk_gem->pages, npages, VM_MAP,
                                pgprot_writecombine(PAGE_KERNEL));
         if (!mtk_gem->kvaddr) {
+                sg_free_table(sgt);
                 kfree(sgt);
                 kfree(mtk_gem->pages);
                 return -ENOMEM;
         }
-out:
+        sg_free_table(sgt);
         kfree(sgt);
+
+out:
         iosys_map_set_vaddr(map, mtk_gem->kvaddr);
 
         return 0;
@@ -62,6 +62,18 @@ nvkm_uconn_uevent_gpio(struct nvkm_object *object, u64 token, u32 bits)
         return object->client->event(token, &args, sizeof(args.v0));
 }
 
+static bool
+nvkm_connector_is_dp_dms(u8 type)
+{
+        switch (type) {
+        case DCB_CONNECTOR_DMS59_DP0:
+        case DCB_CONNECTOR_DMS59_DP1:
+                return true;
+        default:
+                return false;
+        }
+}
+
 static int
 nvkm_uconn_uevent(struct nvkm_object *object, void *argv, u32 argc, struct nvkm_uevent *uevent)
 {
@@ -101,7 +113,7 @@ nvkm_uconn_uevent(struct nvkm_object *object, void *argv, u32 argc, struct nvkm_
         if (args->v0.types & NVIF_CONN_EVENT_V0_UNPLUG) bits |= NVKM_GPIO_LO;
         if (args->v0.types & NVIF_CONN_EVENT_V0_IRQ) {
                 /* TODO: support DP IRQ on ANX9805 and remove this hack. */
-                if (!outp->info.location)
+                if (!outp->info.location && !nvkm_connector_is_dp_dms(conn->info.type))
                         return -EINVAL;
         }
 
@@ -976,32 +976,6 @@ static const struct panel_desc auo_b116xak01 = {
         },
 };
 
-static const struct drm_display_mode auo_b116xw03_mode = {
-        .clock = 70589,
-        .hdisplay = 1366,
-        .hsync_start = 1366 + 40,
-        .hsync_end = 1366 + 40 + 40,
-        .htotal = 1366 + 40 + 40 + 32,
-        .vdisplay = 768,
-        .vsync_start = 768 + 10,
-        .vsync_end = 768 + 10 + 12,
-        .vtotal = 768 + 10 + 12 + 6,
-        .flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC,
-};
-
-static const struct panel_desc auo_b116xw03 = {
-        .modes = &auo_b116xw03_mode,
-        .num_modes = 1,
-        .bpc = 6,
-        .size = {
-                .width = 256,
-                .height = 144,
-        },
-        .delay = {
-                .enable = 400,
-        },
-};
-
 static const struct drm_display_mode auo_b133han05_mode = {
         .clock = 142600,
         .hdisplay = 1920,
@@ -1725,9 +1699,6 @@ static const struct of_device_id platform_of_match[] = {
         }, {
                 .compatible = "auo,b116xa01",
                 .data = &auo_b116xak01,
-        }, {
-                .compatible = "auo,b116xw03",
-                .data = &auo_b116xw03,
         }, {
                 .compatible = "auo,b133han05",
                 .data = &auo_b133han05,
@@ -919,6 +919,38 @@ static const struct panel_desc auo_b101xtn01 = {
         },
 };
 
+static const struct drm_display_mode auo_b116xw03_mode = {
+        .clock = 70589,
+        .hdisplay = 1366,
+        .hsync_start = 1366 + 40,
+        .hsync_end = 1366 + 40 + 40,
+        .htotal = 1366 + 40 + 40 + 32,
+        .vdisplay = 768,
+        .vsync_start = 768 + 10,
+        .vsync_end = 768 + 10 + 12,
+        .vtotal = 768 + 10 + 12 + 6,
+        .flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC,
+};
+
+static const struct panel_desc auo_b116xw03 = {
+        .modes = &auo_b116xw03_mode,
+        .num_modes = 1,
+        .bpc = 6,
+        .size = {
+                .width = 256,
+                .height = 144,
+        },
+        .delay = {
+                .prepare = 1,
+                .enable = 200,
+                .disable = 200,
+                .unprepare = 500,
+        },
+        .bus_format = MEDIA_BUS_FMT_RGB666_1X7X3_SPWG,
+        .bus_flags = DRM_BUS_FLAG_DE_HIGH,
+        .connector_type = DRM_MODE_CONNECTOR_LVDS,
+};
+
 static const struct display_timing auo_g070vvn01_timings = {
         .pixelclock = { 33300000, 34209000, 45000000 },
         .hactive = { 800, 800, 800 },
@@ -4102,6 +4134,9 @@ static const struct of_device_id platform_of_match[] = {
         }, {
                 .compatible = "auo,b101xtn01",
                 .data = &auo_b101xtn01,
+        }, {
+                .compatible = "auo,b116xw03",
+                .data = &auo_b116xw03,
         }, {
                 .compatible = "auo,g070vvn01",
                 .data = &auo_g070vvn01,
@@ -232,10 +232,6 @@ void ttm_device_fini(struct ttm_device *bdev)
         struct ttm_resource_manager *man;
         unsigned i;
 
-        man = ttm_manager_type(bdev, TTM_PL_SYSTEM);
-        ttm_resource_manager_set_used(man, false);
-        ttm_set_driver_manager(bdev, TTM_PL_SYSTEM, NULL);
-
         mutex_lock(&ttm_global_mutex);
         list_del(&bdev->device_list);
         mutex_unlock(&ttm_global_mutex);
@@ -243,6 +239,10 @@ void ttm_device_fini(struct ttm_device *bdev)
         drain_workqueue(bdev->wq);
         destroy_workqueue(bdev->wq);
 
+        man = ttm_manager_type(bdev, TTM_PL_SYSTEM);
+        ttm_resource_manager_set_used(man, false);
+        ttm_set_driver_manager(bdev, TTM_PL_SYSTEM, NULL);
+
         spin_lock(&bdev->lru_lock);
         for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i)
                 if (list_empty(&man->lru[0]))
@@ -68,8 +68,7 @@ enum drm_sched_priority {
         DRM_SCHED_PRIORITY_HIGH,
         DRM_SCHED_PRIORITY_KERNEL,
 
-        DRM_SCHED_PRIORITY_COUNT,
-        DRM_SCHED_PRIORITY_UNSET = -2
+        DRM_SCHED_PRIORITY_COUNT
 };
 
 /* Used to chose between FIFO and RR jobs scheduling */
@@ -45,8 +45,8 @@ extern "C" {
 #define NOUVEAU_GETPARAM_HAS_BO_USAGE    15
 #define NOUVEAU_GETPARAM_HAS_PAGEFLIP    16
 
-/**
- * @NOUVEAU_GETPARAM_EXEC_PUSH_MAX
+/*
+ * NOUVEAU_GETPARAM_EXEC_PUSH_MAX - query max pushes through getparam
  *
  * Query the maximum amount of IBs that can be pushed through a single
  * &drm_nouveau_exec structure and hence a single &DRM_IOCTL_NOUVEAU_EXEC