Merge tag 'drm-intel-gt-next-2025-11-14' of https://gitlab.freedesktop.org/drm/i915/kernel into drm-next

Driver Changes:

Fixes/improvements/new stuff:

- Avoid lock inversion when pinning to GGTT on CHV/BXT+VTD (Janusz Krzysztofik)
- Use standard API for seqcount read in TLB invalidation [gt] (Andi Shyti)

Miscellaneous:

- Wait longer for threads in migrate selftest on CHV/BXT+VTD (Janusz Krzysztofik)
- Wait for page_sizes_gtt in gtt selftest on CHV/BXT+VTD (Janusz Krzysztofik)

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Tvrtko Ursulin <tursulin@igalia.com>
Link: https://patch.msgid.link/aRdXOAKlTVX_b0en@linux
This commit is contained in:
Dave Airlie
2025-11-18 06:52:08 +10:00
4 changed files with 27 additions and 4 deletions

View File

@@ -18,7 +18,7 @@ void intel_gt_fini_tlb(struct intel_gt *gt);
/*
 * intel_gt_tlb_seqno - sample the current TLB invalidation sequence number
 * @gt: GT whose TLB seqno is read
 *
 * Reads the seqcount through the standard raw_read_seqcount() accessor
 * instead of reaching into the seqprop internals. The stale
 * seqprop_sequence() call (a leftover from the pre-patch side of the
 * diff) made the intended return unreachable and is dropped here.
 */
static inline u32 intel_gt_tlb_seqno(const struct intel_gt *gt)
{
	return raw_read_seqcount(&gt->tlb.seqno);
}
static inline u32 intel_gt_next_invalidate_tlb_full(const struct intel_gt *gt)

View File

@@ -710,7 +710,14 @@ static int threaded_migrate(struct intel_migrate *migrate,
thread[i].tsk = tsk;
}
msleep(10 * n_cpus); /* start all threads before we kthread_stop() */
/*
* Start all threads before we kthread_stop().
* In CHV / BXT+VTD environments, where VMA pinning is committed
* asynchronously, empirically determined 100ms delay is needed
* to avoid stopping threads that may still wait for completion of
* intel_ggtt_bind_vma and fail with -ERESTARTSYS when interrupted.
*/
msleep((intel_vm_no_concurrent_access_wa(migrate->context->vm->i915) ? 100 : 10) * n_cpus);
for (i = 0; i < n_cpus; ++i) {
struct task_struct *tsk = thread[i].tsk;

View File

@@ -1597,8 +1597,20 @@ err_unlock:
err_vma_res:
i915_vma_resource_free(vma_res);
err_fence:
if (work)
dma_fence_work_commit_imm(&work->base);
if (work) {
/*
* When pinning VMA to GGTT on CHV or BXT with VTD enabled,
* commit VMA binding asynchronously to avoid risk of lock
* inversion among reservation_ww locks held here and
* cpu_hotplug_lock acquired from stop_machine(), which we
* wrap around GGTT updates when running in those environments.
*/
if (i915_vma_is_ggtt(vma) &&
intel_vm_no_concurrent_access_wa(vma->vm->i915))
dma_fence_work_commit(&work->base);
else
dma_fence_work_commit_imm(&work->base);
}
err_rpm:
intel_runtime_pm_put(&vma->vm->i915->runtime_pm, wakeref);

View File

@@ -1118,6 +1118,10 @@ static int misaligned_case(struct i915_address_space *vm, struct intel_memory_re
goto err_put;
}
/* make sure page_sizes_gtt has been populated before use */
if (i915_is_ggtt(vm) && intel_vm_no_concurrent_access_wa(vm->i915))
i915_vma_wait_for_bind(vma);
expected_vma_size = round_up(size, 1 << (ffs(vma->resource->page_sizes_gtt) - 1));
expected_node_size = expected_vma_size;