drm/xe: Use ordered wq for preempt fence waiting
Preempt fences can sleep while waiting for an exec queue suspend operation to
complete. If the system_unbound_wq is used for that wait and the number of
waiters exceeds max_active, other users of the system_unbound_wq get starved.
Use a device private work queue for preempt fences to avoid starving the
system_unbound_wq.
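For context: a work item that sleeps in its handler holds one of its workqueue's
max_active slots for the entire wait, so a burst of such items on the shared
system_unbound_wq can exhaust its concurrency and delay unrelated work. Below is
a minimal sketch of the device-private-workqueue pattern; the demo_* names are
hypothetical and not part of this patch.

#include <linux/workqueue.h>
#include <linux/completion.h>
#include <linux/errno.h>

/* Hypothetical per-device state, for illustration only. */
struct demo_device {
	struct workqueue_struct *wait_wq;	/* driver-private workqueue */
	struct completion suspend_done;
	struct work_struct wait_work;
};

static void demo_wait_fn(struct work_struct *w)
{
	struct demo_device *d = container_of(w, struct demo_device, wait_work);

	/*
	 * Sleeps until the suspend completes; while blocked, this item
	 * occupies one of its workqueue's max_active slots.
	 */
	wait_for_completion(&d->suspend_done);
}

static int demo_init(struct demo_device *d)
{
	/* A private queue keeps the blocking away from system_unbound_wq. */
	d->wait_wq = alloc_workqueue("demo-wait-wq", 0, 0);
	if (!d->wait_wq)
		return -ENOMEM;

	init_completion(&d->suspend_done);
	INIT_WORK(&d->wait_work, demo_wait_fn);
	queue_work(d->wait_wq, &d->wait_work);
	return 0;
}

In the patch itself the blocking wait happens in the preempt fence work, and the
private queue is xe->preempt_fence_wq.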
Even though suspend operations can complete out-of-order, all suspend
operations within a VM need to complete before the preempt rebind worker
can start. With that, use a device private ordered wq for preempt fence
waiting.
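An ordered workqueue (alloc_ordered_workqueue()) executes at most one work item
at a time, in queueing order, so nothing is lost by serializing items that all
have to finish before the next stage anyway. A short, self-contained sketch of
that guarantee, again with hypothetical demo_* names rather than the driver's
own code:

#include <linux/workqueue.h>
#include <linux/printk.h>
#include <linux/errno.h>

static struct work_struct work_a, work_b;

static void demo_work_fn(struct work_struct *w)
{
	pr_info("work item %c done\n", w == &work_a ? 'A' : 'B');
}

static int demo_ordered_setup(void)
{
	/* Ordered wq: max_active is effectively 1, items run FIFO. */
	struct workqueue_struct *wq =
		alloc_ordered_workqueue("demo-ordered-wq", 0);

	if (!wq)
		return -ENOMEM;

	INIT_WORK(&work_a, demo_work_fn);
	INIT_WORK(&work_b, demo_work_fn);

	/* work_b cannot begin until work_a has returned. */
	queue_work(wq, &work_a);
	queue_work(wq, &work_b);

	flush_workqueue(wq);
	destroy_workqueue(wq);
	return 0;
}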
v2:
- Add comment about cleanup on failure (Matt R)
- Update commit message (Lucas)
Fixes: dd08ebf6c3 ("drm/xe: Introduce a new DRM driver for Intel GPUs")
Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Reviewed-by: Lucas De Marchi <lucas.demarchi@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20240401221913.139672-2-matthew.brost@intel.com
Signed-off-by: Lucas De Marchi <lucas.demarchi@intel.com>
commit 37c15c4aae
parent 9f18b55b6d
committed by Lucas De Marchi
drivers/gpu/drm/xe/xe_device.c
@@ -226,6 +226,9 @@ static void xe_device_destroy(struct drm_device *dev, void *dummy)
 {
 	struct xe_device *xe = to_xe_device(dev);
 
+	if (xe->preempt_fence_wq)
+		destroy_workqueue(xe->preempt_fence_wq);
+
 	if (xe->ordered_wq)
 		destroy_workqueue(xe->ordered_wq);
 
@@ -291,9 +294,15 @@ struct xe_device *xe_device_create(struct pci_dev *pdev,
 	INIT_LIST_HEAD(&xe->pinned.external_vram);
 	INIT_LIST_HEAD(&xe->pinned.evicted);
 
+	xe->preempt_fence_wq = alloc_ordered_workqueue("xe-preempt-fence-wq", 0);
 	xe->ordered_wq = alloc_ordered_workqueue("xe-ordered-wq", 0);
 	xe->unordered_wq = alloc_workqueue("xe-unordered-wq", 0, 0);
-	if (!xe->ordered_wq || !xe->unordered_wq) {
+	if (!xe->ordered_wq || !xe->unordered_wq ||
+	    !xe->preempt_fence_wq) {
+		/*
+		 * Cleanup done in xe_device_destroy via
+		 * drmm_add_action_or_reset register above
+		 */
 		drm_err(&xe->drm, "Failed to allocate xe workqueues\n");
 		err = -ENOMEM;
 		goto err;
drivers/gpu/drm/xe/xe_device_types.h
@@ -363,6 +363,9 @@ struct xe_device {
 	/** @ufence_wq: user fence wait queue */
 	wait_queue_head_t ufence_wq;
 
+	/** @preempt_fence_wq: used to serialize preempt fences */
+	struct workqueue_struct *preempt_fence_wq;
+
 	/** @ordered_wq: used to serialize compute mode resume */
 	struct workqueue_struct *ordered_wq;
 
drivers/gpu/drm/xe/xe_preempt_fence.c
@@ -49,7 +49,7 @@ static bool preempt_fence_enable_signaling(struct dma_fence *fence)
 	struct xe_exec_queue *q = pfence->q;
 
 	pfence->error = q->ops->suspend(q);
-	queue_work(system_unbound_wq, &pfence->preempt_work);
+	queue_work(q->vm->xe->preempt_fence_wq, &pfence->preempt_work);
 	return true;
 }
 