drm/xe: Limit number of jobs per exec queue

Add a limit to the number of jobs that can be queued in a single
exec queue to avoid potential resource exhaustion.

A new field `job_cnt` is introduced in `struct xe_exec_queue` to
track the number of active DRM jobs, along with a maximum limit
`XE_MAX_JOB_COUNT_PER_EXEC_QUEUE` set to 1000.

If the job count has already reached this limit, `xe_exec_ioctl()` now
returns `-EAGAIN` to signal that the caller should retry later.
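
As a sketch of how a userspace submitter might honor that contract (a
hypothetical helper, not taken from any real UMD; the drm_xe_exec setup is
elided and the backoff policy is illustrative only):

#include <errno.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <drm/xe_drm.h>

/* Hypothetical helper: retry DRM_IOCTL_XE_EXEC while the queue is full. */
static int xe_exec_retry(int fd, struct drm_xe_exec *exec)
{
	for (;;) {
		if (ioctl(fd, DRM_IOCTL_XE_EXEC, exec) == 0)
			return 0;
		if (errno == EAGAIN) {
			/* Queue already holds the per-queue job limit. */
			usleep(1000);	/* illustrative backoff only */
			continue;
		}
		if (errno == EINTR)
			continue;
		return -errno;
	}
}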

A trace event is added to track when the limit is reached:
"xe_exec_queue_reach_max_job_count: dev=0000:03:00.0, job count
exceeded the maximum limit (1000) per exec queue. engine_class=0x3,
logical_mask=0x1, guc_id=2"

v3: Add an assert in xe_exec_queue_destroy() that q->job_cnt is zero. (Matt)
v2 (Matt):
 - Add a trace event for when the limit is hit.
 - Change the max count from 0x1000 to 1000.
 - Use atomic_t for job_cnt.

Suggested-by: Matthew Brost <matthew.brost@intel.com>
Signed-off-by: Shuicheng Lin <shuicheng.lin@intel.com>
Reviewed-by: Matthew Brost <matthew.brost@intel.com>
Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Link: https://patch.msgid.link/20251027202118.3339905-2-shuicheng.lin@intel.com

@@ -21,6 +21,7 @@
 #include "xe_sched_job.h"
 #include "xe_sync.h"
 #include "xe_svm.h"
+#include "xe_trace.h"
 #include "xe_vm.h"
 
 /**
@@ -154,6 +155,12 @@ int xe_exec_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
 		goto err_exec_queue;
 	}
 
+	if (atomic_read(&q->job_cnt) >= XE_MAX_JOB_COUNT_PER_EXEC_QUEUE) {
+		trace_xe_exec_queue_reach_max_job_count(q, XE_MAX_JOB_COUNT_PER_EXEC_QUEUE);
+		err = -EAGAIN;
+		goto err_exec_queue;
+	}
+
 	if (args->num_syncs) {
 		syncs = kcalloc(args->num_syncs, sizeof(*syncs), GFP_KERNEL);
 		if (!syncs) {


@@ -377,6 +377,8 @@ void xe_exec_queue_destroy(struct kref *ref)
 	struct xe_exec_queue *q = container_of(ref, struct xe_exec_queue, refcount);
 	struct xe_exec_queue *eq, *next;
 
+	xe_assert(gt_to_xe(q->gt), atomic_read(&q->job_cnt) == 0);
+
 	if (xe_exec_queue_uses_pxp(q))
 		xe_pxp_exec_queue_remove(gt_to_xe(q->gt)->pxp, q);


@@ -162,6 +162,11 @@ struct xe_exec_queue {
 	const struct xe_ring_ops *ring_ops;
 	/** @entity: DRM sched entity for this exec queue (1 to 1 relationship) */
 	struct drm_sched_entity *entity;
+
+#define XE_MAX_JOB_COUNT_PER_EXEC_QUEUE	1000
+	/** @job_cnt: number of drm jobs in this exec queue */
+	atomic_t job_cnt;
+
 	/**
 	 * @tlb_flush_seqno: The seqno of the last rebind tlb flush performed
 	 * Protected by @vm's resv. Unused if @vm == NULL.


@@ -146,6 +146,7 @@ struct xe_sched_job *xe_sched_job_create(struct xe_exec_queue *q,
 	for (i = 0; i < width; ++i)
 		job->ptrs[i].batch_addr = batch_addr[i];
 
+	atomic_inc(&q->job_cnt);
 	xe_pm_runtime_get_noresume(job_to_xe(job));
 	trace_xe_sched_job_create(job);
 	return job;
@@ -177,6 +178,7 @@ void xe_sched_job_destroy(struct kref *ref)
 	dma_fence_put(job->fence);
 	drm_sched_job_cleanup(&job->drm);
 	job_free(job);
+	atomic_dec(&q->job_cnt);
 	xe_exec_queue_put(q);
 	xe_pm_runtime_put(xe);
 }


@@ -441,6 +441,29 @@ TRACE_EVENT(xe_eu_stall_data_read,
 		      __entry->read_size, __entry->total_size)
 );
 
+TRACE_EVENT(xe_exec_queue_reach_max_job_count,
+	    TP_PROTO(struct xe_exec_queue *q, int max_cnt),
+	    TP_ARGS(q, max_cnt),
+
+	    TP_STRUCT__entry(__string(dev, __dev_name_eq(q))
+			     __field(enum xe_engine_class, class)
+			     __field(u32, logical_mask)
+			     __field(u16, guc_id)
+			     __field(int, max_cnt)
+			     ),
+
+	    TP_fast_assign(__assign_str(dev);
+			   __entry->class = q->class;
+			   __entry->logical_mask = q->logical_mask;
+			   __entry->guc_id = q->guc->id;
+			   __entry->max_cnt = max_cnt;
+			   ),
+
+	    TP_printk("dev=%s, job count exceeded the maximum limit (%d) per exec queue. engine_class=0x%x, logical_mask=0x%x, guc_id=%d",
+		      __get_str(dev), __entry->max_cnt,
+		      __entry->class, __entry->logical_mask, __entry->guc_id)
+);
+
 #endif
 
 /* This part must be outside protection */
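
Taken together, the accounting in the hunks above is a bounded per-queue
counter: checked against the limit in the exec ioctl, incremented when a
job is created, decremented when the job is destroyed, and asserted to be
zero when the exec queue itself is destroyed. A compilable userspace model
of that pattern (C11 atomic_int standing in for the kernel's atomic_t; all
names here are made up for illustration, not driver code):

#include <assert.h>
#include <errno.h>
#include <stdatomic.h>
#include <stdio.h>

#define MAX_JOBS_PER_QUEUE 1000	/* mirrors XE_MAX_JOB_COUNT_PER_EXEC_QUEUE */

struct queue {
	atomic_int job_cnt;
};

/* Submission path: reject new work once the queue already holds the limit. */
static int queue_submit(struct queue *q)
{
	if (atomic_load(&q->job_cnt) >= MAX_JOBS_PER_QUEUE)
		return -EAGAIN;

	atomic_fetch_add(&q->job_cnt, 1);	/* job created */
	return 0;
}

/* Completion path: drop the count when the job object is destroyed. */
static void queue_retire(struct queue *q)
{
	atomic_fetch_sub(&q->job_cnt, 1);
}

int main(void)
{
	struct queue q = { .job_cnt = 0 };
	int submitted = 0;

	while (queue_submit(&q) == 0)
		submitted++;

	printf("accepted %d jobs before -EAGAIN\n", submitted);

	while (submitted--)
		queue_retire(&q);

	/* Mirrors the assert added to xe_exec_queue_destroy(). */
	assert(atomic_load(&q.job_cnt) == 0);
	return 0;
}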