mirror of
https://github.com/torvalds/linux.git
synced 2025-12-07 20:06:24 +00:00
drm/sched/tests: Implement cancel_job() callback
The GPU Scheduler now supports a new callback, cancel_job(), which lets the scheduler cancel all jobs which might not yet be freed when drm_sched_fini() runs. Using this callback allows for significantly simplifying the mock scheduler teardown code. Implement the cancel_job() callback and adjust the code where necessary. Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@igalia.com> Signed-off-by: Philipp Stanner <phasta@kernel.org> Link: https://lore.kernel.org/r/20250710125412.128476-5-phasta@kernel.org
This commit is contained in:
@@ -63,7 +63,7 @@ static void drm_mock_sched_job_complete(struct drm_mock_sched_job *job)
 	lockdep_assert_held(&sched->lock);
 
 	job->flags |= DRM_MOCK_SCHED_JOB_DONE;
-	list_move_tail(&job->link, &sched->done_list);
+	list_del(&job->link);
 	dma_fence_signal_locked(&job->hw_fence);
 	complete(&job->done);
 }
@@ -236,26 +236,41 @@ mock_sched_timedout_job(struct drm_sched_job *sched_job)
 
 static void mock_sched_free_job(struct drm_sched_job *sched_job)
 {
-	struct drm_mock_scheduler *sched =
-			drm_sched_to_mock_sched(sched_job->sched);
 	struct drm_mock_sched_job *job = drm_sched_job_to_mock_job(sched_job);
-	unsigned long flags;
 
-	/* Remove from the scheduler done list. */
-	spin_lock_irqsave(&sched->lock, flags);
-	list_del(&job->link);
-	spin_unlock_irqrestore(&sched->lock, flags);
 	dma_fence_put(&job->hw_fence);
-
 	drm_sched_job_cleanup(sched_job);
 
 	/* Mock job itself is freed by the kunit framework. */
}
 
+static void mock_sched_cancel_job(struct drm_sched_job *sched_job)
+{
+	struct drm_mock_scheduler *sched = drm_sched_to_mock_sched(sched_job->sched);
+	struct drm_mock_sched_job *job = drm_sched_job_to_mock_job(sched_job);
+	unsigned long flags;
+
+	hrtimer_cancel(&job->timer);
+
+	spin_lock_irqsave(&sched->lock, flags);
+	if (!dma_fence_is_signaled_locked(&job->hw_fence)) {
+		list_del(&job->link);
+		dma_fence_set_error(&job->hw_fence, -ECANCELED);
+		dma_fence_signal_locked(&job->hw_fence);
+	}
+	spin_unlock_irqrestore(&sched->lock, flags);
+
+	/*
+	 * The GPU Scheduler will call drm_sched_backend_ops.free_job(), still.
+	 * Mock job itself is freed by the kunit framework.
+	 */
+}
+
 static const struct drm_sched_backend_ops drm_mock_scheduler_ops = {
 	.run_job = mock_sched_run_job,
 	.timedout_job = mock_sched_timedout_job,
-	.free_job = mock_sched_free_job
+	.free_job = mock_sched_free_job,
+	.cancel_job = mock_sched_cancel_job,
 };
 
 /**
@@ -289,7 +304,6 @@ struct drm_mock_scheduler *drm_mock_sched_new(struct kunit *test, long timeout)
 	sched->hw_timeline.context = dma_fence_context_alloc(1);
 	atomic_set(&sched->hw_timeline.next_seqno, 0);
 	INIT_LIST_HEAD(&sched->job_list);
-	INIT_LIST_HEAD(&sched->done_list);
 	spin_lock_init(&sched->lock);
 
 	return sched;
@@ -304,38 +318,6 @@ struct drm_mock_scheduler *drm_mock_sched_new(struct kunit *test, long timeout)
 */
void drm_mock_sched_fini(struct drm_mock_scheduler *sched)
{
-	struct drm_mock_sched_job *job, *next;
-	unsigned long flags;
-	LIST_HEAD(list);
-
-	drm_sched_wqueue_stop(&sched->base);
-
-	/* Force complete all unfinished jobs. */
-	spin_lock_irqsave(&sched->lock, flags);
-	list_for_each_entry_safe(job, next, &sched->job_list, link)
-		list_move_tail(&job->link, &list);
-	spin_unlock_irqrestore(&sched->lock, flags);
-
-	list_for_each_entry(job, &list, link)
-		hrtimer_cancel(&job->timer);
-
-	spin_lock_irqsave(&sched->lock, flags);
-	list_for_each_entry_safe(job, next, &list, link)
-		drm_mock_sched_job_complete(job);
-	spin_unlock_irqrestore(&sched->lock, flags);
-
-	/*
-	 * Free completed jobs and jobs not yet processed by the DRM scheduler
-	 * free worker.
-	 */
-	spin_lock_irqsave(&sched->lock, flags);
-	list_for_each_entry_safe(job, next, &sched->done_list, link)
-		list_move_tail(&job->link, &list);
-	spin_unlock_irqrestore(&sched->lock, flags);
-
-	list_for_each_entry_safe(job, next, &list, link)
-		mock_sched_free_job(&job->base);
-
	drm_sched_fini(&sched->base);
}
@@ -49,7 +49,6 @@ struct drm_mock_scheduler {
 
 	spinlock_t lock;
 	struct list_head job_list;
-	struct list_head done_list;
 
 	struct {
 		u64 context;
Reference in New Issue
Block a user