mirror of
https://github.com/torvalds/linux.git
synced 2025-12-07 20:06:24 +00:00
drm/panthor: assign unique names to queues
Userspace relies on the ring field of gpu_scheduler tracepoints to identify a drm_gpu_scheduler. The value of the ring field is taken from sched->name. Because we typically have multiple schedulers running in parallel in each process, assign unique names to schedulers such that userspace can distinguish them. Signed-off-by: Chia-I Wu <olvaffe@gmail.com> Reviewed-by: Boris Brezillon <boris.brezillon@collabora.com> Reviewed-by: Steven Price <steven.price@arm.com> Signed-off-by: Steven Price <steven.price@arm.com> Link: https://lore.kernel.org/r/20250902200624.428175-1-olvaffe@gmail.com
This commit is contained in:
@@ -1105,7 +1105,7 @@ static int panthor_ioctl_group_create(struct drm_device *ddev, void *data,
 	if (ret)
 		goto out;

-	ret = panthor_group_create(pfile, args, queue_args);
+	ret = panthor_group_create(pfile, args, queue_args, file->client_id);
 	if (ret < 0)
 		goto out;
 	args->group_handle = ret;
@@ -360,6 +360,9 @@ struct panthor_queue {
 	/** @entity: DRM scheduling entity used for this queue. */
 	struct drm_sched_entity entity;

+	/** @name: DRM scheduler name for this queue. */
+	char *name;
+
 	/**
 	 * @remaining_time: Time remaining before the job timeout expires.
 	 *
@@ -901,6 +904,8 @@ static void group_free_queue(struct panthor_group *group, struct panthor_queue *
 	if (queue->scheduler.ops)
 		drm_sched_fini(&queue->scheduler);

+	kfree(queue->name);
+
 	panthor_queue_put_syncwait_obj(queue);

 	panthor_kernel_bo_destroy(queue->ringbuf);
@@ -3308,9 +3313,10 @@ static u32 calc_profiling_ringbuf_num_slots(struct panthor_device *ptdev,

 static struct panthor_queue *
 group_create_queue(struct panthor_group *group,
-		   const struct drm_panthor_queue_create *args)
+		   const struct drm_panthor_queue_create *args,
+		   u64 drm_client_id, u32 gid, u32 qid)
 {
-	const struct drm_sched_init_args sched_args = {
+	struct drm_sched_init_args sched_args = {
 		.ops = &panthor_queue_sched_ops,
 		.submit_wq = group->ptdev->scheduler->wq,
 		.num_rqs = 1,
@@ -3323,7 +3329,6 @@ group_create_queue(struct panthor_group *group,
 		.credit_limit = args->ringbuf_size / sizeof(u64),
 		.timeout = msecs_to_jiffies(JOB_TIMEOUT_MS),
 		.timeout_wq = group->ptdev->reset.wq,
-		.name = "panthor-queue",
 		.dev = group->ptdev->base.dev,
 	};
 	struct drm_gpu_scheduler *drm_sched;
@@ -3398,6 +3403,15 @@ group_create_queue(struct panthor_group *group,
 	if (ret)
 		goto err_free_queue;

+	/* assign a unique name */
+	queue->name = kasprintf(GFP_KERNEL, "panthor-queue-%llu-%u-%u", drm_client_id, gid, qid);
+	if (!queue->name) {
+		ret = -ENOMEM;
+		goto err_free_queue;
+	}
+
+	sched_args.name = queue->name;
+
 	ret = drm_sched_init(&queue->scheduler, &sched_args);
 	if (ret)
 		goto err_free_queue;
@@ -3447,7 +3461,8 @@ static void add_group_kbo_sizes(struct panthor_device *ptdev,

 int panthor_group_create(struct panthor_file *pfile,
 			 const struct drm_panthor_group_create *group_args,
-			 const struct drm_panthor_queue_create *queue_args)
+			 const struct drm_panthor_queue_create *queue_args,
+			 u64 drm_client_id)
 {
 	struct panthor_device *ptdev = pfile->ptdev;
 	struct panthor_group_pool *gpool = pfile->groups;
@@ -3540,12 +3555,16 @@ int panthor_group_create(struct panthor_file *pfile,
 	memset(group->syncobjs->kmap, 0,
 	       group_args->queues.count * sizeof(struct panthor_syncobj_64b));

+	ret = xa_alloc(&gpool->xa, &gid, group, XA_LIMIT(1, MAX_GROUPS_PER_POOL), GFP_KERNEL);
+	if (ret)
+		goto err_put_group;
+
 	for (i = 0; i < group_args->queues.count; i++) {
-		group->queues[i] = group_create_queue(group, &queue_args[i]);
+		group->queues[i] = group_create_queue(group, &queue_args[i], drm_client_id, gid, i);
 		if (IS_ERR(group->queues[i])) {
 			ret = PTR_ERR(group->queues[i]);
 			group->queues[i] = NULL;
-			goto err_put_group;
+			goto err_erase_gid;
 		}

 		group->queue_count++;
@@ -3553,10 +3572,6 @@ int panthor_group_create(struct panthor_file *pfile,

 	group->idle_queues = GENMASK(group->queue_count - 1, 0);

-	ret = xa_alloc(&gpool->xa, &gid, group, XA_LIMIT(1, MAX_GROUPS_PER_POOL), GFP_KERNEL);
-	if (ret)
-		goto err_put_group;
-
 	mutex_lock(&sched->reset.lock);
 	if (atomic_read(&sched->reset.in_progress)) {
 		panthor_group_stop(group);
@@ -3575,6 +3590,9 @@ int panthor_group_create(struct panthor_file *pfile,

 	return gid;

+err_erase_gid:
+	xa_erase(&gpool->xa, gid);
+
 err_put_group:
 	group_put(group);
 	return ret;
@@ -21,7 +21,8 @@ struct panthor_job;

 int panthor_group_create(struct panthor_file *pfile,
			 const struct drm_panthor_group_create *group_args,
-			 const struct drm_panthor_queue_create *queue_args);
+			 const struct drm_panthor_queue_create *queue_args,
+			 u64 drm_client_id);
 int panthor_group_destroy(struct panthor_file *pfile, u32 group_handle);
 int panthor_group_get_state(struct panthor_file *pfile,
			    struct drm_panthor_group_get_state *get_state);
Reference in New Issue
Block a user