Mirror of https://github.com/torvalds/linux.git (synced 2025-12-07 20:06:24 +00:00)
drm/amdgpu: add userq object va track helpers
Add userq object virtual address list_add() helpers for tracking userq object VA usage.

Signed-off-by: Prike Liang <Prike.Liang@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
commit 5cfa33fabf (parent 1bea57ea75)
committed by Alex Deucher
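The patch follows the common kernel pattern of tracking per-buffer state with a small node linked onto a per-queue list head. The sketch below is illustrative only: it uses hypothetical names (va_cursor, queue_va_track) rather than the driver's structures, and its teardown frees every node for completeness, whereas the patch's amdgpu_userq_cleanup() simply unlinks the queue's list head with list_del().

/*
 * Minimal sketch of the per-queue VA tracking pattern, assuming only the
 * standard <linux/list.h> API; names here are hypothetical, not the driver's.
 */
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/types.h>

struct va_cursor {			/* one tracked buffer VA */
	u64 gpu_addr;
	struct list_head list;
};

struct queue_va_track {			/* per-queue anchor */
	struct list_head va_list;
};

static void queue_va_track_init(struct queue_va_track *t)
{
	INIT_LIST_HEAD(&t->va_list);	/* done once at queue creation */
}

/* Record a VA after it has been validated against the VM mappings. */
static int queue_va_track_add(struct queue_va_track *t, u64 gpu_addr)
{
	struct va_cursor *c = kzalloc(sizeof(*c), GFP_KERNEL);

	if (!c)
		return -ENOMEM;

	c->gpu_addr = gpu_addr;
	list_add(&c->list, &t->va_list);
	return 0;
}

/* Drop every tracked VA at queue teardown. */
static void queue_va_track_fini(struct queue_va_track *t)
{
	struct va_cursor *c, *tmp;

	list_for_each_entry_safe(c, tmp, &t->va_list, list) {
		list_del(&c->list);
		kfree(c);
	}
}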
@@ -96,6 +96,7 @@ struct amdgpu_bo_va {
 	 * if non-zero, cannot unmap from GPU because user queues may still access it
 	 */
 	unsigned int queue_refcount;
+	atomic_t userq_va_mapped;
 };

 struct amdgpu_bo {
@@ -44,10 +44,29 @@ u32 amdgpu_userq_get_supported_ip_mask(struct amdgpu_device *adev)
 	return userq_ip_mask;
 }

-int amdgpu_userq_input_va_validate(struct amdgpu_vm *vm, u64 addr,
-				   u64 expected_size)
+static int amdgpu_userq_buffer_va_list_add(struct amdgpu_usermode_queue *queue,
+					   struct amdgpu_bo_va_mapping *va_map, u64 addr)
+{
+	struct amdgpu_userq_va_cursor *va_cursor;
+	struct userq_va_list;
+
+	va_cursor = kzalloc(sizeof(*va_cursor), GFP_KERNEL);
+	if (!va_cursor)
+		return -ENOMEM;
+
+	INIT_LIST_HEAD(&va_cursor->list);
+	va_cursor->gpu_addr = addr;
+	atomic_set(&va_map->bo_va->userq_va_mapped, 1);
+	list_add(&va_cursor->list, &queue->userq_va_list);
+
+	return 0;
+}
+
+int amdgpu_userq_input_va_validate(struct amdgpu_usermode_queue *queue,
+				   u64 addr, u64 expected_size)
 {
 	struct amdgpu_bo_va_mapping *va_map;
+	struct amdgpu_vm *vm = queue->vm;
 	u64 user_addr;
 	u64 size;
 	int r = 0;
@@ -67,6 +86,7 @@ int amdgpu_userq_input_va_validate(struct amdgpu_vm *vm, u64 addr,
 	/* Only validate the userq whether resident in the VM mapping range */
 	if (user_addr >= va_map->start &&
 	    va_map->last - user_addr + 1 >= size) {
+		amdgpu_userq_buffer_va_list_add(queue, va_map, user_addr);
 		amdgpu_bo_unreserve(vm->root.bo);
 		return 0;
 	}
@@ -185,6 +205,7 @@ amdgpu_userq_cleanup(struct amdgpu_userq_mgr *uq_mgr,
 	uq_funcs->mqd_destroy(uq_mgr, queue);
 	amdgpu_userq_fence_driver_free(queue);
 	idr_remove(&uq_mgr->userq_idr, queue_id);
+	list_del(&queue->userq_va_list);
 	kfree(queue);
 }

@@ -505,14 +526,7 @@ amdgpu_userq_create(struct drm_file *filp, union drm_amdgpu_userq *args)
 		goto unlock;
 	}

-	/* Validate the userq virtual address.*/
-	if (amdgpu_userq_input_va_validate(&fpriv->vm, args->in.queue_va, args->in.queue_size) ||
-	    amdgpu_userq_input_va_validate(&fpriv->vm, args->in.rptr_va, AMDGPU_GPU_PAGE_SIZE) ||
-	    amdgpu_userq_input_va_validate(&fpriv->vm, args->in.wptr_va, AMDGPU_GPU_PAGE_SIZE)) {
-		r = -EINVAL;
-		kfree(queue);
-		goto unlock;
-	}
+	INIT_LIST_HEAD(&queue->userq_va_list);
 	queue->doorbell_handle = args->in.doorbell_handle;
 	queue->queue_type = args->in.ip_type;
 	queue->vm = &fpriv->vm;
@@ -523,6 +537,15 @@ amdgpu_userq_create(struct drm_file *filp, union drm_amdgpu_userq *args)
 	db_info.db_obj = &queue->db_obj;
 	db_info.doorbell_offset = args->in.doorbell_offset;

+	/* Validate the userq virtual address.*/
+	if (amdgpu_userq_input_va_validate(queue, args->in.queue_va, args->in.queue_size) ||
+	    amdgpu_userq_input_va_validate(queue, args->in.rptr_va, AMDGPU_GPU_PAGE_SIZE) ||
+	    amdgpu_userq_input_va_validate(queue, args->in.wptr_va, AMDGPU_GPU_PAGE_SIZE)) {
+		r = -EINVAL;
+		kfree(queue);
+		goto unlock;
+	}
+
 	/* Convert relative doorbell offset into absolute doorbell index */
 	index = amdgpu_userq_get_doorbell_index(uq_mgr, &db_info, filp);
 	if (index == (uint64_t)-EINVAL) {
@@ -548,7 +571,6 @@ amdgpu_userq_create(struct drm_file *filp, union drm_amdgpu_userq *args)
 		goto unlock;
 	}

-
 	qid = idr_alloc(&uq_mgr->userq_idr, queue, 1, AMDGPU_MAX_USERQ_COUNT, GFP_KERNEL);
 	if (qid < 0) {
 		drm_file_err(uq_mgr->file, "Failed to allocate a queue id\n");
@@ -48,6 +48,11 @@ struct amdgpu_userq_obj {
 	struct amdgpu_bo *obj;
 };

+struct amdgpu_userq_va_cursor {
+	u64 gpu_addr;
+	struct list_head list;
+};
+
 struct amdgpu_usermode_queue {
 	int queue_type;
 	enum amdgpu_userq_state state;
@@ -67,6 +72,8 @@ struct amdgpu_usermode_queue {
 	u32 xcp_id;
 	int priority;
 	struct dentry *debugfs_queue;
+
+	struct list_head userq_va_list;
 };

 struct amdgpu_userq_funcs {
@@ -137,7 +144,6 @@ int amdgpu_userq_stop_sched_for_enforce_isolation(struct amdgpu_device *adev,
 						  u32 idx);
 int amdgpu_userq_start_sched_for_enforce_isolation(struct amdgpu_device *adev,
 						   u32 idx);

-int amdgpu_userq_input_va_validate(struct amdgpu_vm *vm, u64 addr,
-				   u64 expected_size);
+int amdgpu_userq_input_va_validate(struct amdgpu_usermode_queue *queue,
+				   u64 addr, u64 expected_size);
 #endif
@@ -298,8 +298,9 @@ static int mes_userq_mqd_create(struct amdgpu_userq_mgr *uq_mgr,
 		goto free_mqd;
 	}

-	if (amdgpu_userq_input_va_validate(queue->vm, compute_mqd->eop_va,
-					   max_t(u32, PAGE_SIZE, AMDGPU_GPU_PAGE_SIZE)))
+	r = amdgpu_userq_input_va_validate(queue, compute_mqd->eop_va,
+					   max_t(u32, PAGE_SIZE, AMDGPU_GPU_PAGE_SIZE));
+	if (r)
 		goto free_mqd;

 	userq_props->eop_gpu_addr = compute_mqd->eop_va;
@@ -330,8 +331,9 @@ static int mes_userq_mqd_create(struct amdgpu_userq_mgr *uq_mgr,
 		userq_props->tmz_queue =
 			mqd_user->flags & AMDGPU_USERQ_CREATE_FLAGS_QUEUE_SECURE;

-		if (amdgpu_userq_input_va_validate(queue->vm, mqd_gfx_v11->shadow_va,
-						   shadow_info.shadow_size))
+		r = amdgpu_userq_input_va_validate(queue, mqd_gfx_v11->shadow_va,
+						   shadow_info.shadow_size);
+		if (r)
 			goto free_mqd;

 		kfree(mqd_gfx_v11);
@@ -350,9 +352,9 @@ static int mes_userq_mqd_create(struct amdgpu_userq_mgr *uq_mgr,
 		r = -ENOMEM;
 		goto free_mqd;
 	}

-	if (amdgpu_userq_input_va_validate(queue->vm, mqd_sdma_v11->csa_va,
-					   shadow_info.csa_size))
+	r = amdgpu_userq_input_va_validate(queue, mqd_sdma_v11->csa_va,
+					   shadow_info.csa_size);
+	if (r)
 		goto free_mqd;

 	userq_props->csa_addr = mqd_sdma_v11->csa_va;