drm/xe: Disallow input fences on zero batch execs and zero binds

Prevent input fences from being installed on zero batch execs and zero
binds. Zero batch execs and zero binds were originally added to support
queue idling in Mesa via output fences; input fence support was included
only for interface consistency, but it leads to incorrect behavior due to
chained composite fences, which are disallowed.

Rather than take on the complexity of fixing this, simply remove the
support; input fences are not used in practice for these cases.

Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Reviewed-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Link: https://patch.msgid.link/20251031234050.3043507-6-matthew.brost@intel.com
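
For illustration, a minimal, hypothetical userspace sketch of the zero batch
exec case the message describes: a queue is idled through a signal-only
(output) sync, while the same submission with a wait (input) sync is what this
patch now rejects. The struct, flag, and ioctl names follow the xe uAPI
(include/uapi/drm/xe_drm.h) as best understood here; the helper name is made
up and nothing below is part of the patch itself.

/*
 * Hypothetical illustration only -- not part of this patch. A zero batch
 * exec used purely to fence queue idle: the single sync is a signal
 * (output) fence, which remains supported. Dropping DRM_XE_SYNC_FLAG_SIGNAL
 * would make it an input fence, which this patch now rejects with
 * EOPNOTSUPP instead of taking the broken composite-fence path noted in
 * the removed FIXME.
 */
#include <errno.h>
#include <stdint.h>
#include <sys/ioctl.h>

#include <drm/xe_drm.h>

static int xe_queue_idle_fence(int fd, uint32_t exec_queue_id, uint32_t syncobj)
{
	struct drm_xe_sync sync = {
		.type = DRM_XE_SYNC_TYPE_SYNCOBJ,
		.flags = DRM_XE_SYNC_FLAG_SIGNAL,	/* output fence only */
		.handle = syncobj,
	};
	struct drm_xe_exec exec = {
		.exec_queue_id = exec_queue_id,
		.num_batch_buffer = 0,			/* zero batch exec */
		.num_syncs = 1,
		.syncs = (uintptr_t)&sync,
	};

	if (ioctl(fd, DRM_IOCTL_XE_EXEC, &exec))
		return -errno;	/* EOPNOTSUPP if an input fence were passed */

	return 0;
}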
Author: Matthew Brost
Date:   2025-10-31 16:40:49 -07:00
Parent: ebb0880d49
Commit: aa87b681bc

@@ -301,84 +301,55 @@ xe_sync_in_fence_get(struct xe_sync_entry *sync, int num_sync,
 	lockdep_assert_held(&vm->lock);
 
-	/* Count in-fences */
-	for (i = 0; i < num_sync; ++i) {
-		if (sync[i].fence) {
-			++num_fence;
-			fence = sync[i].fence;
-		}
-	}
-
-	/* Easy case... */
-	if (!num_fence) {
-		if (q->flags & EXEC_QUEUE_FLAG_VM) {
-			struct xe_exec_queue *__q;
-			struct xe_tile *tile;
-			u8 id;
+	/* Reject in fences */
+	for (i = 0; i < num_sync; ++i)
+		if (sync[i].fence)
+			return ERR_PTR(-EOPNOTSUPP);
+
+	if (q->flags & EXEC_QUEUE_FLAG_VM) {
+		struct xe_exec_queue *__q;
+		struct xe_tile *tile;
+		u8 id;
 
-			for_each_tile(tile, vm->xe, id)
-				num_fence += (1 + XE_MAX_GT_PER_TILE);
+		for_each_tile(tile, vm->xe, id)
+			num_fence += (1 + XE_MAX_GT_PER_TILE);
 
-			fences = kmalloc_array(num_fence, sizeof(*fences),
-					       GFP_KERNEL);
-			if (!fences)
-				return ERR_PTR(-ENOMEM);
+		fences = kmalloc_array(num_fence, sizeof(*fences),
+				       GFP_KERNEL);
+		if (!fences)
+			return ERR_PTR(-ENOMEM);
 
-			fences[current_fence++] =
-				xe_exec_queue_last_fence_get(q, vm);
-			for_each_tlb_inval(i)
-				fences[current_fence++] =
-					xe_exec_queue_tlb_inval_last_fence_get(q, vm, i);
-			list_for_each_entry(__q, &q->multi_gt_list,
-					    multi_gt_link) {
-				fences[current_fence++] =
-					xe_exec_queue_last_fence_get(__q, vm);
-				for_each_tlb_inval(i)
-					fences[current_fence++] =
-						xe_exec_queue_tlb_inval_last_fence_get(__q, vm, i);
-			}
-			xe_assert(vm->xe, current_fence == num_fence);
+		fences[current_fence++] =
+			xe_exec_queue_last_fence_get(q, vm);
+		for_each_tlb_inval(i)
+			fences[current_fence++] =
+				xe_exec_queue_tlb_inval_last_fence_get(q, vm, i);
+		list_for_each_entry(__q, &q->multi_gt_list,
+				    multi_gt_link) {
+			fences[current_fence++] =
+				xe_exec_queue_last_fence_get(__q, vm);
+			for_each_tlb_inval(i)
+				fences[current_fence++] =
+					xe_exec_queue_tlb_inval_last_fence_get(__q, vm, i);
+		}
+		xe_assert(vm->xe, current_fence == num_fence);
 
-			cf = dma_fence_array_create(num_fence, fences,
-						    dma_fence_context_alloc(1),
-						    1, false);
-			if (!cf)
-				goto err_out;
+		cf = dma_fence_array_create(num_fence, fences,
+					    dma_fence_context_alloc(1),
+					    1, false);
+		if (!cf)
+			goto err_out;
 
-			return &cf->base;
-		}
-
-		fence = xe_exec_queue_last_fence_get(q, vm);
-		return fence;
+		return &cf->base;
 	}
 
-	/*
-	 * Create composite fence - FIXME - the below code doesn't work. This is
-	 * unused in Mesa so we are ok for the moment. Perhaps we just disable
-	 * this entire code path if number of in fences != 0.
-	 */
-	fences = kmalloc_array(num_fence + 1, sizeof(*fences), GFP_KERNEL);
-	if (!fences)
-		return ERR_PTR(-ENOMEM);
-
-	for (i = 0; i < num_sync; ++i) {
-		if (sync[i].fence) {
-			dma_fence_get(sync[i].fence);
-			fences[current_fence++] = sync[i].fence;
-		}
-	}
-	fences[current_fence++] = xe_exec_queue_last_fence_get(q, vm);
-	cf = dma_fence_array_create(num_fence, fences,
-				    dma_fence_context_alloc(1), 1, false);
-	if (!cf)
-		goto err_out;
-
-	return &cf->base;
+	fence = xe_exec_queue_last_fence_get(q, vm);
+	return fence;
 
 err_out:
 	while (current_fence)
 		dma_fence_put(fences[--current_fence]);
 	kfree(fences);
 	kfree(cf);
 
 	return ERR_PTR(-ENOMEM);
 }
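
Similarly, a hypothetical sketch of the zero-bind half of the change: a VM
bind ioctl with no bind operations may still carry a signal-only sync to fence
the bind queue going idle, while an input sync on the same call now fails.
Again, the uAPI names are as best understood here and the helper is invented
for illustration; it is not part of the patch.

/*
 * Hypothetical illustration only -- not part of this patch. A zero bind
 * (num_binds == 0) used to fence the bind queue going idle: the signal-only
 * sync remains supported, while passing the same sync without
 * DRM_XE_SYNC_FLAG_SIGNAL (an input fence) is now rejected with EOPNOTSUPP.
 */
#include <errno.h>
#include <stdint.h>
#include <sys/ioctl.h>

#include <drm/xe_drm.h>

static int xe_bind_queue_idle_fence(int fd, uint32_t vm_id,
				    uint32_t bind_queue_id, uint32_t syncobj)
{
	struct drm_xe_sync sync = {
		.type = DRM_XE_SYNC_TYPE_SYNCOBJ,
		.flags = DRM_XE_SYNC_FLAG_SIGNAL,	/* output fence only */
		.handle = syncobj,
	};
	struct drm_xe_vm_bind bind = {
		.vm_id = vm_id,
		.exec_queue_id = bind_queue_id,
		.num_binds = 0,				/* zero bind */
		.num_syncs = 1,
		.syncs = (uintptr_t)&sync,
	};

	if (ioctl(fd, DRM_IOCTL_XE_VM_BIND, &bind))
		return -errno;

	return 0;
}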