drm/xe/uapi: Remove sync binds
Remove the concept of async vs. sync VM bind queues and instead make all binds async. The following bits have been dropped from the uAPI:

DRM_XE_ENGINE_CLASS_VM_BIND_ASYNC
DRM_XE_ENGINE_CLASS_VM_BIND_SYNC
DRM_XE_VM_CREATE_FLAG_ASYNC_DEFAULT
DRM_XE_VM_BIND_FLAG_ASYNC

To implement sync binds the UMD is expected to use the out-fence interface.

v2: Send correct version
v3: Drop drm_xe_syncs

Cc: Rodrigo Vivi <rodrigo.vivi@intel.com>
Cc: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Cc: Francois Dugast <francois.dugast@intel.com>
Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Reviewed-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Acked-by: José Roberto de Souza <jose.souza@intel.com>
Acked-by: Mateusz Naklicki <mateusz.naklicki@intel.com>
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Committed by: Rodrigo Vivi
Parent: 7e9337c29f
Commit: d3d767396a
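For reference, below is a minimal userspace sketch (not part of this commit) of how a UMD can get synchronous-bind behaviour through the out-fence interface mentioned above: attach a signalling syncobj to the bind as an out-fence and block on it. It assumes the xe uAPI shape as it later landed upstream (struct drm_xe_sync with DRM_XE_SYNC_TYPE_SYNCOBJ and DRM_XE_SYNC_FLAG_SIGNAL) plus libdrm's syncobj helpers; the function name xe_bind_sync and the fd, vm_id, bo_handle, gpu_addr and size parameters are illustrative placeholders.

/*
 * Hedged sketch: emulate a synchronous VM bind in userspace now that all
 * binds are asynchronous.  Struct layout follows the xe uAPI as merged
 * upstream; later revisions add further required fields (e.g. pat_index)
 * that are omitted here for brevity.
 */
#include <stdint.h>
#include <xf86drm.h>
#include <drm/xe_drm.h>

static int xe_bind_sync(int fd, uint32_t vm_id, uint32_t bo_handle,
			uint64_t gpu_addr, uint64_t size)
{
	uint32_t syncobj;
	int ret;

	/* Out-fence object the kernel will signal when the bind completes. */
	ret = drmSyncobjCreate(fd, 0, &syncobj);
	if (ret)
		return ret;

	struct drm_xe_sync sync = {
		.type = DRM_XE_SYNC_TYPE_SYNCOBJ,
		.flags = DRM_XE_SYNC_FLAG_SIGNAL,	/* out-fence, not in-fence */
		.handle = syncobj,
	};

	struct drm_xe_vm_bind bind = {
		.vm_id = vm_id,
		.num_binds = 1,
		.bind = {
			.obj = bo_handle,
			.obj_offset = 0,
			.range = size,
			.addr = gpu_addr,
			.op = DRM_XE_VM_BIND_OP_MAP,
		},
		.num_syncs = 1,
		.syncs = (uintptr_t)&sync,
	};

	/* Queue the bind; the ioctl returns before the bind has executed. */
	ret = drmIoctl(fd, DRM_IOCTL_XE_VM_BIND, &bind);
	if (ret)
		goto out;

	/* Block until the out-fence signals -- this is the "sync" part. */
	ret = drmSyncobjWait(fd, &syncobj, 1, INT64_MAX, 0, NULL);
out:
	drmSyncobjDestroy(fd, syncobj);
	return ret;
}

Binds issued without such an out-fence simply complete asynchronously, which is now the only mode the kernel offers.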
drivers/gpu/drm/xe/xe_exec_queue.c

@@ -625,10 +625,7 @@ int xe_exec_queue_create_ioctl(struct drm_device *dev, void *data,
 	if (XE_IOCTL_DBG(xe, eci[0].gt_id >= xe->info.gt_count))
 		return -EINVAL;
 
-	if (eci[0].engine_class >= DRM_XE_ENGINE_CLASS_VM_BIND_ASYNC) {
-		bool sync = eci[0].engine_class ==
-			DRM_XE_ENGINE_CLASS_VM_BIND_SYNC;
-
+	if (eci[0].engine_class == DRM_XE_ENGINE_CLASS_VM_BIND) {
 		for_each_gt(gt, xe, id) {
 			struct xe_exec_queue *new;
 
@@ -654,8 +651,6 @@ int xe_exec_queue_create_ioctl(struct drm_device *dev, void *data,
 						   args->width, hwe,
 						   EXEC_QUEUE_FLAG_PERSISTENT |
 						   EXEC_QUEUE_FLAG_VM |
-						   (sync ? 0 :
-						    EXEC_QUEUE_FLAG_VM_ASYNC) |
 						   (id ?
 						    EXEC_QUEUE_FLAG_BIND_ENGINE_CHILD :
 						    0));
drivers/gpu/drm/xe/xe_exec_queue_types.h

@@ -84,8 +84,6 @@ struct xe_exec_queue {
 #define EXEC_QUEUE_FLAG_VM			BIT(4)
 /* child of VM queue for multi-tile VM jobs */
 #define EXEC_QUEUE_FLAG_BIND_ENGINE_CHILD	BIT(5)
-/* VM jobs for this queue are asynchronous */
-#define EXEC_QUEUE_FLAG_VM_ASYNC		BIT(6)
 
 	/**
 	 * @flags: flags for this exec queue, should statically setup aside from ban
drivers/gpu/drm/xe/xe_vm.c

@@ -1343,9 +1343,7 @@ struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags)
 		struct xe_gt *gt = tile->primary_gt;
 		struct xe_vm *migrate_vm;
 		struct xe_exec_queue *q;
-		u32 create_flags = EXEC_QUEUE_FLAG_VM |
-			((flags & XE_VM_FLAG_ASYNC_DEFAULT) ?
-			 EXEC_QUEUE_FLAG_VM_ASYNC : 0);
+		u32 create_flags = EXEC_QUEUE_FLAG_VM;
 
 		if (!vm->pt_root[id])
 			continue;
@@ -1712,12 +1710,6 @@ err_fences:
 	return ERR_PTR(err);
 }
 
-static bool xe_vm_sync_mode(struct xe_vm *vm, struct xe_exec_queue *q)
-{
-	return q ? !(q->flags & EXEC_QUEUE_FLAG_VM_ASYNC) :
-		!(vm->flags & XE_VM_FLAG_ASYNC_DEFAULT);
-}
-
 static int __xe_vm_bind(struct xe_vm *vm, struct xe_vma *vma,
 			struct xe_exec_queue *q, struct xe_sync_entry *syncs,
 			u32 num_syncs, bool immediate, bool first_op,
@@ -1747,8 +1739,6 @@ static int __xe_vm_bind(struct xe_vm *vm, struct xe_vma *vma,
 
 	if (last_op)
 		xe_exec_queue_last_fence_set(wait_exec_queue, vm, fence);
-	if (last_op && xe_vm_sync_mode(vm, q))
-		dma_fence_wait(fence, true);
 	dma_fence_put(fence);
 
 	return 0;
@@ -1791,8 +1781,6 @@ static int xe_vm_unbind(struct xe_vm *vm, struct xe_vma *vma,
 	xe_vma_destroy(vma, fence);
 	if (last_op)
 		xe_exec_queue_last_fence_set(wait_exec_queue, vm, fence);
-	if (last_op && xe_vm_sync_mode(vm, q))
-		dma_fence_wait(fence, true);
 	dma_fence_put(fence);
 
 	return 0;
@@ -1800,7 +1788,6 @@ static int xe_vm_unbind(struct xe_vm *vm, struct xe_vma *vma,
 
 #define ALL_DRM_XE_VM_CREATE_FLAGS (DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE | \
 				    DRM_XE_VM_CREATE_FLAG_LR_MODE | \
-				    DRM_XE_VM_CREATE_FLAG_ASYNC_DEFAULT | \
 				    DRM_XE_VM_CREATE_FLAG_FAULT_MODE)
 
 int xe_vm_create_ioctl(struct drm_device *dev, void *data,
@@ -1854,8 +1841,6 @@ int xe_vm_create_ioctl(struct drm_device *dev, void *data,
 		flags |= XE_VM_FLAG_SCRATCH_PAGE;
 	if (args->flags & DRM_XE_VM_CREATE_FLAG_LR_MODE)
 		flags |= XE_VM_FLAG_LR_MODE;
-	if (args->flags & DRM_XE_VM_CREATE_FLAG_ASYNC_DEFAULT)
-		flags |= XE_VM_FLAG_ASYNC_DEFAULT;
 	if (args->flags & DRM_XE_VM_CREATE_FLAG_FAULT_MODE)
 		flags |= XE_VM_FLAG_FAULT_MODE;
 
@@ -2263,8 +2248,7 @@ static int xe_vma_op_commit(struct xe_vm *vm, struct xe_vma_op *op)
 static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct xe_exec_queue *q,
 				   struct drm_gpuva_ops *ops,
 				   struct xe_sync_entry *syncs, u32 num_syncs,
-				   struct list_head *ops_list, bool last,
-				   bool async)
+				   struct list_head *ops_list, bool last)
 {
 	struct xe_vma_op *last_op = NULL;
 	struct drm_gpuva_op *__op;
@@ -2696,23 +2680,22 @@ static int vm_bind_ioctl_ops_execute(struct xe_vm *vm,
 
 #ifdef TEST_VM_ASYNC_OPS_ERROR
 #define SUPPORTED_FLAGS	\
-	(FORCE_ASYNC_OP_ERROR | DRM_XE_VM_BIND_FLAG_ASYNC | \
-	 DRM_XE_VM_BIND_FLAG_READONLY | DRM_XE_VM_BIND_FLAG_IMMEDIATE | \
-	 DRM_XE_VM_BIND_FLAG_NULL | 0xffff)
+	(FORCE_ASYNC_OP_ERROR | DRM_XE_VM_BIND_FLAG_READONLY | \
+	 DRM_XE_VM_BIND_FLAG_IMMEDIATE | DRM_XE_VM_BIND_FLAG_NULL | 0xffff)
 #else
 #define SUPPORTED_FLAGS	\
-	(DRM_XE_VM_BIND_FLAG_ASYNC | DRM_XE_VM_BIND_FLAG_READONLY | \
+	(DRM_XE_VM_BIND_FLAG_READONLY | \
 	 DRM_XE_VM_BIND_FLAG_IMMEDIATE | DRM_XE_VM_BIND_FLAG_NULL | \
 	 0xffff)
 #endif
 #define XE_64K_PAGE_MASK 0xffffull
+#define ALL_DRM_XE_SYNCS_FLAGS (DRM_XE_SYNCS_FLAG_WAIT_FOR_OP)
 
 #define MAX_BINDS	512	/* FIXME: Picking random upper limit */
 
 static int vm_bind_ioctl_check_args(struct xe_device *xe,
 				    struct drm_xe_vm_bind *args,
-				    struct drm_xe_vm_bind_op **bind_ops,
-				    bool *async)
+				    struct drm_xe_vm_bind_op **bind_ops)
 {
 	int err;
 	int i;
@@ -2775,18 +2758,6 @@ static int vm_bind_ioctl_check_args(struct xe_device *xe,
 			goto free_bind_ops;
 		}
 
-		if (i == 0) {
-			*async = !!(flags & DRM_XE_VM_BIND_FLAG_ASYNC);
-			if (XE_IOCTL_DBG(xe, !*async && args->num_syncs)) {
-				err = -EINVAL;
-				goto free_bind_ops;
-			}
-		} else if (XE_IOCTL_DBG(xe, *async !=
-					!!(flags & DRM_XE_VM_BIND_FLAG_ASYNC))) {
-			err = -EINVAL;
-			goto free_bind_ops;
-		}
-
 		if (XE_IOCTL_DBG(xe, op > DRM_XE_VM_BIND_OP_PREFETCH) ||
 		    XE_IOCTL_DBG(xe, flags & ~SUPPORTED_FLAGS) ||
 		    XE_IOCTL_DBG(xe, obj && is_null) ||
@@ -2854,14 +2825,6 @@ static int vm_bind_ioctl_signal_fences(struct xe_vm *vm,
 
 	xe_exec_queue_last_fence_set(to_wait_exec_queue(vm, q), vm,
 				     fence);
-
-	if (xe_vm_sync_mode(vm, q)) {
-		long timeout = dma_fence_wait(fence, true);
-
-		if (timeout < 0)
-			err = -EINTR;
-	}
-
 	dma_fence_put(fence);
 
 	return err;
@@ -2881,18 +2844,13 @@ int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
 	struct xe_sync_entry *syncs = NULL;
 	struct drm_xe_vm_bind_op *bind_ops;
 	LIST_HEAD(ops_list);
-	bool async;
 	int err;
 	int i;
 
-	err = vm_bind_ioctl_check_args(xe, args, &bind_ops, &async);
+	err = vm_bind_ioctl_check_args(xe, args, &bind_ops);
 	if (err)
 		return err;
 
-	if (XE_IOCTL_DBG(xe, args->pad || args->pad2) ||
-	    XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
-		return -EINVAL;
-
 	if (args->exec_queue_id) {
 		q = xe_exec_queue_lookup(xef, args->exec_queue_id);
 		if (XE_IOCTL_DBG(xe, !q)) {
@@ -2904,12 +2862,6 @@ int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
 			err = -EINVAL;
 			goto put_exec_queue;
 		}
-
-		if (XE_IOCTL_DBG(xe, args->num_binds && async !=
-				 !!(q->flags & EXEC_QUEUE_FLAG_VM_ASYNC))) {
-			err = -EINVAL;
-			goto put_exec_queue;
-		}
 	}
 
 	vm = xe_vm_lookup(xef, args->vm_id);
@@ -2918,14 +2870,6 @@ int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
 		goto put_exec_queue;
 	}
 
-	if (!args->exec_queue_id) {
-		if (XE_IOCTL_DBG(xe, args->num_binds && async !=
-				 !!(vm->flags & XE_VM_FLAG_ASYNC_DEFAULT))) {
-			err = -EINVAL;
-			goto put_vm;
-		}
-	}
-
 	err = down_write_killable(&vm->lock);
 	if (err)
 		goto put_vm;
@@ -3060,8 +3004,7 @@ int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
 
 		err = vm_bind_ioctl_ops_parse(vm, q, ops[i], syncs, num_syncs,
 					      &ops_list,
-					      i == args->num_binds - 1,
-					      async);
+					      i == args->num_binds - 1);
 		if (err)
 			goto unwind_ops;
 	}
drivers/gpu/drm/xe/xe_vm_types.h

@@ -138,13 +138,12 @@ struct xe_vm {
 	 */
 #define XE_VM_FLAG_64K			BIT(0)
 #define XE_VM_FLAG_LR_MODE		BIT(1)
-#define XE_VM_FLAG_ASYNC_DEFAULT	BIT(2)
-#define XE_VM_FLAG_MIGRATION		BIT(3)
-#define XE_VM_FLAG_SCRATCH_PAGE		BIT(4)
-#define XE_VM_FLAG_FAULT_MODE		BIT(5)
-#define XE_VM_FLAG_BANNED		BIT(6)
-#define XE_VM_FLAG_TILE_ID(flags)	FIELD_GET(GENMASK(8, 7), flags)
-#define XE_VM_FLAG_SET_TILE_ID(tile)	FIELD_PREP(GENMASK(8, 7), (tile)->id)
+#define XE_VM_FLAG_MIGRATION		BIT(2)
+#define XE_VM_FLAG_SCRATCH_PAGE		BIT(3)
+#define XE_VM_FLAG_FAULT_MODE		BIT(4)
+#define XE_VM_FLAG_BANNED		BIT(5)
+#define XE_VM_FLAG_TILE_ID(flags)	FIELD_GET(GENMASK(7, 6), flags)
+#define XE_VM_FLAG_SET_TILE_ID(tile)	FIELD_PREP(GENMASK(7, 6), (tile)->id)
 	unsigned long flags;
 
 	/** @composite_fence_ctx: context composite fence */
include/uapi/drm/xe_drm.h

@@ -139,8 +139,7 @@ struct drm_xe_engine_class_instance {
 	 * Kernel only classes (not actual hardware engine class). Used for
 	 * creating ordered queues of VM bind operations.
 	 */
-#define DRM_XE_ENGINE_CLASS_VM_BIND_ASYNC	5
-#define DRM_XE_ENGINE_CLASS_VM_BIND_SYNC	6
+#define DRM_XE_ENGINE_CLASS_VM_BIND		5
 	/** @engine_class: engine class id */
 	__u16 engine_class;
 	/** @engine_instance: engine instance id */
@@ -660,7 +659,6 @@ struct drm_xe_vm_create {
 	 * still enable recoverable pagefaults if supported by the device.
 	 */
 #define DRM_XE_VM_CREATE_FLAG_LR_MODE	(1 << 1)
-#define DRM_XE_VM_CREATE_FLAG_ASYNC_DEFAULT	(1 << 2)
 	/*
 	 * DRM_XE_VM_CREATE_FLAG_FAULT_MODE requires also
 	 * DRM_XE_VM_CREATE_FLAG_LR_MODE. It allows memory to be allocated
@@ -668,7 +666,7 @@ struct drm_xe_vm_create {
 	 * The xe driver internally uses recoverable pagefaults to implement
 	 * this.
	 */
-#define DRM_XE_VM_CREATE_FLAG_FAULT_MODE	(1 << 3)
+#define DRM_XE_VM_CREATE_FLAG_FAULT_MODE	(1 << 2)
 	/** @flags: Flags */
 	__u32 flags;
 
@@ -776,12 +774,11 @@ struct drm_xe_vm_bind_op {
 	__u32 op;
 
 #define DRM_XE_VM_BIND_FLAG_READONLY	(1 << 0)
-#define DRM_XE_VM_BIND_FLAG_ASYNC	(1 << 1)
 	/*
 	 * Valid on a faulting VM only, do the MAP operation immediately rather
 	 * than deferring the MAP to the page fault handler.
 	 */
-#define DRM_XE_VM_BIND_FLAG_IMMEDIATE	(1 << 2)
+#define DRM_XE_VM_BIND_FLAG_IMMEDIATE	(1 << 1)
 	/*
 	 * When the NULL flag is set, the page tables are setup with a special
 	 * bit which indicates writes are dropped and all reads return zero. In
@@ -789,7 +786,7 @@ struct drm_xe_vm_bind_op {
 	 * operations, the BO handle MBZ, and the BO offset MBZ. This flag is
 	 * intended to implement VK sparse bindings.
 	 */
-#define DRM_XE_VM_BIND_FLAG_NULL	(1 << 3)
+#define DRM_XE_VM_BIND_FLAG_NULL	(1 << 2)
 	/** @flags: Bind flags */
 	__u32 flags;
 