author      Francois Dugast <francois.dugast@intel.com>    2023-09-20 22:29:25 +0300
committer   Rodrigo Vivi <rodrigo.vivi@intel.com>           2023-12-21 19:43:10 +0300
commit      78ddc872c6a91d8973ca89209793323efaa86345 (patch)
tree        fc3ca89df28505ce7ad46cf8851387c7e763eeac /drivers/gpu/drm/xe/xe_vm.c
parent      ea0640fc6971f555c8f921e2060376d768685805 (diff)
download    linux-78ddc872c6a91d8973ca89209793323efaa86345.tar.xz
drm/xe/vm: Remove VM_BIND_OP macro
This macro was necessary when bind operations were shifted, but this is no longer the case, so remove it to simplify the code.

Signed-off-by: Francois Dugast <francois.dugast@intel.com>
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Reviewed-by: Matthew Brost <matthew.brost@intel.com>
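As a quick illustration (a minimal sketch, assuming the opcode previously shared its field with flag bits, which is what the 0xffff mask in the removed macro suggests), the old and new comparison patterns look like this; the empty braces are placeholders, not code from the patch:

    /* Before: the low 16 bits of op held the opcode, so the flag bits
     * had to be masked off before every comparison. */
    #define VM_BIND_OP(op) (op & 0xffff)

    if (VM_BIND_OP(op) == XE_VM_BIND_OP_MAP) {
            /* ... */
    }

    /* After: the opcode and the flags are carried in separate fields
     * (note the standalone checks on 'flags' in the diff below), so op
     * can be compared directly and the masking macro is dead weight. */
    if (op == XE_VM_BIND_OP_MAP) {
            /* ... */
    }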
Diffstat (limited to 'drivers/gpu/drm/xe/xe_vm.c')
-rw-r--r--    drivers/gpu/drm/xe/xe_vm.c    42
1 file changed, 19 insertions, 23 deletions
diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
index 3ae911ade7e4..1a0546beef87 100644
--- a/drivers/gpu/drm/xe/xe_vm.c
+++ b/drivers/gpu/drm/xe/xe_vm.c
@@ -2273,8 +2273,6 @@ static int xe_vm_prefetch(struct xe_vm *vm, struct xe_vma *vma,
}
}
-#define VM_BIND_OP(op) (op & 0xffff)
-
static void vm_set_async_error(struct xe_vm *vm, int err)
{
lockdep_assert_held(&vm->lock);
@@ -2290,7 +2288,7 @@ static int vm_bind_ioctl_lookup_vma(struct xe_vm *vm, struct xe_bo *bo,
lockdep_assert_held(&vm->lock);
- switch (VM_BIND_OP(op)) {
+ switch (op) {
case XE_VM_BIND_OP_MAP:
case XE_VM_BIND_OP_MAP_USERPTR:
vma = xe_vm_find_overlapping_vma(vm, addr, range);
@@ -2400,10 +2398,10 @@ vm_bind_ioctl_ops_create(struct xe_vm *vm, struct xe_bo *bo,
vm_dbg(&vm->xe->drm,
"op=%d, addr=0x%016llx, range=0x%016llx, bo_offset_or_userptr=0x%016llx",
- VM_BIND_OP(operation), (ULL)addr, (ULL)range,
+ operation, (ULL)addr, (ULL)range,
(ULL)bo_offset_or_userptr);
- switch (VM_BIND_OP(operation)) {
+ switch (operation) {
case XE_VM_BIND_OP_MAP:
case XE_VM_BIND_OP_MAP_USERPTR:
ops = drm_gpuvm_sm_map_ops_create(&vm->gpuvm, addr, range,
@@ -3246,50 +3244,48 @@ static int vm_bind_ioctl_check_args(struct xe_device *xe,
*async = !!(flags & XE_VM_BIND_FLAG_ASYNC);
} else if (XE_IOCTL_DBG(xe, !*async) ||
XE_IOCTL_DBG(xe, !(flags & XE_VM_BIND_FLAG_ASYNC)) ||
- XE_IOCTL_DBG(xe, VM_BIND_OP(op) ==
- XE_VM_BIND_OP_RESTART)) {
+ XE_IOCTL_DBG(xe, op == XE_VM_BIND_OP_RESTART)) {
err = -EINVAL;
goto free_bind_ops;
}
if (XE_IOCTL_DBG(xe, !*async &&
- VM_BIND_OP(op) == XE_VM_BIND_OP_UNMAP_ALL)) {
+ op == XE_VM_BIND_OP_UNMAP_ALL)) {
err = -EINVAL;
goto free_bind_ops;
}
if (XE_IOCTL_DBG(xe, !*async &&
- VM_BIND_OP(op) == XE_VM_BIND_OP_PREFETCH)) {
+ op == XE_VM_BIND_OP_PREFETCH)) {
err = -EINVAL;
goto free_bind_ops;
}
- if (XE_IOCTL_DBG(xe, VM_BIND_OP(op) >
- XE_VM_BIND_OP_PREFETCH) ||
+ if (XE_IOCTL_DBG(xe, op > XE_VM_BIND_OP_PREFETCH) ||
XE_IOCTL_DBG(xe, flags & ~SUPPORTED_FLAGS) ||
XE_IOCTL_DBG(xe, obj && is_null) ||
XE_IOCTL_DBG(xe, obj_offset && is_null) ||
- XE_IOCTL_DBG(xe, VM_BIND_OP(op) != XE_VM_BIND_OP_MAP &&
+ XE_IOCTL_DBG(xe, op != XE_VM_BIND_OP_MAP &&
is_null) ||
XE_IOCTL_DBG(xe, !obj &&
- VM_BIND_OP(op) == XE_VM_BIND_OP_MAP &&
+ op == XE_VM_BIND_OP_MAP &&
!is_null) ||
XE_IOCTL_DBG(xe, !obj &&
- VM_BIND_OP(op) == XE_VM_BIND_OP_UNMAP_ALL) ||
+ op == XE_VM_BIND_OP_UNMAP_ALL) ||
XE_IOCTL_DBG(xe, addr &&
- VM_BIND_OP(op) == XE_VM_BIND_OP_UNMAP_ALL) ||
+ op == XE_VM_BIND_OP_UNMAP_ALL) ||
XE_IOCTL_DBG(xe, range &&
- VM_BIND_OP(op) == XE_VM_BIND_OP_UNMAP_ALL) ||
+ op == XE_VM_BIND_OP_UNMAP_ALL) ||
XE_IOCTL_DBG(xe, obj &&
- VM_BIND_OP(op) == XE_VM_BIND_OP_MAP_USERPTR) ||
+ op == XE_VM_BIND_OP_MAP_USERPTR) ||
XE_IOCTL_DBG(xe, obj &&
- VM_BIND_OP(op) == XE_VM_BIND_OP_PREFETCH) ||
+ op == XE_VM_BIND_OP_PREFETCH) ||
XE_IOCTL_DBG(xe, region &&
- VM_BIND_OP(op) != XE_VM_BIND_OP_PREFETCH) ||
+ op != XE_VM_BIND_OP_PREFETCH) ||
XE_IOCTL_DBG(xe, !(BIT(region) &
xe->info.mem_region_mask)) ||
XE_IOCTL_DBG(xe, obj &&
- VM_BIND_OP(op) == XE_VM_BIND_OP_UNMAP)) {
+ op == XE_VM_BIND_OP_UNMAP)) {
err = -EINVAL;
goto free_bind_ops;
}
@@ -3297,9 +3293,9 @@ static int vm_bind_ioctl_check_args(struct xe_device *xe,
if (XE_IOCTL_DBG(xe, obj_offset & ~PAGE_MASK) ||
XE_IOCTL_DBG(xe, addr & ~PAGE_MASK) ||
XE_IOCTL_DBG(xe, range & ~PAGE_MASK) ||
- XE_IOCTL_DBG(xe, !range && VM_BIND_OP(op) !=
+ XE_IOCTL_DBG(xe, !range && op !=
XE_VM_BIND_OP_RESTART &&
- VM_BIND_OP(op) != XE_VM_BIND_OP_UNMAP_ALL)) {
+ op != XE_VM_BIND_OP_UNMAP_ALL)) {
err = -EINVAL;
goto free_bind_ops;
}
@@ -3363,7 +3359,7 @@ int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
goto release_vm_lock;
}
- if (VM_BIND_OP(bind_ops[0].op) == XE_VM_BIND_OP_RESTART) {
+ if (bind_ops[0].op == XE_VM_BIND_OP_RESTART) {
if (XE_IOCTL_DBG(xe, !(vm->flags & XE_VM_FLAG_ASYNC_BIND_OPS)))
err = -EOPNOTSUPP;
if (XE_IOCTL_DBG(xe, !err && args->num_syncs))