diff options
author | Thomas Hellström <thomas.hellstrom@linux.intel.com> | 2023-11-27 15:33:49 +0300 |
---|---|---|
committer | Rodrigo Vivi <rodrigo.vivi@intel.com> | 2023-12-21 19:44:58 +0300 |
commit | fdb6a05383fab3952c9a56ac716e460134990a69 (patch) | |
tree | 3e5d33aa20ffe075f6063a2029f485a36a89e057 /drivers/gpu/drm/xe/xe_vm.h | |
parent | d2f51c50b941f89850c9a9561486938b71c0b9f8 (diff) | |
download | linux-fdb6a05383fab3952c9a56ac716e460134990a69.tar.xz |
drm/xe: Internally change the compute_mode and no_dma_fence mode naming
The name "compute_mode" can be confusing since compute uses either this
mode or fault_mode to achieve the long-running semantics, and compute_mode
can, moving forward, enable fault_mode under the hood to work around
hardware limitations.
Also, the name no_dma_fence_mode really refers to what we elsewhere call
long-running mode and, contrary to what its name suggests, the mode allows
dma-fences as in-fences.
So in an attempt to be more consistent, rename
no_dma_fence_mode -> lr_mode
compute_mode -> preempt_fence_mode
And adjust flags so that
preempt_fence_mode sets XE_VM_FLAG_LR_MODE
fault_mode sets XE_VM_FLAG_LR_MODE | XE_VM_FLAG_FAULT_MODE
v2:
- Fix a typo in the commit message (Oak Zeng)
Signed-off-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Reviewed-by: Oak Zeng <oak.zeng@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20231127123349.23698-1-thomas.hellstrom@linux.intel.com
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Diffstat (limited to 'drivers/gpu/drm/xe/xe_vm.h')
-rw-r--r-- | drivers/gpu/drm/xe/xe_vm.h | 16 |
1 file changed, 8 insertions, 8 deletions
diff --git a/drivers/gpu/drm/xe/xe_vm.h b/drivers/gpu/drm/xe/xe_vm.h index 45b70ba86553..12bb5d79487f 100644 --- a/drivers/gpu/drm/xe/xe_vm.h +++ b/drivers/gpu/drm/xe/xe_vm.h @@ -149,19 +149,19 @@ int xe_vm_bind_ioctl(struct drm_device *dev, void *data, void xe_vm_close_and_put(struct xe_vm *vm); -static inline bool xe_vm_in_compute_mode(struct xe_vm *vm) +static inline bool xe_vm_in_fault_mode(struct xe_vm *vm) { - return vm->flags & XE_VM_FLAG_COMPUTE_MODE; + return vm->flags & XE_VM_FLAG_FAULT_MODE; } -static inline bool xe_vm_in_fault_mode(struct xe_vm *vm) +static inline bool xe_vm_in_lr_mode(struct xe_vm *vm) { - return vm->flags & XE_VM_FLAG_FAULT_MODE; + return vm->flags & XE_VM_FLAG_LR_MODE; } -static inline bool xe_vm_no_dma_fences(struct xe_vm *vm) +static inline bool xe_vm_in_preempt_fence_mode(struct xe_vm *vm) { - return xe_vm_in_compute_mode(vm) || xe_vm_in_fault_mode(vm); + return xe_vm_in_lr_mode(vm) && !xe_vm_in_fault_mode(vm); } int xe_vm_add_compute_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q); @@ -181,7 +181,7 @@ extern struct ttm_device_funcs xe_ttm_funcs; static inline void xe_vm_queue_rebind_worker(struct xe_vm *vm) { - xe_assert(vm->xe, xe_vm_in_compute_mode(vm)); + xe_assert(vm->xe, xe_vm_in_preempt_fence_mode(vm)); queue_work(vm->xe->ordered_wq, &vm->preempt.rebind_work); } @@ -196,7 +196,7 @@ static inline void xe_vm_queue_rebind_worker(struct xe_vm *vm) */ static inline void xe_vm_reactivate_rebind(struct xe_vm *vm) { - if (xe_vm_in_compute_mode(vm) && vm->preempt.rebind_deactivated) { + if (xe_vm_in_preempt_fence_mode(vm) && vm->preempt.rebind_deactivated) { vm->preempt.rebind_deactivated = false; xe_vm_queue_rebind_worker(vm); } |