author     Matthew Auld <matthew.auld@intel.com>   2023-07-13 12:00:49 +0300
committer  Rodrigo Vivi <rodrigo.vivi@intel.com>   2023-12-21 19:37:37 +0300
commit     ee82d2da9c8ac13486550b2c86068e1d6edddf51 (patch)
tree       a77e2a1ebcbd0e408423c2dd5947779b13195a8f /drivers/gpu/drm/xe/xe_bo.c
parent     5a142f9c675ab524a5f18457859ed2002507ea74 (diff)
download   linux-ee82d2da9c8ac13486550b2c86068e1d6edddf51.tar.xz
drm/xe: add missing bulk_move reset
It looks like bulk_move is set during object construction, but is only
removed on object close. However, in various places we might not yet have
an actual fd to close, like on the error paths for the gem_create ioctl,
and also one internal user for the evict_test_run_gt() selftest. Try to
handle those cases by manually resetting the bulk_move. This should
prevent triggering:

WARNING: CPU: 7 PID: 8252 at drivers/gpu/drm/ttm/ttm_bo.c:327
ttm_bo_release+0x25e/0x2a0 [ttm]

v2 (Nirmoy):
  - It should be safe to just unconditionally call
    __xe_bo_unset_bulk_move() in most places.

Signed-off-by: Matthew Auld <matthew.auld@intel.com>
Cc: Matthew Brost <matthew.brost@intel.com>
Reviewed-by: Nirmoy Das <nirmoy.das@intel.com>
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
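[Editor's note] The asymmetry the message describes — tracking attached in the
constructor, detached only on fd close — is what the patch has to compensate
for on error paths. The following is a minimal, self-contained sketch of that
failure mode in plain C; it is not xe or TTM code, and every identifier in it
(example_lru, example_res, example_unset_tracking, ...) is hypothetical. The
assert in example_release() plays the role of the ttm_bo_release() WARN_ON:
remove the manual example_unset_tracking() call and it fires.

#include <assert.h>
#include <stdlib.h>

/* Hypothetical stand-ins for the VM's LRU bulk-move state and the BO. */
struct example_lru {
	int nr_tracked;
};

struct example_res {
	struct example_lru *bulk;	/* set at construction, like bulk_move */
};

static struct example_res *example_create(struct example_lru *lru)
{
	struct example_res *res = malloc(sizeof(*res));

	if (!res)
		return NULL;
	res->bulk = lru;		/* tracking attached in the constructor */
	lru->nr_tracked++;
	return res;
}

/* Normally only reached from the fd/handle close path. */
static void example_unset_tracking(struct example_res *res)
{
	if (res->bulk) {
		res->bulk->nr_tracked--;
		res->bulk = NULL;
	}
}

static void example_release(struct example_res *res)
{
	/* Plays the role of the WARN_ON in ttm_bo_release(). */
	assert(!res->bulk);
	free(res);
}

int main(void)
{
	struct example_lru lru = { 0 };
	struct example_res *res = example_create(&lru);

	if (!res)
		return 1;
	/*
	 * Error path that never produced an fd: no close will ever run,
	 * so the tracking has to be dropped manually before release.
	 */
	example_unset_tracking(res);
	example_release(res);
	return 0;
}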
Diffstat (limited to 'drivers/gpu/drm/xe/xe_bo.c')
-rw-r--r--  drivers/gpu/drm/xe/xe_bo.c  26
1 file changed, 17 insertions(+), 9 deletions(-)
diff --git a/drivers/gpu/drm/xe/xe_bo.c b/drivers/gpu/drm/xe/xe_bo.c
index 9ad5cf3e2463..a3bb14aa2234 100644
--- a/drivers/gpu/drm/xe/xe_bo.c
+++ b/drivers/gpu/drm/xe/xe_bo.c
@@ -1327,6 +1327,7 @@ xe_bo_create_locked_range(struct xe_device *xe,
 	return bo;
 
 err_unlock_put_bo:
+	__xe_bo_unset_bulk_move(bo);
 	xe_bo_unlock_vm_held(bo);
 	xe_bo_put(bo);
 	return ERR_PTR(err);
@@ -1770,22 +1771,29 @@ int xe_gem_create_ioctl(struct drm_device *dev, void *data,
 	bo_flags |= args->flags << (ffs(XE_BO_CREATE_SYSTEM_BIT) - 1);
 	bo = xe_bo_create(xe, NULL, vm, args->size, ttm_bo_type_device,
 			  bo_flags);
-	if (vm) {
-		xe_vm_unlock(vm, &ww);
-		xe_vm_put(vm);
+	if (IS_ERR(bo)) {
+		err = PTR_ERR(bo);
+		goto out_vm;
 	}
 
-	if (IS_ERR(bo))
-		return PTR_ERR(bo);
-
 	err = drm_gem_handle_create(file, &bo->ttm.base, &handle);
-	xe_bo_put(bo);
 	if (err)
-		return err;
+		goto out_bulk;
 
 	args->handle = handle;
+	goto out_put;
 
-	return 0;
+out_bulk:
+	if (vm && !xe_vm_in_fault_mode(vm))
+		__xe_bo_unset_bulk_move(bo);
+out_put:
+	xe_bo_put(bo);
+out_vm:
+	if (vm) {
+		xe_vm_unlock(vm, &ww);
+		xe_vm_put(vm);
+	}
+	return err;
 }
 
 int xe_gem_mmap_offset_ioctl(struct drm_device *dev, void *data,
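[Editor's note] For readers unfamiliar with the unwind structure the second
hunk introduces: a failure after the BO exists falls through out_bulk (drop
the bulk-move tracking), out_put (drop the local BO reference) and out_vm
(unlock and drop the VM), while the success path jumps straight to out_put so
that only the local reference and the VM are released. Below is a small,
compilable illustration of that label ordering; it is a generic sketch under
those assumptions, not the driver code, and every identifier in it is made up.

#include <errno.h>
#include <stdio.h>

/* Hypothetical resources standing in for the VM, the BO and the handle. */
static int take_vm(void)		{ return 0; }
static void drop_vm(void)		{ puts("out_vm:   unlock + put vm"); }
static int create_obj(void)		{ return 0; }
static void unset_tracking(void)	{ puts("out_bulk: unset bulk move"); }
static void put_obj(void)		{ puts("out_put:  put object"); }
static int make_handle(void)		{ return -EINVAL; /* force the error path */ }

static int example_ioctl(void)
{
	int err;

	err = take_vm();
	if (err)
		return err;

	err = create_obj();
	if (err)
		goto out_vm;		/* object never existed */

	err = make_handle();
	if (err)
		goto out_bulk;		/* object exists but has no handle */

	goto out_put;			/* success: still drop the local ref */

out_bulk:
	unset_tracking();
out_put:
	put_obj();
out_vm:
	drop_vm();
	return err;
}

int main(void)
{
	printf("example_ioctl() = %d\n", example_ioctl());
	return 0;
}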