author		Matthew Brost <matthew.brost@intel.com>	2024-03-12 21:39:07 +0300
committer	Sasha Levin <sashal@kernel.org>	2024-03-27 01:17:33 +0300
commit		567d34a745d066a87b44d8c66a797aa40fcb2dea (patch)
tree		854fd1d32648e31d303646b6b02d11e276a11dce /drivers
parent		262da920896e2f2ab0e3947d9dbee0aa09045818 (diff)
download	linux-567d34a745d066a87b44d8c66a797aa40fcb2dea.tar.xz
drm/xe: Invalidate userptr VMA on page pin fault
[ Upstream commit 386021394394eccef248dc5eb9c9370240821a8c ]

Rather than return an error to the user or ban the VM when a userptr VMA
page pin fails with -EFAULT, invalidate the VMA's mappings. This supports
the UMD use case of freeing userptr memory while still having bindings.

Now that non-faulting VMs can invalidate VMAs, drop the usm prefix for
the tile_invalidated member.

v2:
 - Fix build error (CI)
v3:
 - Don't invalidate VMA if in fault mode, rather kill VM (Thomas)
 - Update commit message with tile_invalidated name change (Thomas)
 - Wait VM bookkeep slots with VM resv lock (Thomas)
v4:
 - Move list_del_init(&userptr.repin_link) after error check (Thomas)
 - Assert not in fault mode (Matthew)

Fixes: dd08ebf6c352 ("drm/xe: Introduce a new DRM driver for Intel GPUs")
Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Reviewed-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20240312183907.933835-1-matthew.brost@intel.com
(cherry picked from commit 521db22a1d70dbc596a07544a738416025b1b63c)
Signed-off-by: Lucas De Marchi <lucas.demarchi@intel.com>
Signed-off-by: Sasha Levin <sashal@kernel.org>
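For readers skimming the change, here is a condensed, self-contained C sketch of the repin control flow this patch introduces. The types and helpers below (pin_pages(), wait_pending_binds(), invalidate_vma(), move_to_rebind_list()) are hypothetical stand-ins, not the xe internals; the authoritative logic is xe_vm_userptr_pin() in the diff that follows.

#include <errno.h>

struct sketch_vma { int unused; };

/* Hypothetical stand-ins for the real xe helpers. */
static int pin_pages(struct sketch_vma *vma)            { (void)vma; return -EFAULT; }
static void wait_pending_binds(void)                    { }
static int invalidate_vma(struct sketch_vma *vma)       { (void)vma; return 0; }
static void move_to_rebind_list(struct sketch_vma *vma) { (void)vma; }

/* One iteration of the repin loop after this patch. */
static int repin_one(struct sketch_vma *vma)
{
        int err = pin_pages(vma);

        if (err == -EFAULT) {
                /*
                 * The userptr pages are gone (e.g. the UMD freed the memory
                 * behind a still-live binding). Instead of failing the pin or
                 * banning the VM, wait for pending binds and drop the GPU
                 * mappings for this VMA.
                 */
                wait_pending_binds();
                return invalidate_vma(vma);
        }
        if (err < 0)
                return err;             /* other errors still propagate */

        move_to_rebind_list(vma);       /* normal path: rebind later */
        return 0;
}

int main(void)
{
        struct sketch_vma vma = { 0 };

        return repin_one(&vma) ? 1 : 0;
}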
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/gpu/drm/xe/xe_gt_pagefault.c	4
-rw-r--r--	drivers/gpu/drm/xe/xe_trace.h	2
-rw-r--r--	drivers/gpu/drm/xe/xe_vm.c	32
-rw-r--r--	drivers/gpu/drm/xe/xe_vm_types.h	7
4 files changed, 29 insertions, 16 deletions
diff --git a/drivers/gpu/drm/xe/xe_gt_pagefault.c b/drivers/gpu/drm/xe/xe_gt_pagefault.c
index e2cf1759527c..ab8536c4fd58 100644
--- a/drivers/gpu/drm/xe/xe_gt_pagefault.c
+++ b/drivers/gpu/drm/xe/xe_gt_pagefault.c
@@ -69,7 +69,7 @@ static bool access_is_atomic(enum access_type access_type)
 static bool vma_is_valid(struct xe_tile *tile, struct xe_vma *vma)
 {
         return BIT(tile->id) & vma->tile_present &&
-                !(BIT(tile->id) & vma->usm.tile_invalidated);
+                !(BIT(tile->id) & vma->tile_invalidated);
 }
 
 static bool vma_matches(struct xe_vma *vma, u64 page_addr)
@@ -226,7 +226,7 @@ retry_userptr:
 
         if (xe_vma_is_userptr(vma))
                 ret = xe_vma_userptr_check_repin(to_userptr_vma(vma));
-        vma->usm.tile_invalidated &= ~BIT(tile->id);
+        vma->tile_invalidated &= ~BIT(tile->id);
 
 unlock_dma_resv:
         drm_exec_fini(&exec);
diff --git a/drivers/gpu/drm/xe/xe_trace.h b/drivers/gpu/drm/xe/xe_trace.h
index 4ddc55527f9a..846f14507d5f 100644
--- a/drivers/gpu/drm/xe/xe_trace.h
+++ b/drivers/gpu/drm/xe/xe_trace.h
@@ -468,7 +468,7 @@ DEFINE_EVENT(xe_vma, xe_vma_userptr_invalidate,
              TP_ARGS(vma)
 );
 
-DEFINE_EVENT(xe_vma, xe_vma_usm_invalidate,
+DEFINE_EVENT(xe_vma, xe_vma_invalidate,
              TP_PROTO(struct xe_vma *vma),
              TP_ARGS(vma)
 );
diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
index 3b21afe5b488..a2397f232efc 100644
--- a/drivers/gpu/drm/xe/xe_vm.c
+++ b/drivers/gpu/drm/xe/xe_vm.c
@@ -706,6 +706,7 @@ int xe_vm_userptr_pin(struct xe_vm *vm)
         int err = 0;
         LIST_HEAD(tmp_evict);
 
+        xe_assert(vm->xe, !xe_vm_in_fault_mode(vm));
         lockdep_assert_held_write(&vm->lock);
 
         /* Collect invalidated userptrs */
@@ -722,11 +723,27 @@ int xe_vm_userptr_pin(struct xe_vm *vm)
         list_for_each_entry_safe(uvma, next, &vm->userptr.repin_list,
                                  userptr.repin_link) {
                 err = xe_vma_userptr_pin_pages(uvma);
-                if (err < 0)
-                        return err;
+                if (err == -EFAULT) {
+                        list_del_init(&uvma->userptr.repin_link);
 
-                list_del_init(&uvma->userptr.repin_link);
-                list_move_tail(&uvma->vma.combined_links.rebind, &vm->rebind_list);
+                        /* Wait for pending binds */
+                        xe_vm_lock(vm, false);
+                        dma_resv_wait_timeout(xe_vm_resv(vm),
+                                              DMA_RESV_USAGE_BOOKKEEP,
+                                              false, MAX_SCHEDULE_TIMEOUT);
+
+                        err = xe_vm_invalidate_vma(&uvma->vma);
+                        xe_vm_unlock(vm);
+                        if (err)
+                                return err;
+                } else {
+                        if (err < 0)
+                                return err;
+
+                        list_del_init(&uvma->userptr.repin_link);
+                        list_move_tail(&uvma->vma.combined_links.rebind,
+                                       &vm->rebind_list);
+                }
         }
 
         return 0;
@@ -2010,7 +2027,7 @@ static int xe_vm_prefetch(struct xe_vm *vm, struct xe_vma *vma,
                 return err;
         }
 
-        if (vma->tile_mask != (vma->tile_present & ~vma->usm.tile_invalidated)) {
+        if (vma->tile_mask != (vma->tile_present & ~vma->tile_invalidated)) {
                 return xe_vm_bind(vm, vma, q, xe_vma_bo(vma), syncs, num_syncs,
                                   true, first_op, last_op);
         } else {
@@ -3184,9 +3201,8 @@ int xe_vm_invalidate_vma(struct xe_vma *vma)
         u8 id;
         int ret;
 
-        xe_assert(xe, xe_vm_in_fault_mode(xe_vma_vm(vma)));
         xe_assert(xe, !xe_vma_is_null(vma));
-        trace_xe_vma_usm_invalidate(vma);
+        trace_xe_vma_invalidate(vma);
 
         /* Check that we don't race with page-table updates */
         if (IS_ENABLED(CONFIG_PROVE_LOCKING)) {
@@ -3224,7 +3240,7 @@ int xe_vm_invalidate_vma(struct xe_vma *vma)
                 }
         }
 
-        vma->usm.tile_invalidated = vma->tile_mask;
+        vma->tile_invalidated = vma->tile_mask;
 
         return 0;
 }
diff --git a/drivers/gpu/drm/xe/xe_vm_types.h b/drivers/gpu/drm/xe/xe_vm_types.h
index 7300eea5394b..5b9654947950 100644
--- a/drivers/gpu/drm/xe/xe_vm_types.h
+++ b/drivers/gpu/drm/xe/xe_vm_types.h
@@ -83,11 +83,8 @@ struct xe_vma {
                 struct work_struct destroy_work;
         };
 
-        /** @usm: unified shared memory state */
-        struct {
-                /** @tile_invalidated: VMA has been invalidated */
-                u8 tile_invalidated;
-        } usm;
+        /** @tile_invalidated: VMA has been invalidated */
+        u8 tile_invalidated;
 
         /** @tile_mask: Tile mask of where to create binding for this VMA */
         u8 tile_mask;
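As a closing note, a minimal sketch of how the per-tile bits interact once tile_invalidated lives directly in the VMA rather than under the usm sub-struct. The struct below is a simplified stand-in, not the real struct xe_vma; the two helpers mirror vma_is_valid() in xe_gt_pagefault.c and the final assignment in xe_vm_invalidate_vma() shown above.

#include <stdbool.h>
#include <stdint.h>

/* Simplified stand-in for the relevant struct xe_vma fields. */
struct vma_tile_bits {
        uint8_t tile_present;     /* tiles that currently have page-table entries */
        uint8_t tile_invalidated; /* tiles whose mappings were invalidated */
        uint8_t tile_mask;        /* tiles this VMA should be bound on */
};

/* Mirrors vma_is_valid(): mapping counts only if present and not invalidated. */
static bool tile_mapping_valid(const struct vma_tile_bits *v, unsigned int tile_id)
{
        return (v->tile_present & (1u << tile_id)) &&
               !(v->tile_invalidated & (1u << tile_id));
}

/* Mirrors the end of xe_vm_invalidate_vma(): mark every tile invalidated. */
static void mark_all_tiles_invalidated(struct vma_tile_bits *v)
{
        v->tile_invalidated = v->tile_mask;
}

int main(void)
{
        struct vma_tile_bits v = { .tile_present = 0x1, .tile_invalidated = 0, .tile_mask = 0x1 };

        bool valid_before = tile_mapping_valid(&v, 0);  /* true */
        mark_all_tiles_invalidated(&v);
        bool valid_after = tile_mapping_valid(&v, 0);   /* false */

        return (valid_before && !valid_after) ? 0 : 1;
}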