author     Matthew Brost <matthew.brost@intel.com>  2023-07-08 08:23:57 +0300
committer  Rodrigo Vivi <rodrigo.vivi@intel.com>    2023-12-21 19:35:18 +0300
commit     b06d47be7c83165d3b3e45e1d5f9520b79c7f5cc (patch)
tree       b27852e1de1904c7dc2b689f4594e63ea3c6f685 /drivers/gpu/drm/xe/xe_gt_pagefault.c
parent     5cecdd0bb6bf4b8979b7d071017560daecfc9200 (diff)
drm/xe: Port Xe to GPUVA
Rather than open coding VM binds and VMA tracking, use the GPUVA library.
GPUVA provides a common infrastructure for VM binds to use mmap / munmap
semantics and supports VK sparse bindings.

The concepts are:

1) xe_vm inherits from drm_gpuva_manager
2) xe_vma inherits from drm_gpuva
3) xe_vma_op inherits from drm_gpuva_op
4) VM bind operations (MAP, UNMAP, PREFETCH, UNMAP_ALL) call into the GPUVA
   code to generate a VMA operations list which is parsed, committed, and
   executed.

v2 (CI): Add break after default in case statement.
v3: Rebase
v4: Fix some error handling
v5: Use the unlocked VMA version in error paths
v6: Rebase, address review feedback, mainly from Thomas H
v7: Fix compile error in xe_vma_op_unwind, address checkpatch

Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Reviewed-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
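The "inherits from" wording above refers to the usual kernel idiom of embedding the library object in the driver object and recovering the container with container_of(). Below is a minimal, self-contained sketch of that pattern; the struct layouts, field names and the to_xe_vma() helper are illustrative stand-ins, not the actual definitions in xe_vm_types.h or the GPUVA headers.

#include <stddef.h>
#include <stdio.h>

/* Simplified stand-ins for the GPUVA library objects; the real kernel
 * types are struct drm_gpuva_manager, struct drm_gpuva and
 * struct drm_gpuva_op. */
struct gpuva_manager { const char *name; };
struct gpuva { unsigned long long start, range; };

/* container_of() as in the kernel, simplified for userspace. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* "xe_vm inherits from drm_gpuva_manager": embed the base object. */
struct xe_vm_sketch {
	struct gpuva_manager mgr;	/* base object, embedded */
	int usm_enabled;		/* driver-private state */
};

/* "xe_vma inherits from drm_gpuva": same idiom. */
struct xe_vma_sketch {
	struct gpuva base;		/* base object, embedded */
	unsigned int tile_invalidated;	/* driver-private state */
};

/* Downcast from the library object back to the driver object. */
static struct xe_vma_sketch *to_xe_vma(struct gpuva *gpuva)
{
	return container_of(gpuva, struct xe_vma_sketch, base);
}

int main(void)
{
	struct xe_vma_sketch vma = {
		.base = { .start = 0x1000, .range = 0x4000 },
	};
	struct gpuva *gv = &vma.base;	/* what the library hands back */

	/* The driver recovers its own VMA from the generic GPUVA object. */
	printf("vma: start 0x%llx range 0x%llx\n",
	       to_xe_vma(gv)->base.start, to_xe_vma(gv)->base.range);
	return 0;
}

With this layering, the GPUVA code only ever sees the embedded base objects, while the driver keeps its private state alongside them in the same allocation.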
Diffstat (limited to 'drivers/gpu/drm/xe/xe_gt_pagefault.c')
-rw-r--r--  drivers/gpu/drm/xe/xe_gt_pagefault.c | 20
1 file changed, 7 insertions, 13 deletions
diff --git a/drivers/gpu/drm/xe/xe_gt_pagefault.c b/drivers/gpu/drm/xe/xe_gt_pagefault.c
index 4d0f402cc630..d8ff05e25eda 100644
--- a/drivers/gpu/drm/xe/xe_gt_pagefault.c
+++ b/drivers/gpu/drm/xe/xe_gt_pagefault.c
@@ -75,10 +75,10 @@ static bool vma_is_valid(struct xe_tile *tile, struct xe_vma *vma)
!(BIT(tile->id) & vma->usm.tile_invalidated);
}
-static bool vma_matches(struct xe_vma *vma, struct xe_vma *lookup)
+static bool vma_matches(struct xe_vma *vma, u64 page_addr)
{
- if (xe_vma_start(lookup) > xe_vma_end(vma) - 1 ||
- xe_vma_end(lookup) - 1 < xe_vma_start(vma))
+ if (page_addr > xe_vma_end(vma) - 1 ||
+ page_addr + SZ_4K - 1 < xe_vma_start(vma))
return false;
return true;
@@ -91,16 +91,14 @@ static bool only_needs_bo_lock(struct xe_bo *bo)
static struct xe_vma *lookup_vma(struct xe_vm *vm, u64 page_addr)
{
- struct xe_vma *vma = NULL, lookup;
+ struct xe_vma *vma = NULL;
- lookup.start = page_addr;
- lookup.end = lookup.start + SZ_4K - 1;
if (vm->usm.last_fault_vma) { /* Fast lookup */
- if (vma_matches(vm->usm.last_fault_vma, &lookup))
+ if (vma_matches(vm->usm.last_fault_vma, page_addr))
vma = vm->usm.last_fault_vma;
}
if (!vma)
- vma = xe_vm_find_overlapping_vma(vm, &lookup);
+ vma = xe_vm_find_overlapping_vma(vm, page_addr, SZ_4K);
return vma;
}
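After the change, vma_matches() is just an interval-overlap test between the faulting 4 KiB page and the VMA, and lookup_vma() passes the raw fault address straight through. A minimal userspace sketch of that overlap check follows; the vma_range type and page_overlaps() helper are illustrative, not the driver's own names.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SZ_4K 0x1000ULL

/* Illustrative stand-in for a VMA's [start, end) address range. */
struct vma_range { uint64_t start, end; };

/* Same overlap test as the patched vma_matches(): the 4 KiB page at
 * page_addr overlaps the VMA unless it lies entirely before or after it. */
static bool page_overlaps(const struct vma_range *vma, uint64_t page_addr)
{
	if (page_addr > vma->end - 1 ||
	    page_addr + SZ_4K - 1 < vma->start)
		return false;
	return true;
}

int main(void)
{
	struct vma_range vma = { .start = 0x10000, .end = 0x20000 };

	printf("%d %d\n",
	       page_overlaps(&vma, 0x1f000),	/* 1: last page of the VMA */
	       page_overlaps(&vma, 0x20000));	/* 0: first page past the end */
	return 0;
}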
@@ -489,12 +487,8 @@ static struct xe_vma *get_acc_vma(struct xe_vm *vm, struct acc *acc)
{
u64 page_va = acc->va_range_base + (ffs(acc->sub_granularity) - 1) *
sub_granularity_in_byte(acc->granularity);
- struct xe_vma lookup;
-
- lookup.start = page_va;
- lookup.end = lookup.start + SZ_4K - 1;
- return xe_vm_find_overlapping_vma(vm, &lookup);
+ return xe_vm_find_overlapping_vma(vm, page_va, SZ_4K);
}
static int handle_acc(struct xe_gt *gt, struct acc *acc)