author     Matthew Brost <matthew.brost@intel.com>   2023-07-08 08:23:57 +0300
committer  Rodrigo Vivi <rodrigo.vivi@intel.com>     2023-12-21 19:35:18 +0300
commit     b06d47be7c83165d3b3e45e1d5f9520b79c7f5cc (patch)
tree       b27852e1de1904c7dc2b689f4594e63ea3c6f685 /drivers/gpu/drm/xe/xe_migrate.c
parent     5cecdd0bb6bf4b8979b7d071017560daecfc9200 (diff)
drm/xe: Port Xe to GPUVA
Rather than open coding VM binds and VMA tracking, use the GPUVA
library. GPUVA provides a common infrastructure for VM binds to use
mmap / munmap semantics and support for VK sparse bindings.

The concepts are:

1) xe_vm inherits from drm_gpuva_manager
2) xe_vma inherits from drm_gpuva
3) xe_vma_op inherits from drm_gpuva_op
4) VM bind operations (MAP, UNMAP, PREFETCH, UNMAP_ALL) call into the
   GPUVA code to generate a VMA operations list which is parsed,
   committed, and executed.

v2 (CI): Add break after default in case statement.
v3: Rebase
v4: Fix some error handling
v5: Use unlocked VMA version in error paths
v6: Rebase, address some review feedback, mainly from Thomas H
v7: Fix compile error in xe_vma_op_unwind, address checkpatch

Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Reviewed-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
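As a quick illustration of concepts 1)-3), here is a minimal sketch of
the embedding pattern. Only the gpuva member name is confirmed by the
diff below (vma->gpuva.flags); the mgr member name, the helper, and the
elided members are assumptions for illustration, not the exact Xe
definitions.

#include <linux/container_of.h>
#include <drm/drm_gpuva_mgr.h>	/* renamed to drm/drm_gpuvm.h in later kernels */

/* Minimal sketch of "inheritance" by embedding the base object first.
 * The gpuva member matches the diff below; the mgr member name and the
 * helper are illustrative assumptions. */
struct xe_vm {
	struct drm_gpuva_manager mgr;	/* base: generic VM state */
	/* ... Xe-specific VM state ... */
};

struct xe_vma {
	struct drm_gpuva gpuva;		/* base: generic VMA state */
	/* ... Xe-specific VMA state ... */
};

/* Recover the driver VMA from the base object GPUVA hands back. */
static inline struct xe_vma *gpuva_to_vma(struct drm_gpuva *gpuva)
{
	return container_of(gpuva, struct xe_vma, gpuva);
}

This is the standard kernel subclassing pattern: the GPUVA library
traffics in drm_gpuva pointers, and the driver recovers its wrapper
with container_of().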
Diffstat (limited to 'drivers/gpu/drm/xe/xe_migrate.c')
-rw-r--r--  drivers/gpu/drm/xe/xe_migrate.c | 14 +++++++-------
1 file changed, 7 insertions(+), 7 deletions(-)
diff --git a/drivers/gpu/drm/xe/xe_migrate.c b/drivers/gpu/drm/xe/xe_migrate.c
index be98690f2bc9..f05335b16a1a 100644
--- a/drivers/gpu/drm/xe/xe_migrate.c
+++ b/drivers/gpu/drm/xe/xe_migrate.c
@@ -197,7 +197,7 @@ static int xe_migrate_prepare_vm(struct xe_tile *tile, struct xe_migrate *m,
 	/* Map the entire BO in our level 0 pt */
 	for (i = 0, level = 0; i < num_entries; level++) {
 		entry = xe_pte_encode(NULL, bo, i * XE_PAGE_SIZE,
-				      XE_CACHE_WB, 0, 0);
+				      XE_CACHE_WB, 0);
 		xe_map_wr(xe, &bo->vmap, map_ofs + level * 8, u64, entry);
@@ -216,7 +216,7 @@ static int xe_migrate_prepare_vm(struct xe_tile *tile, struct xe_migrate *m,
 	     i += vm->flags & XE_VM_FLAGS_64K ? XE_64K_PAGE_SIZE :
 	     XE_PAGE_SIZE) {
 		entry = xe_pte_encode(NULL, batch, i,
-				      XE_CACHE_WB, 0, 0);
+				      XE_CACHE_WB, 0);
 		xe_map_wr(xe, &bo->vmap, map_ofs + level * 8, u64,
 			  entry);
@@ -1068,7 +1068,7 @@ xe_migrate_update_pgtables_cpu(struct xe_migrate *m,
 					    DMA_RESV_USAGE_KERNEL))
 		return ERR_PTR(-ETIME);
-	if (wait_vm && !dma_resv_test_signaled(&vm->resv,
+	if (wait_vm && !dma_resv_test_signaled(xe_vm_resv(vm),
 					       DMA_RESV_USAGE_BOOKKEEP))
 		return ERR_PTR(-ETIME);
@@ -1159,7 +1159,8 @@ xe_migrate_update_pgtables(struct xe_migrate *m,
 	u64 addr;
 	int err = 0;
 	bool usm = !eng && xe->info.supports_usm;
-	bool first_munmap_rebind = vma && vma->first_munmap_rebind;
+	bool first_munmap_rebind = vma &&
+		vma->gpuva.flags & XE_VMA_FIRST_REBIND;
 	struct xe_engine *eng_override = !eng ? m->eng : eng;
 	/* Use the CPU if no in syncs and engine is idle */
@@ -1232,8 +1233,7 @@ xe_migrate_update_pgtables(struct xe_migrate *m,
 			BUG_ON(pt_bo->size != SZ_4K);
-			addr = xe_pte_encode(NULL, pt_bo, 0, XE_CACHE_WB,
-					     0, 0);
+			addr = xe_pte_encode(NULL, pt_bo, 0, XE_CACHE_WB, 0);
 			bb->cs[bb->len++] = lower_32_bits(addr);
 			bb->cs[bb->len++] = upper_32_bits(addr);
 		}
@@ -1281,7 +1281,7 @@ xe_migrate_update_pgtables(struct xe_migrate *m,
 	 * trigger preempts before moving forward
 	 */
 	if (first_munmap_rebind) {
-		err = job_add_deps(job, &vm->resv,
+		err = job_add_deps(job, xe_vm_resv(vm),
 				   DMA_RESV_USAGE_BOOKKEEP);
 		if (err)
 			goto err_job;
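
On the first_munmap_rebind change: the dedicated xe_vma boolean is
folded into the flags word of the embedded drm_gpuva, built on the user
bits the GPUVA library reserves for drivers. A minimal sketch of the
pattern follows; the exact bit position and the helper are assumptions,
not the Xe definitions.

#include <drm/drm_gpuva_mgr.h>

/* Driver-private VMA flag layered on drm_gpuva.flags. DRM_GPUVA_USERBITS
 * marks the first bit GPUVA leaves to drivers; the shift chosen here is
 * an assumption for illustration only. */
#define XE_VMA_FIRST_REBIND	(DRM_GPUVA_USERBITS << 1)

/* Hypothetical helper; the diff above open codes this test. */
static bool xe_vma_is_first_rebind(const struct xe_vma *vma)
{
	return vma->gpuva.flags & XE_VMA_FIRST_REBIND;
}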
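On the &vm->resv changes: direct field access is replaced by the
xe_vm_resv() accessor, so callers in xe_migrate.c no longer depend on
where the VM's reservation object lives. One plausible shape for the
accessor at this point in the series, assuming the VM still owns the
dma_resv directly; the body is a guess, not copied from the Xe sources.

#include <linux/dma-resv.h>

/* Accessor hiding the reservation object's home; if the resv later
 * moves (e.g. into the GPUVA manager's backing object), only this
 * helper changes, not its callers. The body is an assumption. */
static inline struct dma_resv *xe_vm_resv(struct xe_vm *vm)
{
	return &vm->resv;
}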