From 3aa3c5c249086ffc920e8f6d6a15bdd441153d45 Mon Sep 17 00:00:00 2001
From: Matthew Brost
Date: Thu, 1 Feb 2024 19:34:40 -0800
Subject: drm/xe: Map both mem.kernel_bb_pool and usm.bb_pool
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

For integrated devices we need to map both mem.kernel_bb_pool and
usm.bb_pool to be able to run batches from both pools.

Fixes: a682b6a42d4d ("drm/xe: Support device page faults on integrated platforms")
Tested-by: Brian Welty
Signed-off-by: Matthew Brost
Reviewed-by: Brian Welty
Link: https://patchwork.freedesktop.org/patch/msgid/20240202033440.2351862-1-matthew.brost@intel.com
(cherry picked from commit 72f86ed3c88933d6fa09b036de93621ea71097a7)
Signed-off-by: Thomas Hellström
---
 drivers/gpu/drm/xe/xe_gt.c      |  5 ++++-
 drivers/gpu/drm/xe/xe_migrate.c | 23 ++++++++++++++++++-----
 2 files changed, 22 insertions(+), 6 deletions(-)

diff --git a/drivers/gpu/drm/xe/xe_gt.c b/drivers/gpu/drm/xe/xe_gt.c
index 3af2adec1295..35474ddbaf97 100644
--- a/drivers/gpu/drm/xe/xe_gt.c
+++ b/drivers/gpu/drm/xe/xe_gt.c
@@ -437,7 +437,10 @@ static int all_fw_domain_init(struct xe_gt *gt)
 	 * USM has its only SA pool to non-block behind user operations
 	 */
 	if (gt_to_xe(gt)->info.has_usm) {
-		gt->usm.bb_pool = xe_sa_bo_manager_init(gt_to_tile(gt), SZ_1M, 16);
+		struct xe_device *xe = gt_to_xe(gt);
+
+		gt->usm.bb_pool = xe_sa_bo_manager_init(gt_to_tile(gt),
+							IS_DGFX(xe) ? SZ_1M : SZ_512K, 16);
 		if (IS_ERR(gt->usm.bb_pool)) {
 			err = PTR_ERR(gt->usm.bb_pool);
 			goto err_force_wake;
diff --git a/drivers/gpu/drm/xe/xe_migrate.c b/drivers/gpu/drm/xe/xe_migrate.c
index 0cc31837ef26..70480c305602 100644
--- a/drivers/gpu/drm/xe/xe_migrate.c
+++ b/drivers/gpu/drm/xe/xe_migrate.c
@@ -170,11 +170,6 @@ static int xe_migrate_prepare_vm(struct xe_tile *tile, struct xe_migrate *m,
 	if (!IS_DGFX(xe)) {
 		/* Write out batch too */
 		m->batch_base_ofs = NUM_PT_SLOTS * XE_PAGE_SIZE;
-		if (xe->info.has_usm) {
-			batch = tile->primary_gt->usm.bb_pool->bo;
-			m->usm_batch_base_ofs = m->batch_base_ofs;
-		}
-
 		for (i = 0; i < batch->size;
 		     i += vm->flags & XE_VM_FLAG_64K ? XE_64K_PAGE_SIZE :
 		     XE_PAGE_SIZE) {
@@ -185,6 +180,24 @@ static int xe_migrate_prepare_vm(struct xe_tile *tile, struct xe_migrate *m,
 				  entry);
 			level++;
 		}
+		if (xe->info.has_usm) {
+			xe_tile_assert(tile, batch->size == SZ_1M);
+
+			batch = tile->primary_gt->usm.bb_pool->bo;
+			m->usm_batch_base_ofs = m->batch_base_ofs + SZ_1M;
+			xe_tile_assert(tile, batch->size == SZ_512K);
+
+			for (i = 0; i < batch->size;
+			     i += vm->flags & XE_VM_FLAG_64K ? XE_64K_PAGE_SIZE :
+			     XE_PAGE_SIZE) {
+				entry = vm->pt_ops->pte_encode_bo(batch, i,
+								  pat_index, 0);
+
+				xe_map_wr(xe, &bo->vmap, map_ofs + level * 8, u64,
+					  entry);
+				level++;
+			}
+		}
 	} else {
 		u64 batch_addr = xe_bo_addr(batch, 0, XE_PAGE_SIZE);