author      Matt Roper <matthew.d.roper@intel.com>    2023-07-25 03:34:34 +0300
committer   Rodrigo Vivi <rodrigo.vivi@intel.com>     2023-12-21 19:37:54 +0300
commit      7a060d786cc1d75ffa04256826d805686b8f1043 (patch)
tree        287ef0d7ecacfbae6230b5ab50cc2c9b7e237e3f /drivers/gpu/drm/xe/xe_bo.c
parent      9700a1df0a5568a3eb8483de103d4078e273b36b (diff)
download    linux-7a060d786cc1d75ffa04256826d805686b8f1043.tar.xz
drm/xe/mtl: Map PPGTT as CPU:WC
On MTL and beyond, the GPU performs non-coherent accesses to the PPGTT
page tables.  These page tables should be mapped as CPU:WC.

Removes CAT errors triggered by xe_exec_basic@once-basic on MTL:

   xe 0000:00:02.0: [drm:__xe_pt_bind_vma [xe]] Preparing bind, with range [1a0000...1a0fff) engine 0000000000000000.
   xe 0000:00:02.0: [drm:xe_vm_dbg_print_entries [xe]] 1 entries to update
   xe 0000:00:02.0: [drm:xe_vm_dbg_print_entries [xe]] 0: Update level 3 at (0 + 1) [0...8000000000) f:0
   xe 0000:00:02.0: [drm] Engine memory cat error: guc_id=2
   xe 0000:00:02.0: [drm] Engine memory cat error: guc_id=2
   xe 0000:00:02.0: [drm] Timedout job: seqno=4294967169, guc_id=2, flags=0x4

v2:
 - Rename to XE_BO_PAGETABLE to make it more clear that this BO is the
   pagetable itself, rather than just being bound in the PPGTT. (Lucas)

Cc: Lucas De Marchi <lucas.demarchi@intel.com>
Reviewed-by: Lucas De Marchi <lucas.demarchi@intel.com>
Acked-by: Nirmoy Das <nirmoy.das@intel.com>
Link: https://lore.kernel.org/r/20230725003433.1992137-3-matthew.d.roper@intel.com
Signed-off-by: Matt Roper <matthew.d.roper@intel.com>
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
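The caching decision this patch introduces in xe_ttm_tt_create() is small enough to look at in isolation. The standalone C sketch below mirrors that check; the enum, the flag bit values, and the select_caching() helper are simplified stand-ins written for illustration, not the kernel's definitions.

#include <stdio.h>

/* Simplified stand-ins for the kernel types and flags used by the patch. */
enum ttm_caching { ttm_cached, ttm_write_combined };

#define XE_BO_SCANOUT_BIT  (1u << 0)   /* illustrative bit positions only */
#define XE_BO_PAGETABLE    (1u << 1)

/* Mirror of the selection logic added in xe_ttm_tt_create(). */
static enum ttm_caching select_caching(unsigned int bo_flags,
                                       unsigned int graphics_verx100)
{
        enum ttm_caching caching = ttm_cached;

        /* Scanout is never CPU-coherent; Xe_LPG+ page tables aren't either. */
        if ((bo_flags & XE_BO_SCANOUT_BIT) ||
            (graphics_verx100 >= 1270 && (bo_flags & XE_BO_PAGETABLE)))
                caching = ttm_write_combined;

        return caching;
}

int main(void)
{
        /* MTL (Xe_LPG) reports graphics version 12.70, i.e. verx100 == 1270. */
        printf("MTL pagetable BO: %s\n",
               select_caching(XE_BO_PAGETABLE, 1270) == ttm_write_combined ?
               "write-combined" : "cached");
        return 0;
}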
Diffstat (limited to 'drivers/gpu/drm/xe/xe_bo.c')
-rw-r--r--   drivers/gpu/drm/xe/xe_bo.c   16
1 file changed, 12 insertions, 4 deletions
diff --git a/drivers/gpu/drm/xe/xe_bo.c b/drivers/gpu/drm/xe/xe_bo.c
index a78ac158e967..4b7678db88f3 100644
--- a/drivers/gpu/drm/xe/xe_bo.c
+++ b/drivers/gpu/drm/xe/xe_bo.c
@@ -301,6 +301,7 @@ static struct ttm_tt *xe_ttm_tt_create(struct ttm_buffer_object *ttm_bo,
 	struct xe_device *xe = xe_bo_device(bo);
 	struct xe_ttm_tt *tt;
 	unsigned long extra_pages;
+	enum ttm_caching caching = ttm_cached;
 	int err;
 
 	tt = kzalloc(sizeof(*tt), GFP_KERNEL);
@@ -314,10 +315,17 @@ static struct ttm_tt *xe_ttm_tt_create(struct ttm_buffer_object *ttm_bo,
 		extra_pages = DIV_ROUND_UP(xe_device_ccs_bytes(xe, bo->size),
 					   PAGE_SIZE);
 
-	/* TODO: Select caching mode */
-	err = ttm_tt_init(&tt->ttm, &bo->ttm, page_flags,
-			  bo->flags & XE_BO_SCANOUT_BIT ? ttm_write_combined : ttm_cached,
-			  extra_pages);
+	/*
+	 * Display scanout is always non-coherent with the CPU cache.
+	 *
+	 * For Xe_LPG and beyond, PPGTT PTE lookups are also non-coherent and
+	 * require a CPU:WC mapping.
+	 */
+	if (bo->flags & XE_BO_SCANOUT_BIT ||
+	    (xe->info.graphics_verx100 >= 1270 && bo->flags & XE_BO_PAGETABLE))
+		caching = ttm_write_combined;
+
+	err = ttm_tt_init(&tt->ttm, &bo->ttm, page_flags, caching, extra_pages);
 	if (err) {
 		kfree(tt);
 		return NULL;
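For context, the new XE_BO_PAGETABLE flag only has an effect if the page-table allocation path sets it when creating the BO. The fragment below sketches such a caller; it is not part of this commit's diff (which is limited to xe_bo.c), and the xe_bo_create_pin_map() helper name, the surrounding creation flags, and the xe_pt.c location are assumptions made for illustration.

/*
 * Hypothetical caller-side fragment (e.g. in xe_pt.c): allocate a BO that
 * backs a PPGTT page table and tag it with XE_BO_PAGETABLE so that
 * xe_ttm_tt_create() above selects ttm_write_combined on Xe_LPG and later.
 * Helper name and flag set are assumptions, not taken from this patch.
 */
bo = xe_bo_create_pin_map(vm->xe, tile, vm, SZ_4K,
			  ttm_bo_type_kernel,
			  XE_BO_CREATE_VRAM_IF_DGFX(tile) |
			  XE_BO_CREATE_PINNED_BIT |
			  XE_BO_PAGETABLE);
if (IS_ERR(bo))
	return ERR_CAST(bo);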