author     Himal Prasad Ghimiray <himal.prasad.ghimiray@intel.com>  2023-12-12 21:25:30 +0300
committer  Rodrigo Vivi <rodrigo.vivi@intel.com>                    2023-12-21 19:46:15 +0300
commit     65ef8dbad1db9e35ca7af90e6958134595938d24 (patch)
tree       264f689f4312886e0bb5a4d7f5e69c69814300e7 /drivers/gpu/drm/xe/xe_migrate.c
parent     09427526793384fea6a13cc33ffebadb69fdcde4 (diff)
download   linux-65ef8dbad1db9e35ca7af90e6958134595938d24.tar.xz
drm/xe/xe2: Update emit_pte to use compression enabled PAT index
For indirectly accessed buffers, use the compression-enabled PAT index.

v2:
 - Fix parameter name.
v3:
 - Use a relevant define instead of a fixed number.

Cc: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Cc: Matthew Auld <matthew.auld@intel.com>
Signed-off-by: Himal Prasad Ghimiray <himal.prasad.ghimiray@intel.com>
Reviewed-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
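The core of the change is how emit_pte() now picks a PAT index. As a rough standalone sketch of that selection (relying on the xe driver's existing struct xe_device, GRAPHICS_VERx100() and xe->pat.idx[] definitions; pick_pat_index() is a hypothetical helper, not code from this patch), it reduces to:

/*
 * Hypothetical helper mirroring the selection this patch adds to
 * emit_pte(): on Xe2+ (GRAPHICS_VERx100 >= 2000), PTEs flagged as
 * is_comp_pte use the compression-enabled uncached PAT entry and the
 * rest use the plain uncached entry; pre-Xe2 platforms keep using the
 * write-back entry.
 */
static u16 pick_pat_index(struct xe_device *xe, bool is_comp_pte)
{
	if (GRAPHICS_VERx100(xe) >= 2000)
		return is_comp_pte ? xe->pat.idx[XE_CACHE_NONE_COMPRESSION] :
				     xe->pat.idx[XE_CACHE_NONE];

	return xe->pat.idx[XE_CACHE_WB];
}

In the hunks below, callers that map buffers which may be accessed indirectly (the copy source/destination and the clear target) pass is_comp_pte = true, while the CCS metadata pages pass false.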
Diffstat (limited to 'drivers/gpu/drm/xe/xe_migrate.c')
-rw-r--r--  drivers/gpu/drm/xe/xe_migrate.c  21
1 file changed, 15 insertions, 6 deletions
diff --git a/drivers/gpu/drm/xe/xe_migrate.c b/drivers/gpu/drm/xe/xe_migrate.c
index 40f49e47d79e..48ada083d0b3 100644
--- a/drivers/gpu/drm/xe/xe_migrate.c
+++ b/drivers/gpu/drm/xe/xe_migrate.c
@@ -425,15 +425,24 @@ static u32 pte_update_size(struct xe_migrate *m,
 
 static void emit_pte(struct xe_migrate *m,
 		     struct xe_bb *bb, u32 at_pt,
-		     bool is_vram,
+		     bool is_vram, bool is_comp_pte,
 		     struct xe_res_cursor *cur,
 		     u32 size, struct xe_bo *bo)
 {
-	u16 pat_index = tile_to_xe(m->tile)->pat.idx[XE_CACHE_WB];
+	struct xe_device *xe = tile_to_xe(m->tile);
+
+	u16 pat_index;
 	u32 ptes;
 	u64 ofs = at_pt * XE_PAGE_SIZE;
 	u64 cur_ofs;
 
+	/* Indirect access needs compression enabled uncached PAT index */
+	if (GRAPHICS_VERx100(xe) >= 2000)
+		pat_index = is_comp_pte ? xe->pat.idx[XE_CACHE_NONE_COMPRESSION] :
+					  xe->pat.idx[XE_CACHE_NONE];
+	else
+		pat_index = xe->pat.idx[XE_CACHE_WB];
+
 	/*
 	 * FIXME: Emitting VRAM PTEs to L0 PTs is forbidden. Currently
 	 * we're only emitting VRAM PTEs during sanity tests, so when
@@ -720,19 +729,19 @@ struct dma_fence *xe_migrate_copy(struct xe_migrate *m,
 		}
 
 		if (!src_is_vram)
-			emit_pte(m, bb, src_L0_pt, src_is_vram, &src_it, src_L0,
+			emit_pte(m, bb, src_L0_pt, src_is_vram, true, &src_it, src_L0,
 				 src_bo);
 		else
 			xe_res_next(&src_it, src_L0);
 
 		if (!dst_is_vram)
-			emit_pte(m, bb, dst_L0_pt, dst_is_vram, &dst_it, src_L0,
+			emit_pte(m, bb, dst_L0_pt, dst_is_vram, true, &dst_it, src_L0,
 				 dst_bo);
 		else
 			xe_res_next(&dst_it, src_L0);
 
 		if (copy_system_ccs)
-			emit_pte(m, bb, ccs_pt, false, &ccs_it, ccs_size, src_bo);
+			emit_pte(m, bb, ccs_pt, false, false, &ccs_it, ccs_size, src_bo);
 
 		bb->cs[bb->len++] = MI_BATCH_BUFFER_END;
 		update_idx = bb->len;
@@ -965,7 +974,7 @@ struct dma_fence *xe_migrate_clear(struct xe_migrate *m,
 		/* Preemption is enabled again by the ring ops. */
 		if (!clear_vram) {
-			emit_pte(m, bb, clear_L0_pt, clear_vram, &src_it, clear_L0,
+			emit_pte(m, bb, clear_L0_pt, clear_vram, true, &src_it, clear_L0,
 				 bo);
 		} else {
 			xe_res_next(&src_it, clear_L0);