author     Matthew Auld <matthew.auld@intel.com>    2023-10-06 11:46:16 +0300
committer  Rodrigo Vivi <rodrigo.vivi@intel.com>    2023-12-21 19:42:58 +0300
commit     e814389ff180514001df424f48645cf30f4a2a1e (patch)
tree       21569930d4a99bfc796ecc484b7754ca18fd3159 /drivers/gpu/drm/xe/xe_vm.c
parent     406be3cc186eec67367b87a2af91cb598ff8e239 (diff)
drm/xe: directly use pat_index for pte_encode
In a future patch userspace will be able to directly set the pat_index
as part of vm_bind. To support this we need to get away from using
xe_cache_level in the low level routines and rather just use the
pat_index directly.

v2: Rebase
v3: Some missed conversions, also prefer tile_to_xe() (Niranjana)
v4: remove leftover const (Lucas)

Signed-off-by: Matthew Auld <matthew.auld@intel.com>
Cc: Niranjana Vishwanathapura <niranjana.vishwanathapura@intel.com>
Cc: Pallavi Mishra <pallavi.mishra@intel.com>
Cc: Lucas De Marchi <lucas.demarchi@intel.com>
Cc: Matt Roper <matthew.d.roper@intel.com>
Reviewed-by: José Roberto de Souza <jose.souza@intel.com>
Reviewed-by: Matt Roper <matthew.d.roper@intel.com>
Reviewed-by: Lucas De Marchi <lucas.demarchi@intel.com>
Reviewed-by: Pallavi Mishra <pallavi.mishra@intel.com>
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
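For reference, a minimal sketch (not part of the patch) of the calling
convention this change establishes: the pat_index is looked up once from
the per-device PAT table at the call site, and the low-level encoder
consumes the raw u16 index rather than an enum xe_cache_level. The
SKETCH_* macros below are hypothetical stand-ins for the hardware PDE
PAT bit definitions, whose exact names the hunks below elide.

	/*
	 * Hypothetical stand-ins for the PDE PAT bit positions; the real
	 * definitions live in the xe driver headers.
	 */
	#define SKETCH_PDE_PAT0		BIT_ULL(3)
	#define SKETCH_PDE_PAT1		BIT_ULL(4)

	static u64 sketch_pde_encode_pat_index(u16 pat_index)
	{
		u64 pde = 0;

		/* Scatter the low bits of pat_index into the PDE PAT fields. */
		if (pat_index & BIT(0))
			pde |= SKETCH_PDE_PAT0;
		if (pat_index & BIT(1))
			pde |= SKETCH_PDE_PAT1;

		return pde;
	}

	/*
	 * Call-site pattern after the change: resolve the index from the
	 * per-device table, as xe_vm_pdp4_descriptor() now does, instead
	 * of passing a cache level down into the encoder.
	 */
	static u64 sketch_pdp4_descriptor(struct xe_vm *vm, struct xe_tile *tile)
	{
		u16 pat_index = tile_to_xe(tile)->pat.idx[XE_CACHE_WB];

		return vm->pt_ops->pde_encode_bo(vm->pt_root[tile->id]->bo, 0,
						 pat_index);
	}

Keeping the xe_cache_level-to-pat_index translation at the boundary is
what lets a later patch accept a raw pat_index from userspace via
vm_bind without touching the encoders again.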
Diffstat (limited to 'drivers/gpu/drm/xe/xe_vm.c')
-rw-r--r--  drivers/gpu/drm/xe/xe_vm.c | 24 +++++++++++-------------
1 file changed, 11 insertions(+), 13 deletions(-)
diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
index 665af2646243..035f3232e3b9 100644
--- a/drivers/gpu/drm/xe/xe_vm.c
+++ b/drivers/gpu/drm/xe/xe_vm.c
@@ -1211,9 +1211,8 @@ static struct drm_gpuvm_ops gpuvm_ops = {
.vm_free = xe_vm_free,
};
-static u64 pde_encode_cache(struct xe_device *xe, enum xe_cache_level cache)
+static u64 pde_encode_pat_index(struct xe_device *xe, u16 pat_index)
{
- u32 pat_index = xe->pat.idx[cache];
u64 pte = 0;
if (pat_index & BIT(0))
@@ -1225,9 +1224,8 @@ static u64 pde_encode_cache(struct xe_device *xe, enum xe_cache_level cache)
return pte;
}
-static u64 pte_encode_cache(struct xe_device *xe, enum xe_cache_level cache)
+static u64 pte_encode_pat_index(struct xe_device *xe, u16 pat_index)
{
- u32 pat_index = xe->pat.idx[cache];
u64 pte = 0;
if (pat_index & BIT(0))
@@ -1261,27 +1259,27 @@ static u64 pte_encode_ps(u32 pt_level)
}
static u64 xelp_pde_encode_bo(struct xe_bo *bo, u64 bo_offset,
- const enum xe_cache_level cache)
+ const u16 pat_index)
{
struct xe_device *xe = xe_bo_device(bo);
u64 pde;
pde = xe_bo_addr(bo, bo_offset, XE_PAGE_SIZE);
pde |= XE_PAGE_PRESENT | XE_PAGE_RW;
- pde |= pde_encode_cache(xe, cache);
+ pde |= pde_encode_pat_index(xe, pat_index);
return pde;
}
static u64 xelp_pte_encode_bo(struct xe_bo *bo, u64 bo_offset,
- enum xe_cache_level cache, u32 pt_level)
+ u16 pat_index, u32 pt_level)
{
struct xe_device *xe = xe_bo_device(bo);
u64 pte;
pte = xe_bo_addr(bo, bo_offset, XE_PAGE_SIZE);
pte |= XE_PAGE_PRESENT | XE_PAGE_RW;
- pte |= pte_encode_cache(xe, cache);
+ pte |= pte_encode_pat_index(xe, pat_index);
pte |= pte_encode_ps(pt_level);
if (xe_bo_is_vram(bo) || xe_bo_is_stolen_devmem(bo))
@@ -1291,7 +1289,7 @@ static u64 xelp_pte_encode_bo(struct xe_bo *bo, u64 bo_offset,
}
static u64 xelp_pte_encode_vma(u64 pte, struct xe_vma *vma,
- enum xe_cache_level cache, u32 pt_level)
+ u16 pat_index, u32 pt_level)
{
struct xe_device *xe = xe_vma_vm(vma)->xe;
@@ -1300,7 +1298,7 @@ static u64 xelp_pte_encode_vma(u64 pte, struct xe_vma *vma,
if (likely(!xe_vma_read_only(vma)))
pte |= XE_PAGE_RW;
- pte |= pte_encode_cache(xe, cache);
+ pte |= pte_encode_pat_index(xe, pat_index);
pte |= pte_encode_ps(pt_level);
if (unlikely(xe_vma_is_null(vma)))
@@ -1310,7 +1308,7 @@ static u64 xelp_pte_encode_vma(u64 pte, struct xe_vma *vma,
}
static u64 xelp_pte_encode_addr(struct xe_device *xe, u64 addr,
- enum xe_cache_level cache,
+ u16 pat_index,
u32 pt_level, bool devmem, u64 flags)
{
u64 pte;
@@ -1320,7 +1318,7 @@ static u64 xelp_pte_encode_addr(struct xe_device *xe, u64 addr,
pte = addr;
pte |= XE_PAGE_PRESENT | XE_PAGE_RW;
- pte |= pte_encode_cache(xe, cache);
+ pte |= pte_encode_pat_index(xe, pat_index);
pte |= pte_encode_ps(pt_level);
if (devmem)
@@ -1707,7 +1705,7 @@ struct xe_vm *xe_vm_lookup(struct xe_file *xef, u32 id)
u64 xe_vm_pdp4_descriptor(struct xe_vm *vm, struct xe_tile *tile)
{
return vm->pt_ops->pde_encode_bo(vm->pt_root[tile->id]->bo, 0,
- XE_CACHE_WB);
+ tile_to_xe(tile)->pat.idx[XE_CACHE_WB]);
}
static struct dma_fence *