 drivers/gpu/drm/xe/tests/xe_migrate.c | 15
 drivers/gpu/drm/xe/xe_bo.h            | 52
 drivers/gpu/drm/xe/xe_ggtt.c          | 24
 drivers/gpu/drm/xe/xe_migrate.c       | 69
 drivers/gpu/drm/xe/xe_pt.c            | 40
 drivers/gpu/drm/xe/xe_vm.c            | 20
 6 files changed, 112 insertions(+), 108 deletions(-)
diff --git a/drivers/gpu/drm/xe/tests/xe_migrate.c b/drivers/gpu/drm/xe/tests/xe_migrate.c
index cdcecf8d5eef..0f4371ad1fd9 100644
--- a/drivers/gpu/drm/xe/tests/xe_migrate.c
+++ b/drivers/gpu/drm/xe/tests/xe_migrate.c
@@ -265,7 +265,7 @@ static void xe_migrate_sanity_test(struct xe_migrate *m, struct kunit *test)
goto vunmap;
}
- pt = xe_bo_create_pin_map(xe, m->gt, m->eng->vm, GEN8_PAGE_SIZE,
+ pt = xe_bo_create_pin_map(xe, m->gt, m->eng->vm, XE_PAGE_SIZE,
ttm_bo_type_kernel,
XE_BO_CREATE_VRAM_IF_DGFX(m->gt) |
XE_BO_CREATE_PINNED_BIT);
@@ -294,20 +294,21 @@ static void xe_migrate_sanity_test(struct xe_migrate *m, struct kunit *test)
}
kunit_info(test, "Starting tests, top level PT addr: %lx, special pagetable base addr: %lx\n",
- (unsigned long)xe_bo_main_addr(m->eng->vm->pt_root[id]->bo, GEN8_PAGE_SIZE),
- (unsigned long)xe_bo_main_addr(m->pt_bo, GEN8_PAGE_SIZE));
+ (unsigned long)xe_bo_main_addr(m->eng->vm->pt_root[id]->bo, XE_PAGE_SIZE),
+ (unsigned long)xe_bo_main_addr(m->pt_bo, XE_PAGE_SIZE));
/* First part of the test, are we updating our pagetable bo with a new entry? */
- xe_map_wr(xe, &bo->vmap, GEN8_PAGE_SIZE * (NUM_KERNEL_PDE - 1), u64, 0xdeaddeadbeefbeef);
+ xe_map_wr(xe, &bo->vmap, XE_PAGE_SIZE * (NUM_KERNEL_PDE - 1), u64,
+ 0xdeaddeadbeefbeef);
expected = gen8_pte_encode(NULL, pt, 0, XE_CACHE_WB, 0, 0);
if (m->eng->vm->flags & XE_VM_FLAGS_64K)
- expected |= GEN12_PTE_PS64;
+ expected |= XE_PTE_PS64;
xe_res_first(pt->ttm.resource, 0, pt->size, &src_it);
emit_pte(m, bb, NUM_KERNEL_PDE - 1, xe_bo_is_vram(pt),
- &src_it, GEN8_PAGE_SIZE, pt);
+ &src_it, XE_PAGE_SIZE, pt);
run_sanity_job(m, xe, bb, bb->len, "Writing PTE for our fake PT", test);
- retval = xe_map_rd(xe, &bo->vmap, GEN8_PAGE_SIZE * (NUM_KERNEL_PDE - 1),
+ retval = xe_map_rd(xe, &bo->vmap, XE_PAGE_SIZE * (NUM_KERNEL_PDE - 1),
u64);
check(retval, expected, "PTE entry write", test);
diff --git a/drivers/gpu/drm/xe/xe_bo.h b/drivers/gpu/drm/xe/xe_bo.h
index effa9d0cf0f6..8354d05ccdf3 100644
--- a/drivers/gpu/drm/xe/xe_bo.h
+++ b/drivers/gpu/drm/xe/xe_bo.h
@@ -39,32 +39,32 @@
#define PPAT_CACHED BIT_ULL(7)
#define PPAT_DISPLAY_ELLC BIT_ULL(4)
-#define GEN8_PTE_SHIFT 12
-#define GEN8_PAGE_SIZE (1 << GEN8_PTE_SHIFT)
-#define GEN8_PTE_MASK (GEN8_PAGE_SIZE - 1)
-#define GEN8_PDE_SHIFT (GEN8_PTE_SHIFT - 3)
-#define GEN8_PDES (1 << GEN8_PDE_SHIFT)
-#define GEN8_PDE_MASK (GEN8_PDES - 1)
-
-#define GEN8_64K_PTE_SHIFT 16
-#define GEN8_64K_PAGE_SIZE (1 << GEN8_64K_PTE_SHIFT)
-#define GEN8_64K_PTE_MASK (GEN8_64K_PAGE_SIZE - 1)
-#define GEN8_64K_PDE_MASK (GEN8_PDE_MASK >> 4)
-
-#define GEN8_PDE_PS_2M BIT_ULL(7)
-#define GEN8_PDPE_PS_1G BIT_ULL(7)
-#define GEN8_PDE_IPS_64K BIT_ULL(11)
-
-#define GEN12_GGTT_PTE_LM BIT_ULL(1)
-#define GEN12_USM_PPGTT_PTE_AE BIT_ULL(10)
-#define GEN12_PPGTT_PTE_LM BIT_ULL(11)
-#define GEN12_PDE_64K BIT_ULL(6)
-#define GEN12_PTE_PS64 BIT_ULL(8)
-
-#define GEN8_PAGE_PRESENT BIT_ULL(0)
-#define GEN8_PAGE_RW BIT_ULL(1)
-
-#define PTE_READ_ONLY BIT(0)
+#define XE_PTE_SHIFT 12
+#define XE_PAGE_SIZE (1 << XE_PTE_SHIFT)
+#define XE_PTE_MASK (XE_PAGE_SIZE - 1)
+#define XE_PDE_SHIFT (XE_PTE_SHIFT - 3)
+#define XE_PDES (1 << XE_PDE_SHIFT)
+#define XE_PDE_MASK (XE_PDES - 1)
+
+#define XE_64K_PTE_SHIFT 16
+#define XE_64K_PAGE_SIZE (1 << XE_64K_PTE_SHIFT)
+#define XE_64K_PTE_MASK (XE_64K_PAGE_SIZE - 1)
+#define XE_64K_PDE_MASK (XE_PDE_MASK >> 4)
+
+#define XE_PDE_PS_2M BIT_ULL(7)
+#define XE_PDPE_PS_1G BIT_ULL(7)
+#define XE_PDE_IPS_64K BIT_ULL(11)
+
+#define XE_GGTT_PTE_LM BIT_ULL(1)
+#define XE_USM_PPGTT_PTE_AE BIT_ULL(10)
+#define XE_PPGTT_PTE_LM BIT_ULL(11)
+#define XE_PDE_64K BIT_ULL(6)
+#define XE_PTE_PS64 BIT_ULL(8)
+
+#define XE_PAGE_PRESENT BIT_ULL(0)
+#define XE_PAGE_RW BIT_ULL(1)
+
+#define XE_PTE_READ_ONLY BIT(0)
#define XE_PL_SYSTEM TTM_PL_SYSTEM
#define XE_PL_TT TTM_PL_TT
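
The renamed geometry macros keep the classic radix-512 layout: a 4 KiB page holds 2^(12-3) = 512 eight-byte entries, which is why XE_PDE_SHIFT is defined as XE_PTE_SHIFT - 3. A minimal sketch, assuming only the values above, of how the renamed bits compose into a leaf PTE for a VRAM page; example_vram_pte() is a hypothetical helper, not part of this patch:

#include <linux/bits.h>
#include <linux/types.h>

/* Hypothetical helper: compose a present, writable PTE for a page in
 * local memory, using the XE_* names defined above.
 */
static u64 example_vram_pte(u64 page_addr)
{
	u64 pte = page_addr & ~(u64)XE_PTE_MASK;	/* 4 KiB-aligned address */

	pte |= XE_PAGE_PRESENT | XE_PAGE_RW;		/* valid and writable */
	pte |= XE_PPGTT_PTE_LM;				/* backed by VRAM */
	return pte;
}
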
diff --git a/drivers/gpu/drm/xe/xe_ggtt.c b/drivers/gpu/drm/xe/xe_ggtt.c
index 0fda9a18049b..dbc45ef084b4 100644
--- a/drivers/gpu/drm/xe/xe_ggtt.c
+++ b/drivers/gpu/drm/xe/xe_ggtt.c
@@ -30,11 +30,11 @@ u64 xe_ggtt_pte_encode(struct xe_bo *bo, u64 bo_offset)
u64 pte;
bool is_vram;
- pte = xe_bo_addr(bo, bo_offset, GEN8_PAGE_SIZE, &is_vram);
- pte |= GEN8_PAGE_PRESENT;
+ pte = xe_bo_addr(bo, bo_offset, XE_PAGE_SIZE, &is_vram);
+ pte |= XE_PAGE_PRESENT;
if (is_vram)
- pte |= GEN12_GGTT_PTE_LM;
+ pte |= XE_GGTT_PTE_LM;
/* FIXME: vfunc + pass in caching rules */
if (xe->info.platform == XE_METEORLAKE) {
@@ -56,10 +56,10 @@ static unsigned int probe_gsm_size(struct pci_dev *pdev)
void xe_ggtt_set_pte(struct xe_ggtt *ggtt, u64 addr, u64 pte)
{
- XE_BUG_ON(addr & GEN8_PTE_MASK);
+ XE_BUG_ON(addr & XE_PTE_MASK);
XE_BUG_ON(addr >= ggtt->size);
- writeq(pte, &ggtt->gsm[addr >> GEN8_PTE_SHIFT]);
+ writeq(pte, &ggtt->gsm[addr >> XE_PTE_SHIFT]);
}
static void xe_ggtt_clear(struct xe_ggtt *ggtt, u64 start, u64 size)
@@ -76,7 +76,7 @@ static void xe_ggtt_clear(struct xe_ggtt *ggtt, u64 start, u64 size)
while (start < end) {
xe_ggtt_set_pte(ggtt, start, scratch_pte);
- start += GEN8_PAGE_SIZE;
+ start += XE_PAGE_SIZE;
}
}
@@ -107,7 +107,7 @@ int xe_ggtt_init_noalloc(struct xe_gt *gt, struct xe_ggtt *ggtt)
}
ggtt->gsm = gt->mmio.regs + SZ_8M;
- ggtt->size = (gsm_size / 8) * (u64)GEN8_PAGE_SIZE;
+ ggtt->size = (gsm_size / 8) * (u64)XE_PAGE_SIZE;
if (IS_DGFX(xe) && xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K)
ggtt->flags |= XE_GGTT_FLAGS_64K;
@@ -167,7 +167,7 @@ int xe_ggtt_init(struct xe_gt *gt, struct xe_ggtt *ggtt)
else
flags |= XE_BO_CREATE_VRAM_IF_DGFX(gt);
- ggtt->scratch = xe_bo_create_pin_map(xe, gt, NULL, GEN8_PAGE_SIZE,
+ ggtt->scratch = xe_bo_create_pin_map(xe, gt, NULL, XE_PAGE_SIZE,
ttm_bo_type_kernel,
flags);
@@ -224,8 +224,8 @@ void xe_ggtt_printk(struct xe_ggtt *ggtt, const char *prefix)
scratch_pte = xe_ggtt_pte_encode(ggtt->scratch, 0);
printk("%sGlobal GTT:", prefix);
- for (addr = 0; addr < ggtt->size; addr += GEN8_PAGE_SIZE) {
- unsigned int i = addr / GEN8_PAGE_SIZE;
+ for (addr = 0; addr < ggtt->size; addr += XE_PAGE_SIZE) {
+ unsigned int i = addr / XE_PAGE_SIZE;
XE_BUG_ON(addr > U32_MAX);
if (ggtt->gsm[i] == scratch_pte)
@@ -261,7 +261,7 @@ void xe_ggtt_map_bo(struct xe_ggtt *ggtt, struct xe_bo *bo)
u64 start = bo->ggtt_node.start;
u64 offset, pte;
- for (offset = 0; offset < bo->size; offset += GEN8_PAGE_SIZE) {
+ for (offset = 0; offset < bo->size; offset += XE_PAGE_SIZE) {
pte = xe_ggtt_pte_encode(bo, offset);
xe_ggtt_set_pte(ggtt, start + offset, pte);
}
@@ -309,7 +309,7 @@ int xe_ggtt_insert_bo(struct xe_ggtt *ggtt, struct xe_bo *bo)
{
u64 alignment;
- alignment = GEN8_PAGE_SIZE;
+ alignment = XE_PAGE_SIZE;
if (xe_bo_is_vram(bo) && ggtt->flags & XE_GGTT_FLAGS_64K)
alignment = SZ_64K;
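
The sizing rule in xe_ggtt_init_noalloc() follows from each GSM slot being an 8-byte PTE that maps one 4 KiB page. A sketch of that arithmetic, with an assumed GSM size for illustration:

/* Sketch: bytes of GGTT address space covered by a GSM of a given
 * size; one u64 PTE per XE_PAGE_SIZE page.
 */
static u64 example_ggtt_span(u64 gsm_size)
{
	u64 num_ptes = gsm_size / sizeof(u64);

	return num_ptes * (u64)XE_PAGE_SIZE;
}

/* e.g. an 8 MiB GSM gives 1Mi PTEs, i.e. a 4 GiB global GTT. */
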
diff --git a/drivers/gpu/drm/xe/xe_migrate.c b/drivers/gpu/drm/xe/xe_migrate.c
index 2169d687ba3f..a8e66b84dc63 100644
--- a/drivers/gpu/drm/xe/xe_migrate.c
+++ b/drivers/gpu/drm/xe/xe_migrate.c
@@ -147,7 +147,7 @@ static int xe_migrate_create_cleared_bo(struct xe_migrate *m, struct xe_vm *vm)
return PTR_ERR(m->cleared_bo);
xe_map_memset(xe, &m->cleared_bo->vmap, 0, 0x00, cleared_size);
- vram_addr = xe_bo_addr(m->cleared_bo, 0, GEN8_PAGE_SIZE, &is_vram);
+ vram_addr = xe_bo_addr(m->cleared_bo, 0, XE_PAGE_SIZE, &is_vram);
XE_BUG_ON(!is_vram);
m->cleared_vram_ofs = xe_migrate_vram_ofs(vram_addr);
@@ -166,9 +166,9 @@ static int xe_migrate_prepare_vm(struct xe_gt *gt, struct xe_migrate *m,
int ret;
/* Can't bump NUM_PT_SLOTS too high */
- BUILD_BUG_ON(NUM_PT_SLOTS > SZ_2M/GEN8_PAGE_SIZE);
+ BUILD_BUG_ON(NUM_PT_SLOTS > SZ_2M/XE_PAGE_SIZE);
/* Must be a multiple of 64K to support all platforms */
- BUILD_BUG_ON(NUM_PT_SLOTS * GEN8_PAGE_SIZE % SZ_64K);
+ BUILD_BUG_ON(NUM_PT_SLOTS * XE_PAGE_SIZE % SZ_64K);
/* And one slot reserved for the 4KiB page table updates */
BUILD_BUG_ON(!(NUM_KERNEL_PDE & 1));
@@ -176,7 +176,7 @@ static int xe_migrate_prepare_vm(struct xe_gt *gt, struct xe_migrate *m,
XE_BUG_ON(m->batch_base_ofs + batch->size >= SZ_2M);
bo = xe_bo_create_pin_map(vm->xe, m->gt, vm,
- num_entries * GEN8_PAGE_SIZE,
+ num_entries * XE_PAGE_SIZE,
ttm_bo_type_kernel,
XE_BO_CREATE_VRAM_IF_DGFX(m->gt) |
XE_BO_CREATE_PINNED_BIT);
@@ -189,14 +189,14 @@ static int xe_migrate_prepare_vm(struct xe_gt *gt, struct xe_migrate *m,
return ret;
}
- entry = gen8_pde_encode(bo, bo->size - GEN8_PAGE_SIZE, XE_CACHE_WB);
+ entry = gen8_pde_encode(bo, bo->size - XE_PAGE_SIZE, XE_CACHE_WB);
xe_pt_write(xe, &vm->pt_root[id]->bo->vmap, 0, entry);
- map_ofs = (num_entries - num_level) * GEN8_PAGE_SIZE;
+ map_ofs = (num_entries - num_level) * XE_PAGE_SIZE;
/* Map the entire BO in our level 0 pt */
for (i = 0, level = 0; i < num_entries; level++) {
- entry = gen8_pte_encode(NULL, bo, i * GEN8_PAGE_SIZE,
+ entry = gen8_pte_encode(NULL, bo, i * XE_PAGE_SIZE,
XE_CACHE_WB, 0, 0);
xe_map_wr(xe, &bo->vmap, map_ofs + level * 8, u64, entry);
@@ -211,10 +211,10 @@ static int xe_migrate_prepare_vm(struct xe_gt *gt, struct xe_migrate *m,
XE_BUG_ON(xe->info.supports_usm);
/* Write out batch too */
- m->batch_base_ofs = NUM_PT_SLOTS * GEN8_PAGE_SIZE;
+ m->batch_base_ofs = NUM_PT_SLOTS * XE_PAGE_SIZE;
for (i = 0; i < batch->size;
- i += vm->flags & XE_VM_FLAGS_64K ? GEN8_64K_PAGE_SIZE :
- GEN8_PAGE_SIZE) {
+ i += vm->flags & XE_VM_FLAGS_64K ? XE_64K_PAGE_SIZE :
+ XE_PAGE_SIZE) {
entry = gen8_pte_encode(NULL, batch, i,
XE_CACHE_WB, 0, 0);
@@ -224,13 +224,13 @@ static int xe_migrate_prepare_vm(struct xe_gt *gt, struct xe_migrate *m,
}
} else {
bool is_vram;
- u64 batch_addr = xe_bo_addr(batch, 0, GEN8_PAGE_SIZE, &is_vram);
+ u64 batch_addr = xe_bo_addr(batch, 0, XE_PAGE_SIZE, &is_vram);
m->batch_base_ofs = xe_migrate_vram_ofs(batch_addr);
if (xe->info.supports_usm) {
batch = gt->usm.bb_pool->bo;
- batch_addr = xe_bo_addr(batch, 0, GEN8_PAGE_SIZE,
+ batch_addr = xe_bo_addr(batch, 0, XE_PAGE_SIZE,
&is_vram);
m->usm_batch_base_ofs = xe_migrate_vram_ofs(batch_addr);
}
@@ -240,20 +240,20 @@ static int xe_migrate_prepare_vm(struct xe_gt *gt, struct xe_migrate *m,
u32 flags = 0;
if (vm->flags & XE_VM_FLAGS_64K && level == 1)
- flags = GEN12_PDE_64K;
+ flags = XE_PDE_64K;
entry = gen8_pde_encode(bo, map_ofs + (level - 1) *
- GEN8_PAGE_SIZE, XE_CACHE_WB);
- xe_map_wr(xe, &bo->vmap, map_ofs + GEN8_PAGE_SIZE * level, u64,
+ XE_PAGE_SIZE, XE_CACHE_WB);
+ xe_map_wr(xe, &bo->vmap, map_ofs + XE_PAGE_SIZE * level, u64,
entry | flags);
}
/* Write PDE's that point to our BO. */
for (i = 0; i < num_entries - num_level; i++) {
- entry = gen8_pde_encode(bo, i * GEN8_PAGE_SIZE,
+ entry = gen8_pde_encode(bo, i * XE_PAGE_SIZE,
XE_CACHE_WB);
- xe_map_wr(xe, &bo->vmap, map_ofs + GEN8_PAGE_SIZE +
+ xe_map_wr(xe, &bo->vmap, map_ofs + XE_PAGE_SIZE +
(i + 1) * 8, u64, entry);
}
@@ -262,9 +262,9 @@ static int xe_migrate_prepare_vm(struct xe_gt *gt, struct xe_migrate *m,
u64 pos, ofs, flags;
level = 2;
- ofs = map_ofs + GEN8_PAGE_SIZE * level + 256 * 8;
- flags = GEN8_PAGE_RW | GEN8_PAGE_PRESENT | PPAT_CACHED |
- GEN12_PPGTT_PTE_LM | GEN8_PDPE_PS_1G;
+ ofs = map_ofs + XE_PAGE_SIZE * level + 256 * 8;
+ flags = XE_PAGE_RW | XE_PAGE_PRESENT | PPAT_CACHED |
+ XE_PPGTT_PTE_LM | XE_PDPE_PS_1G;
/*
* Use 1GB pages, it shouldn't matter the physical amount of
@@ -294,10 +294,10 @@ static int xe_migrate_prepare_vm(struct xe_gt *gt, struct xe_migrate *m,
* the different addresses in VM.
*/
#define NUM_VMUSA_UNIT_PER_PAGE 32
-#define VM_SA_UPDATE_UNIT_SIZE (GEN8_PAGE_SIZE / NUM_VMUSA_UNIT_PER_PAGE)
+#define VM_SA_UPDATE_UNIT_SIZE (XE_PAGE_SIZE / NUM_VMUSA_UNIT_PER_PAGE)
#define NUM_VMUSA_WRITES_PER_UNIT (VM_SA_UPDATE_UNIT_SIZE / sizeof(u64))
drm_suballoc_manager_init(&m->vm_update_sa,
- (map_ofs / GEN8_PAGE_SIZE - NUM_KERNEL_PDE) *
+ (map_ofs / XE_PAGE_SIZE - NUM_KERNEL_PDE) *
NUM_VMUSA_UNIT_PER_PAGE, 0);
m->pt_bo = bo;
@@ -403,7 +403,7 @@ static u32 pte_update_size(struct xe_migrate *m,
if (!is_vram) {
/* Clip L0 to available size */
u64 size = min(*L0, (u64)avail_pts * SZ_2M);
- u64 num_4k_pages = DIV_ROUND_UP(size, GEN8_PAGE_SIZE);
+ u64 num_4k_pages = DIV_ROUND_UP(size, XE_PAGE_SIZE);
*L0 = size;
*L0_ofs = xe_migrate_vm_addr(pt_ofs, 0);
@@ -433,7 +433,7 @@ static void emit_pte(struct xe_migrate *m,
u32 size, struct xe_bo *bo)
{
u32 ptes;
- u64 ofs = at_pt * GEN8_PAGE_SIZE;
+ u64 ofs = at_pt * XE_PAGE_SIZE;
u64 cur_ofs;
/*
@@ -443,7 +443,7 @@ static void emit_pte(struct xe_migrate *m,
* on running tests.
*/
- ptes = DIV_ROUND_UP(size, GEN8_PAGE_SIZE);
+ ptes = DIV_ROUND_UP(size, XE_PAGE_SIZE);
while (ptes) {
u32 chunk = min(0x1ffU, ptes);
@@ -466,13 +466,13 @@ static void emit_pte(struct xe_migrate *m,
if ((m->eng->vm->flags & XE_VM_FLAGS_64K) &&
!(cur_ofs & (16 * 8 - 1))) {
XE_WARN_ON(!IS_ALIGNED(addr, SZ_64K));
- addr |= GEN12_PTE_PS64;
+ addr |= XE_PTE_PS64;
}
addr += vram_region_io_offset(bo->ttm.resource);
- addr |= GEN12_PPGTT_PTE_LM;
+ addr |= XE_PPGTT_PTE_LM;
}
- addr |= PPAT_CACHED | GEN8_PAGE_PRESENT | GEN8_PAGE_RW;
+ addr |= PPAT_CACHED | XE_PAGE_PRESENT | XE_PAGE_RW;
bb->cs[bb->len++] = lower_32_bits(addr);
bb->cs[bb->len++] = upper_32_bits(addr);
@@ -697,7 +697,8 @@ struct dma_fence *xe_migrate_copy(struct xe_migrate *m,
bb->cs[bb->len++] = MI_BATCH_BUFFER_END;
update_idx = bb->len;
- emit_copy(gt, bb, src_L0_ofs, dst_L0_ofs, src_L0, GEN8_PAGE_SIZE);
+ emit_copy(gt, bb, src_L0_ofs, dst_L0_ofs, src_L0,
+ XE_PAGE_SIZE);
flush_flags = xe_migrate_ccs_copy(m, bb, src_L0_ofs, src_is_vram,
dst_L0_ofs, dst_is_vram,
src_L0, ccs_ofs, copy_ccs);
@@ -915,7 +916,7 @@ struct dma_fence *xe_migrate_clear(struct xe_migrate *m,
bb->cs[bb->len++] = MI_BATCH_BUFFER_END;
update_idx = bb->len;
- emit_clear(gt, bb, clear_L0_ofs, clear_L0, GEN8_PAGE_SIZE,
+ emit_clear(gt, bb, clear_L0_ofs, clear_L0, XE_PAGE_SIZE,
clear_vram);
if (xe_device_has_flat_ccs(xe) && clear_vram) {
emit_copy_ccs(gt, bb, clear_L0_ofs, true,
@@ -985,7 +986,7 @@ static void write_pgtable(struct xe_gt *gt, struct xe_bb *bb, u64 ppgtt_ofs,
bool is_vram;
ppgtt_ofs = xe_migrate_vram_ofs(xe_bo_addr(update->pt_bo, 0,
- GEN8_PAGE_SIZE,
+ XE_PAGE_SIZE,
&is_vram));
XE_BUG_ON(!is_vram);
}
@@ -1202,7 +1203,7 @@ xe_migrate_update_pgtables(struct xe_migrate *m,
/* Map our PT's to gtt */
bb->cs[bb->len++] = MI_STORE_DATA_IMM | BIT(21) |
(num_updates * 2 + 1);
- bb->cs[bb->len++] = ppgtt_ofs * GEN8_PAGE_SIZE + page_ofs;
+ bb->cs[bb->len++] = ppgtt_ofs * XE_PAGE_SIZE + page_ofs;
bb->cs[bb->len++] = 0; /* upper_32_bits */
for (i = 0; i < num_updates; i++) {
@@ -1220,9 +1221,9 @@ xe_migrate_update_pgtables(struct xe_migrate *m,
update_idx = bb->len;
addr = xe_migrate_vm_addr(ppgtt_ofs, 0) +
- (page_ofs / sizeof(u64)) * GEN8_PAGE_SIZE;
+ (page_ofs / sizeof(u64)) * XE_PAGE_SIZE;
for (i = 0; i < num_updates; i++)
- write_pgtable(m->gt, bb, addr + i * GEN8_PAGE_SIZE,
+ write_pgtable(m->gt, bb, addr + i * XE_PAGE_SIZE,
&updates[i], pt_update);
} else {
/* phys pages, no preamble required */
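
The NUM_VMUSA_UNIT_PER_PAGE and VM_SA_UPDATE_UNIT_SIZE macros touched in this file split each remaining 4 KiB slot of the page-table BO into 32 update units. A sketch of the arithmetic those macros encode, assuming XE_PAGE_SIZE is 4096:

#include <linux/build_bug.h>

/* With XE_PAGE_SIZE = 4096 and 32 units per page: */
static_assert(VM_SA_UPDATE_UNIT_SIZE == 128,
	      "each VM update unit is 128 bytes");
static_assert(NUM_VMUSA_WRITES_PER_UNIT == 16,
	      "16 u64 PTE writes fit in one unit");
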
diff --git a/drivers/gpu/drm/xe/xe_pt.c b/drivers/gpu/drm/xe/xe_pt.c
index 7aa12f86e55b..f15282996c3b 100644
--- a/drivers/gpu/drm/xe/xe_pt.c
+++ b/drivers/gpu/drm/xe/xe_pt.c
@@ -63,8 +63,8 @@ u64 gen8_pde_encode(struct xe_bo *bo, u64 bo_offset,
u64 pde;
bool is_vram;
- pde = xe_bo_addr(bo, bo_offset, GEN8_PAGE_SIZE, &is_vram);
- pde |= GEN8_PAGE_PRESENT | GEN8_PAGE_RW;
+ pde = xe_bo_addr(bo, bo_offset, XE_PAGE_SIZE, &is_vram);
+ pde |= XE_PAGE_PRESENT | XE_PAGE_RW;
XE_WARN_ON(IS_DGFX(xe_bo_device(bo)) && !is_vram);
@@ -100,10 +100,10 @@ static dma_addr_t vma_addr(struct xe_vma *vma, u64 offset,
static u64 __gen8_pte_encode(u64 pte, enum xe_cache_level cache, u32 flags,
u32 pt_level)
{
- pte |= GEN8_PAGE_PRESENT | GEN8_PAGE_RW;
+ pte |= XE_PAGE_PRESENT | XE_PAGE_RW;
- if (unlikely(flags & PTE_READ_ONLY))
- pte &= ~GEN8_PAGE_RW;
+ if (unlikely(flags & XE_PTE_READ_ONLY))
+ pte &= ~XE_PAGE_RW;
/* FIXME: I don't think the PPAT handling is correct for MTL */
@@ -120,9 +120,9 @@ static u64 __gen8_pte_encode(u64 pte, enum xe_cache_level cache, u32 flags,
}
if (pt_level == 1)
- pte |= GEN8_PDE_PS_2M;
+ pte |= XE_PDE_PS_2M;
else if (pt_level == 2)
- pte |= GEN8_PDPE_PS_1G;
+ pte |= XE_PDPE_PS_1G;
/* XXX: Does hw support 1 GiB pages? */
XE_BUG_ON(pt_level > 2);
@@ -152,14 +152,14 @@ u64 gen8_pte_encode(struct xe_vma *vma, struct xe_bo *bo,
bool is_vram;
if (vma)
- pte = vma_addr(vma, offset, GEN8_PAGE_SIZE, &is_vram);
+ pte = vma_addr(vma, offset, XE_PAGE_SIZE, &is_vram);
else
- pte = xe_bo_addr(bo, offset, GEN8_PAGE_SIZE, &is_vram);
+ pte = xe_bo_addr(bo, offset, XE_PAGE_SIZE, &is_vram);
if (is_vram) {
- pte |= GEN12_PPGTT_PTE_LM;
+ pte |= XE_PPGTT_PTE_LM;
if (vma && vma->use_atomic_access_pte_bit)
- pte |= GEN12_USM_PPGTT_PTE_AE;
+ pte |= XE_USM_PPGTT_PTE_AE;
}
return __gen8_pte_encode(pte, cache, flags, pt_level);
@@ -210,7 +210,7 @@ struct xe_pt *xe_pt_create(struct xe_vm *vm, struct xe_gt *gt,
int err;
size = !level ? sizeof(struct xe_pt) : sizeof(struct xe_pt_dir) +
- GEN8_PDES * sizeof(struct xe_ptw *);
+ XE_PDES * sizeof(struct xe_ptw *);
pt = kzalloc(size, GFP_KERNEL);
if (!pt)
return ERR_PTR(-ENOMEM);
@@ -264,7 +264,7 @@ void xe_pt_populate_empty(struct xe_gt *gt, struct xe_vm *vm,
xe_map_memset(vm->xe, map, 0, 0, SZ_4K);
} else {
empty = __xe_pt_empty_pte(gt, vm, pt->level);
- for (i = 0; i < GEN8_PDES; i++)
+ for (i = 0; i < XE_PDES; i++)
xe_pt_write(vm->xe, map, i, empty);
}
}
@@ -279,7 +279,7 @@ void xe_pt_populate_empty(struct xe_gt *gt, struct xe_vm *vm,
*/
unsigned int xe_pt_shift(unsigned int level)
{
- return GEN8_PTE_SHIFT + GEN8_PDE_SHIFT * level;
+ return XE_PTE_SHIFT + XE_PDE_SHIFT * level;
}
/**
@@ -306,7 +306,7 @@ void xe_pt_destroy(struct xe_pt *pt, u32 flags, struct llist_head *deferred)
if (pt->level > 0 && pt->num_live) {
struct xe_pt_dir *pt_dir = as_xe_pt_dir(pt);
- for (i = 0; i < GEN8_PDES; i++) {
+ for (i = 0; i < XE_PDES; i++) {
if (xe_pt_entry(pt_dir, i))
xe_pt_destroy(xe_pt_entry(pt_dir, i), flags,
deferred);
@@ -488,7 +488,7 @@ xe_pt_new_shared(struct xe_walk_update *wupd, struct xe_pt *parent,
entry->qwords = 0;
if (alloc_entries) {
- entry->pt_entries = kmalloc_array(GEN8_PDES,
+ entry->pt_entries = kmalloc_array(XE_PDES,
sizeof(*entry->pt_entries),
GFP_KERNEL);
if (!entry->pt_entries)
@@ -648,7 +648,7 @@ xe_pt_stage_bind_entry(struct xe_ptw *parent, pgoff_t offset,
*/
if (level == 0 && !xe_parent->is_compact) {
if (xe_pt_is_pte_ps64K(addr, next, xe_walk))
- pte |= GEN12_PTE_PS64;
+ pte |= XE_PTE_PS64;
else if (XE_WARN_ON(xe_walk->needs_64K))
return -EINVAL;
}
@@ -698,7 +698,7 @@ xe_pt_stage_bind_entry(struct xe_ptw *parent, pgoff_t offset,
if (GRAPHICS_VERx100(xe_walk->gt->xe) >= 1250 && level == 1 &&
covers && xe_pt_scan_64K(addr, next, xe_walk)) {
walk->shifts = xe_compact_pt_shifts;
- flags |= GEN12_PDE_64K;
+ flags |= XE_PDE_64K;
xe_child->is_compact = true;
}
@@ -760,9 +760,9 @@ xe_pt_stage_bind(struct xe_gt *gt, struct xe_vma *vma,
if (is_vram) {
struct xe_gt *bo_gt = xe_bo_to_gt(bo);
- xe_walk.default_pte = GEN12_PPGTT_PTE_LM;
+ xe_walk.default_pte = XE_PPGTT_PTE_LM;
if (vma && vma->use_atomic_access_pte_bit)
- xe_walk.default_pte |= GEN12_USM_PPGTT_PTE_AE;
+ xe_walk.default_pte |= XE_USM_PPGTT_PTE_AE;
xe_walk.dma_offset = bo_gt->mem.vram.io_start -
gt_to_xe(gt)->mem.vram.io_start;
xe_walk.cache = XE_CACHE_WB;
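
xe_pt_shift() now reads XE_PTE_SHIFT + XE_PDE_SHIFT * level, i.e. shifts of 12, 21, 30 and 39 bits for levels 0 through 3, with 9 index bits per level. A sketch, assuming the xe_bo.h values above, of the per-level index extraction this implies; example_pt_index() is hypothetical, not from this patch:

/* Hypothetical: pull the 9-bit table index for a walk level out of a
 * GPU virtual address, using the shifts from xe_pt_shift().
 */
static unsigned int example_pt_index(u64 addr, unsigned int level)
{
	unsigned int shift = XE_PTE_SHIFT + XE_PDE_SHIFT * level;

	return (addr >> shift) & XE_PDE_MASK;	/* 0..XE_PDES - 1 */
}
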
diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
index 49aa4ddedbf2..e634bb96f9cc 100644
--- a/drivers/gpu/drm/xe/xe_vm.c
+++ b/drivers/gpu/drm/xe/xe_vm.c
@@ -59,7 +59,7 @@ int xe_vma_userptr_pin_pages(struct xe_vma *vma)
bool in_kthread = !current->mm;
unsigned long notifier_seq;
int pinned, ret, i;
- bool read_only = vma->pte_flags & PTE_READ_ONLY;
+ bool read_only = vma->pte_flags & XE_PTE_READ_ONLY;
lockdep_assert_held(&vm->lock);
XE_BUG_ON(!xe_vma_is_userptr(vma));
@@ -844,7 +844,7 @@ static struct xe_vma *xe_vma_create(struct xe_vm *vm,
vma->start = start;
vma->end = end;
if (read_only)
- vma->pte_flags = PTE_READ_ONLY;
+ vma->pte_flags = XE_PTE_READ_ONLY;
if (gt_mask) {
vma->gt_mask = gt_mask;
@@ -899,7 +899,7 @@ static void xe_vma_destroy_late(struct xe_vma *vma)
{
struct xe_vm *vm = vma->vm;
struct xe_device *xe = vm->xe;
- bool read_only = vma->pte_flags & PTE_READ_ONLY;
+ bool read_only = vma->pte_flags & XE_PTE_READ_ONLY;
if (xe_vma_is_userptr(vma)) {
if (vma->userptr.sg) {
@@ -1960,7 +1960,7 @@ int xe_vm_create_ioctl(struct drm_device *dev, void *data,
#if IS_ENABLED(CONFIG_DRM_XE_DEBUG_MEM)
/* Warning: Security issue - never enable by default */
- args->reserved[0] = xe_bo_main_addr(vm->pt_root[0]->bo, GEN8_PAGE_SIZE);
+ args->reserved[0] = xe_bo_main_addr(vm->pt_root[0]->bo, XE_PAGE_SIZE);
#endif
return 0;
@@ -2617,7 +2617,7 @@ static struct xe_vma *vm_unbind_lookup_vmas(struct xe_vm *vm,
first->userptr.ptr,
first->start,
lookup->start - 1,
- (first->pte_flags & PTE_READ_ONLY),
+ (first->pte_flags & XE_PTE_READ_ONLY),
first->gt_mask);
if (first->bo)
xe_bo_unlock(first->bo, &ww);
@@ -2648,7 +2648,7 @@ static struct xe_vma *vm_unbind_lookup_vmas(struct xe_vm *vm,
last->userptr.ptr + chunk,
last->start + chunk,
last->end,
- (last->pte_flags & PTE_READ_ONLY),
+ (last->pte_flags & XE_PTE_READ_ONLY),
last->gt_mask);
if (last->bo)
xe_bo_unlock(last->bo, &ww);
@@ -3405,7 +3405,8 @@ int xe_analyze_vm(struct drm_printer *p, struct xe_vm *vm, int gt_id)
return 0;
}
if (vm->pt_root[gt_id]) {
- addr = xe_bo_addr(vm->pt_root[gt_id]->bo, 0, GEN8_PAGE_SIZE, &is_vram);
+ addr = xe_bo_addr(vm->pt_root[gt_id]->bo, 0, XE_PAGE_SIZE,
+ &is_vram);
drm_printf(p, " VM root: A:0x%llx %s\n", addr, is_vram ? "VRAM" : "SYS");
}
@@ -3416,10 +3417,11 @@ int xe_analyze_vm(struct drm_printer *p, struct xe_vm *vm, int gt_id)
if (is_userptr) {
struct xe_res_cursor cur;
- xe_res_first_sg(vma->userptr.sg, 0, GEN8_PAGE_SIZE, &cur);
+ xe_res_first_sg(vma->userptr.sg, 0, XE_PAGE_SIZE,
+ &cur);
addr = xe_res_dma(&cur);
} else {
- addr = xe_bo_addr(vma->bo, 0, GEN8_PAGE_SIZE, &is_vram);
+ addr = xe_bo_addr(vma->bo, 0, XE_PAGE_SIZE, &is_vram);
}
drm_printf(p, " [%016llx-%016llx] S:0x%016llx A:%016llx %s\n",
vma->start, vma->end, vma->end - vma->start + 1ull,
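
Note that XE_PTE_READ_ONLY is a software request bit in vma->pte_flags, not a hardware PTE bit; __gen8_pte_encode() in xe_pt.c translates it by clearing XE_PAGE_RW. A condensed sketch of that translation as it stands after the rename:

/* Sketch of the access-flag translation done in __gen8_pte_encode():
 * the software XE_PTE_READ_ONLY request clears the hardware RW bit.
 */
static u64 example_apply_access_flags(u64 pte, u32 pte_flags)
{
	pte |= XE_PAGE_PRESENT | XE_PAGE_RW;

	if (pte_flags & XE_PTE_READ_ONLY)
		pte &= ~XE_PAGE_RW;
	return pte;
}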