diff options
-rw-r--r--  drivers/gpu/drm/xe/xe_bo.c      | 15
-rw-r--r--  drivers/gpu/drm/xe/xe_bo.h      |  5
-rw-r--r--  drivers/gpu/drm/xe/xe_ggtt.c    |  4
-rw-r--r--  drivers/gpu/drm/xe/xe_migrate.c |  4
-rw-r--r--  drivers/gpu/drm/xe/xe_pt.c      | 13
5 files changed, 29 insertions(+), 12 deletions(-)
diff --git a/drivers/gpu/drm/xe/xe_bo.c b/drivers/gpu/drm/xe/xe_bo.c index d4e60a96ed64..65b56e7a2fde 100644 --- a/drivers/gpu/drm/xe/xe_bo.c +++ b/drivers/gpu/drm/xe/xe_bo.c @@ -80,6 +80,21 @@ bool xe_bo_is_stolen(struct xe_bo *bo) return bo->ttm.resource->mem_type == XE_PL_STOLEN; } +/** + * xe_bo_is_stolen_devmem - check if BO is of stolen type accessed via PCI BAR + * @bo: The BO + * + * The stolen memory is accessed through the PCI BAR for both DGFX and some + * integrated platforms that have a dedicated bit in the PTE for devmem (DM). + * + * Returns: true if it's stolen memory accessed via PCI BAR, false otherwise. + */ +bool xe_bo_is_stolen_devmem(struct xe_bo *bo) +{ + return xe_bo_is_stolen(bo) && + GRAPHICS_VERx100(xe_bo_device(bo)) >= 1270; +} + static bool xe_bo_is_user(struct xe_bo *bo) { return bo->flags & XE_BO_CREATE_USER_BIT; diff --git a/drivers/gpu/drm/xe/xe_bo.h b/drivers/gpu/drm/xe/xe_bo.h index b8817e13aeeb..a9a32d680208 100644 --- a/drivers/gpu/drm/xe/xe_bo.h +++ b/drivers/gpu/drm/xe/xe_bo.h @@ -67,9 +67,9 @@ #define XE_PDPE_PS_1G BIT_ULL(7) #define XE_PDE_IPS_64K BIT_ULL(11) -#define XE_GGTT_PTE_LM BIT_ULL(1) +#define XE_GGTT_PTE_DM BIT_ULL(1) #define XE_USM_PPGTT_PTE_AE BIT_ULL(10) -#define XE_PPGTT_PTE_LM BIT_ULL(11) +#define XE_PPGTT_PTE_DM BIT_ULL(11) #define XE_PDE_64K BIT_ULL(6) #define XE_PTE_PS64 BIT_ULL(8) #define XE_PTE_NULL BIT_ULL(9) @@ -239,6 +239,7 @@ void xe_bo_vunmap(struct xe_bo *bo); bool mem_type_is_vram(u32 mem_type); bool xe_bo_is_vram(struct xe_bo *bo); bool xe_bo_is_stolen(struct xe_bo *bo); +bool xe_bo_is_stolen_devmem(struct xe_bo *bo); uint64_t vram_region_gpu_offset(struct ttm_resource *res); bool xe_bo_can_migrate(struct xe_bo *bo, u32 mem_type); diff --git a/drivers/gpu/drm/xe/xe_ggtt.c b/drivers/gpu/drm/xe/xe_ggtt.c index 3eea65bd1bcd..bf46b90a76ad 100644 --- a/drivers/gpu/drm/xe/xe_ggtt.c +++ b/drivers/gpu/drm/xe/xe_ggtt.c @@ -35,8 +35,8 @@ u64 xe_ggtt_pte_encode(struct xe_bo *bo, u64 bo_offset) pte = 
xe_bo_addr(bo, bo_offset, XE_PAGE_SIZE); pte |= XE_PAGE_PRESENT; - if (xe_bo_is_vram(bo)) - pte |= XE_GGTT_PTE_LM; + if (xe_bo_is_vram(bo) || xe_bo_is_stolen_devmem(bo)) + pte |= XE_GGTT_PTE_DM; /* FIXME: vfunc + pass in caching rules */ if (xe->info.platform == XE_METEORLAKE) { diff --git a/drivers/gpu/drm/xe/xe_migrate.c b/drivers/gpu/drm/xe/xe_migrate.c index 03f50a14c5c2..0405136bc0b1 100644 --- a/drivers/gpu/drm/xe/xe_migrate.c +++ b/drivers/gpu/drm/xe/xe_migrate.c @@ -258,7 +258,7 @@ static int xe_migrate_prepare_vm(struct xe_tile *tile, struct xe_migrate *m, level = 2; ofs = map_ofs + XE_PAGE_SIZE * level + 256 * 8; flags = XE_PAGE_RW | XE_PAGE_PRESENT | PPAT_CACHED | - XE_PPGTT_PTE_LM | XE_PDPE_PS_1G; + XE_PPGTT_PTE_DM | XE_PDPE_PS_1G; /* * Use 1GB pages, it shouldn't matter the physical amount of @@ -463,7 +463,7 @@ static void emit_pte(struct xe_migrate *m, } addr += vram_region_gpu_offset(bo->ttm.resource); - addr |= XE_PPGTT_PTE_LM; + addr |= XE_PPGTT_PTE_DM; } addr |= PPAT_CACHED | XE_PAGE_PRESENT | XE_PAGE_RW; bb->cs[bb->len++] = lower_32_bits(addr); diff --git a/drivers/gpu/drm/xe/xe_pt.c b/drivers/gpu/drm/xe/xe_pt.c index f69f7dbaca55..d9192bf50362 100644 --- a/drivers/gpu/drm/xe/xe_pt.c +++ b/drivers/gpu/drm/xe/xe_pt.c @@ -127,8 +127,8 @@ u64 xe_pte_encode(struct xe_bo *bo, u64 offset, enum xe_cache_level cache, u64 pte; pte = xe_bo_addr(bo, offset, XE_PAGE_SIZE); - if (xe_bo_is_vram(bo)) - pte |= XE_PPGTT_PTE_LM; + if (xe_bo_is_vram(bo) || xe_bo_is_stolen_devmem(bo)) + pte |= XE_PPGTT_PTE_DM; return __pte_encode(pte, cache, NULL, pt_level); } @@ -714,7 +714,8 @@ xe_pt_stage_bind(struct xe_tile *tile, struct xe_vma *vma, struct xe_vm_pgtable_update *entries, u32 *num_entries) { struct xe_bo *bo = xe_vma_bo(vma); - bool is_vram = !xe_vma_is_userptr(vma) && bo && xe_bo_is_vram(bo); + bool is_devmem = !xe_vma_is_userptr(vma) && bo && + (xe_bo_is_vram(bo) || xe_bo_is_stolen_devmem(bo)); struct xe_res_cursor curs; struct xe_pt_stage_bind_walk xe_walk = 
{ .base = { @@ -728,13 +729,13 @@ xe_pt_stage_bind(struct xe_tile *tile, struct xe_vma *vma, .va_curs_start = xe_vma_start(vma), .vma = vma, .wupd.entries = entries, - .needs_64K = (xe_vma_vm(vma)->flags & XE_VM_FLAG_64K) && is_vram, + .needs_64K = (xe_vma_vm(vma)->flags & XE_VM_FLAG_64K) && is_devmem, }; struct xe_pt *pt = xe_vma_vm(vma)->pt_root[tile->id]; int ret; - if (is_vram) { - xe_walk.default_pte = XE_PPGTT_PTE_LM; + if (is_devmem) { + xe_walk.default_pte = XE_PPGTT_PTE_DM; if (vma && vma->gpuva.flags & XE_VMA_ATOMIC_PTE_BIT) xe_walk.default_pte |= XE_USM_PPGTT_PTE_AE; xe_walk.dma_offset = vram_region_gpu_offset(bo->ttm.resource); |