author    Matthew Auld <matthew.auld@intel.com>  2023-03-08 15:30:08 +0300
committer Rodrigo Vivi <rodrigo.vivi@intel.com>  2023-12-20 02:29:45 +0300
commit    2a8477f7614a62b41b034e3eaf017d41e8a58ce9 (patch)
tree      12a231e189347f8dc3f43663e9b29fbd6a867016 /drivers
parent    39fd0b4507c3ba86ef04827208dd3aa85d2d796e (diff)
drm/xe: s/lmem/vram/
This seems to be the preferred nomenclature in xe. Currently we are
intermixing vram and lmem, which is confusing.

v2 (Gwan-gyeong Mun & Lucas):
  - Rather apply to the entire driver

Signed-off-by: Matthew Auld <matthew.auld@intel.com>
Cc: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Cc: Gwan-gyeong Mun <gwan-gyeong.mun@intel.com>
Cc: Lucas De Marchi <lucas.demarchi@intel.com>
Acked-by: Lucas De Marchi <lucas.demarchi@intel.com>
Reviewed-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
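For callers, the rename is purely cosmetic: only the out-parameter and variable names change, not the semantics. A minimal sketch of the call pattern after this patch, based on the xe_bo_addr() signature changed below (the surrounding variables and the use_vram_address() helper are hypothetical, for illustration only):

	/* Sketch: read a BO's device address with the renamed out-parameter.
	 * Behaviour is unchanged; "vram" simply replaces "lmem". */
	bool is_vram;
	dma_addr_t addr = xe_bo_addr(bo, 0, PAGE_SIZE, &is_vram);

	if (is_vram)
		/* addr is backed by device VRAM (previously called "lmem") */
		use_vram_address(addr);

Note that the module parameter is also renamed in xe_module.c below (lmem_bar_size becomes vram_bar_size), so any command line or modprobe configuration that forces the BAR size needs to use the new name.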
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/gpu/drm/xe/Kconfig.debug       |  2
-rw-r--r--  drivers/gpu/drm/xe/tests/xe_migrate.c  |  2
-rw-r--r--  drivers/gpu/drm/xe/xe_bo.c             | 10
-rw-r--r--  drivers/gpu/drm/xe/xe_bo.h             |  6
-rw-r--r--  drivers/gpu/drm/xe/xe_ggtt.c           |  6
-rw-r--r--  drivers/gpu/drm/xe/xe_migrate.c        | 12
-rw-r--r--  drivers/gpu/drm/xe/xe_mmio.c           | 40
-rw-r--r--  drivers/gpu/drm/xe/xe_module.c         |  6
-rw-r--r--  drivers/gpu/drm/xe/xe_module.h         |  2
-rw-r--r--  drivers/gpu/drm/xe/xe_pt.c             | 12
-rw-r--r--  drivers/gpu/drm/xe/xe_vm.c             | 10
11 files changed, 54 insertions(+), 54 deletions(-)
diff --git a/drivers/gpu/drm/xe/Kconfig.debug b/drivers/gpu/drm/xe/Kconfig.debug
index 565be3f6b9b9..93b284cdd0a2 100644
--- a/drivers/gpu/drm/xe/Kconfig.debug
+++ b/drivers/gpu/drm/xe/Kconfig.debug
@@ -41,7 +41,7 @@ config DRM_XE_DEBUG_VM
If in doubt, say "N".
config DRM_XE_DEBUG_MEM
- bool "Enable passing SYS/LMEM addresses to user space"
+ bool "Enable passing SYS/VRAM addresses to user space"
default n
help
Pass object location trough uapi. Intended for extended
diff --git a/drivers/gpu/drm/xe/tests/xe_migrate.c b/drivers/gpu/drm/xe/tests/xe_migrate.c
index b7e4a126e8b7..ac659b94e7f5 100644
--- a/drivers/gpu/drm/xe/tests/xe_migrate.c
+++ b/drivers/gpu/drm/xe/tests/xe_migrate.c
@@ -129,7 +129,7 @@ static void test_copy(struct xe_migrate *m, struct xe_bo *bo,
}
dma_fence_put(fence);
- /* Try to copy 0xc0 from sysmem to lmem with 2MB or 64KiB/4KiB pages */
+ /* Try to copy 0xc0 from sysmem to vram with 2MB or 64KiB/4KiB pages */
xe_map_memset(xe, &sysmem->vmap, 0, 0xc0, sysmem->size);
xe_map_memset(xe, &bo->vmap, 0, 0xd0, bo->size);
diff --git a/drivers/gpu/drm/xe/xe_bo.c b/drivers/gpu/drm/xe/xe_bo.c
index 09b8db6d7ba3..cfb79519b673 100644
--- a/drivers/gpu/drm/xe/xe_bo.c
+++ b/drivers/gpu/drm/xe/xe_bo.c
@@ -1299,12 +1299,12 @@ int xe_bo_pin(struct xe_bo *bo)
if (IS_DGFX(xe) && !(IS_ENABLED(CONFIG_DRM_XE_DEBUG) &&
bo->flags & XE_BO_INTERNAL_TEST)) {
struct ttm_place *place = &(bo->placements[0]);
- bool lmem;
+ bool vram;
if (mem_type_is_vram(place->mem_type)) {
XE_BUG_ON(!(place->flags & TTM_PL_FLAG_CONTIGUOUS));
- place->fpfn = (xe_bo_addr(bo, 0, PAGE_SIZE, &lmem) -
+ place->fpfn = (xe_bo_addr(bo, 0, PAGE_SIZE, &vram) -
vram_region_io_offset(bo)) >> PAGE_SHIFT;
place->lpfn = place->fpfn + (bo->size >> PAGE_SHIFT);
@@ -1424,7 +1424,7 @@ bool xe_bo_is_xe_bo(struct ttm_buffer_object *bo)
}
dma_addr_t xe_bo_addr(struct xe_bo *bo, u64 offset,
- size_t page_size, bool *is_lmem)
+ size_t page_size, bool *is_vram)
{
struct xe_res_cursor cur;
u64 page;
@@ -1436,9 +1436,9 @@ dma_addr_t xe_bo_addr(struct xe_bo *bo, u64 offset,
page = offset >> PAGE_SHIFT;
offset &= (PAGE_SIZE - 1);
- *is_lmem = xe_bo_is_vram(bo);
+ *is_vram = xe_bo_is_vram(bo);
- if (!*is_lmem && !xe_bo_is_stolen(bo)) {
+ if (!*is_vram && !xe_bo_is_stolen(bo)) {
XE_BUG_ON(!bo->ttm.ttm);
xe_res_first_sg(xe_bo_get_sg(bo), page << PAGE_SHIFT,
diff --git a/drivers/gpu/drm/xe/xe_bo.h b/drivers/gpu/drm/xe/xe_bo.h
index 8c2cdbe51ab5..4350845542bf 100644
--- a/drivers/gpu/drm/xe/xe_bo.h
+++ b/drivers/gpu/drm/xe/xe_bo.h
@@ -196,14 +196,14 @@ static inline void xe_bo_unpin_map_no_vm(struct xe_bo *bo)
bool xe_bo_is_xe_bo(struct ttm_buffer_object *bo);
dma_addr_t xe_bo_addr(struct xe_bo *bo, u64 offset,
- size_t page_size, bool *is_lmem);
+ size_t page_size, bool *is_vram);
static inline dma_addr_t
xe_bo_main_addr(struct xe_bo *bo, size_t page_size)
{
- bool is_lmem;
+ bool is_vram;
- return xe_bo_addr(bo, 0, page_size, &is_lmem);
+ return xe_bo_addr(bo, 0, page_size, &is_vram);
}
static inline u32
diff --git a/drivers/gpu/drm/xe/xe_ggtt.c b/drivers/gpu/drm/xe/xe_ggtt.c
index d6ebc1d77f4d..99bc9036c7a0 100644
--- a/drivers/gpu/drm/xe/xe_ggtt.c
+++ b/drivers/gpu/drm/xe/xe_ggtt.c
@@ -28,12 +28,12 @@ u64 xe_ggtt_pte_encode(struct xe_bo *bo, u64 bo_offset)
{
struct xe_device *xe = xe_bo_device(bo);
u64 pte;
- bool is_lmem;
+ bool is_vram;
- pte = xe_bo_addr(bo, bo_offset, GEN8_PAGE_SIZE, &is_lmem);
+ pte = xe_bo_addr(bo, bo_offset, GEN8_PAGE_SIZE, &is_vram);
pte |= GEN8_PAGE_PRESENT;
- if (is_lmem)
+ if (is_vram)
pte |= GEN12_GGTT_PTE_LM;
/* FIXME: vfunc + pass in caching rules */
diff --git a/drivers/gpu/drm/xe/xe_migrate.c b/drivers/gpu/drm/xe/xe_migrate.c
index 79aa3508ae3e..4a9fe1f7128d 100644
--- a/drivers/gpu/drm/xe/xe_migrate.c
+++ b/drivers/gpu/drm/xe/xe_migrate.c
@@ -222,15 +222,15 @@ static int xe_migrate_prepare_vm(struct xe_gt *gt, struct xe_migrate *m,
level++;
}
} else {
- bool is_lmem;
- u64 batch_addr = xe_bo_addr(batch, 0, GEN8_PAGE_SIZE, &is_lmem);
+ bool is_vram;
+ u64 batch_addr = xe_bo_addr(batch, 0, GEN8_PAGE_SIZE, &is_vram);
m->batch_base_ofs = xe_migrate_vram_ofs(batch_addr);
if (xe->info.supports_usm) {
batch = gt->usm.bb_pool.bo;
batch_addr = xe_bo_addr(batch, 0, GEN8_PAGE_SIZE,
- &is_lmem);
+ &is_vram);
m->usm_batch_base_ofs = xe_migrate_vram_ofs(batch_addr);
}
}
@@ -933,12 +933,12 @@ static void write_pgtable(struct xe_gt *gt, struct xe_bb *bb, u64 ppgtt_ofs,
*/
XE_BUG_ON(update->qwords > 0x1ff);
if (!ppgtt_ofs) {
- bool is_lmem;
+ bool is_vram;
ppgtt_ofs = xe_migrate_vram_ofs(xe_bo_addr(update->pt_bo, 0,
GEN8_PAGE_SIZE,
- &is_lmem));
- XE_BUG_ON(!is_lmem);
+ &is_vram));
+ XE_BUG_ON(!is_vram);
}
do {
diff --git a/drivers/gpu/drm/xe/xe_mmio.c b/drivers/gpu/drm/xe/xe_mmio.c
index 65b0df9bb579..e5bd4609aaee 100644
--- a/drivers/gpu/drm/xe/xe_mmio.c
+++ b/drivers/gpu/drm/xe/xe_mmio.c
@@ -68,7 +68,7 @@ _resize_bar(struct xe_device *xe, int resno, resource_size_t size)
return 1;
}
-static int xe_resize_lmem_bar(struct xe_device *xe, resource_size_t lmem_size)
+static int xe_resize_vram_bar(struct xe_device *xe, resource_size_t vram_size)
{
struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
struct pci_bus *root = pdev->bus;
@@ -78,31 +78,31 @@ static int xe_resize_lmem_bar(struct xe_device *xe, resource_size_t lmem_size)
u32 pci_cmd;
int i;
int ret;
- u64 force_lmem_bar_size = xe_force_lmem_bar_size;
+ u64 force_vram_bar_size = xe_force_vram_bar_size;
current_size = roundup_pow_of_two(pci_resource_len(pdev, GEN12_LMEM_BAR));
- if (force_lmem_bar_size) {
+ if (force_vram_bar_size) {
u32 bar_sizes;
- rebar_size = force_lmem_bar_size * (resource_size_t)SZ_1M;
+ rebar_size = force_vram_bar_size * (resource_size_t)SZ_1M;
bar_sizes = pci_rebar_get_possible_sizes(pdev, GEN12_LMEM_BAR);
if (rebar_size == current_size)
return 0;
if (!(bar_sizes & BIT(pci_rebar_bytes_to_size(rebar_size))) ||
- rebar_size >= roundup_pow_of_two(lmem_size)) {
- rebar_size = lmem_size;
+ rebar_size >= roundup_pow_of_two(vram_size)) {
+ rebar_size = vram_size;
drm_info(&xe->drm,
"Given bar size is not within supported size, setting it to default: %llu\n",
- (u64)lmem_size >> 20);
+ (u64)vram_size >> 20);
}
} else {
rebar_size = current_size;
- if (rebar_size != roundup_pow_of_two(lmem_size))
- rebar_size = lmem_size;
+ if (rebar_size != roundup_pow_of_two(vram_size))
+ rebar_size = vram_size;
else
return 0;
}
@@ -117,7 +117,7 @@ static int xe_resize_lmem_bar(struct xe_device *xe, resource_size_t lmem_size)
}
if (!root_res) {
- drm_info(&xe->drm, "Can't resize LMEM BAR - platform support is missing\n");
+ drm_info(&xe->drm, "Can't resize VRAM BAR - platform support is missing\n");
return -1;
}
@@ -168,7 +168,7 @@ int xe_mmio_total_vram_size(struct xe_device *xe, u64 *vram_size, u64 *usable_si
if (usable_size) {
reg = xe_gt_mcr_unicast_read_any(gt, XEHP_FLAT_CCS_BASE_ADDR);
*usable_size = (u64)REG_FIELD_GET(GENMASK(31, 8), reg) * SZ_64K;
- drm_info(&xe->drm, "lmem_size: 0x%llx usable_size: 0x%llx\n",
+ drm_info(&xe->drm, "vram_size: 0x%llx usable_size: 0x%llx\n",
*vram_size, *usable_size);
}
@@ -180,7 +180,7 @@ int xe_mmio_probe_vram(struct xe_device *xe)
struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
struct xe_gt *gt;
u8 id;
- u64 lmem_size;
+ u64 vram_size;
u64 original_size;
u64 current_size;
u64 usable_size;
@@ -207,29 +207,29 @@ int xe_mmio_probe_vram(struct xe_device *xe)
gt = xe_device_get_gt(xe, 0);
original_size = pci_resource_len(pdev, GEN12_LMEM_BAR);
- err = xe_mmio_total_vram_size(xe, &lmem_size, &usable_size);
+ err = xe_mmio_total_vram_size(xe, &vram_size, &usable_size);
if (err)
return err;
- resize_result = xe_resize_lmem_bar(xe, lmem_size);
+ resize_result = xe_resize_vram_bar(xe, vram_size);
current_size = pci_resource_len(pdev, GEN12_LMEM_BAR);
xe->mem.vram.io_start = pci_resource_start(pdev, GEN12_LMEM_BAR);
- xe->mem.vram.size = min(current_size, lmem_size);
+ xe->mem.vram.size = min(current_size, vram_size);
if (!xe->mem.vram.size)
return -EIO;
if (resize_result > 0)
- drm_info(&xe->drm, "Successfully resize LMEM from %lluMiB to %lluMiB\n",
+ drm_info(&xe->drm, "Successfully resize VRAM from %lluMiB to %lluMiB\n",
(u64)original_size >> 20,
(u64)current_size >> 20);
- else if (xe->mem.vram.size < lmem_size && !xe_force_lmem_bar_size)
+ else if (xe->mem.vram.size < vram_size && !xe_force_vram_bar_size)
drm_info(&xe->drm, "Using a reduced BAR size of %lluMiB. Consider enabling 'Resizable BAR' support in your BIOS.\n",
(u64)xe->mem.vram.size >> 20);
- if (xe->mem.vram.size < lmem_size)
+ if (xe->mem.vram.size < vram_size)
drm_warn(&xe->drm, "Restricting VRAM size to PCI resource size (0x%llx->0x%llx)\n",
- lmem_size, (u64)xe->mem.vram.size);
+ vram_size, (u64)xe->mem.vram.size);
xe->mem.vram.mapping = ioremap_wc(xe->mem.vram.io_start, xe->mem.vram.size);
xe->mem.vram.size = min_t(u64, xe->mem.vram.size, usable_size);
@@ -360,7 +360,7 @@ int xe_mmio_init(struct xe_device *xe)
* and we should not continue with driver initialization.
*/
if (IS_DGFX(xe) && !(xe_mmio_read32(gt, GU_CNTL.reg) & LMEM_INIT)) {
- drm_err(&xe->drm, "LMEM not initialized by firmware\n");
+ drm_err(&xe->drm, "VRAM not initialized by firmware\n");
return -ENODEV;
}
diff --git a/drivers/gpu/drm/xe/xe_module.c b/drivers/gpu/drm/xe/xe_module.c
index 3f5d03a58696..e8ee7a9b0878 100644
--- a/drivers/gpu/drm/xe/xe_module.c
+++ b/drivers/gpu/drm/xe/xe_module.c
@@ -18,9 +18,9 @@ bool enable_guc = true;
module_param_named_unsafe(enable_guc, enable_guc, bool, 0444);
MODULE_PARM_DESC(enable_guc, "Enable GuC submission");
-u32 xe_force_lmem_bar_size;
-module_param_named(lmem_bar_size, xe_force_lmem_bar_size, uint, 0600);
-MODULE_PARM_DESC(lmem_bar_size, "Set the lmem bar size(in MiB)");
+u32 xe_force_vram_bar_size;
+module_param_named(vram_bar_size, xe_force_vram_bar_size, uint, 0600);
+MODULE_PARM_DESC(vram_bar_size, "Set the vram bar size(in MiB)");
int xe_guc_log_level = 5;
module_param_named(guc_log_level, xe_guc_log_level, int, 0600);
diff --git a/drivers/gpu/drm/xe/xe_module.h b/drivers/gpu/drm/xe/xe_module.h
index 2c6ee46f5595..86916c176382 100644
--- a/drivers/gpu/drm/xe/xe_module.h
+++ b/drivers/gpu/drm/xe/xe_module.h
@@ -8,6 +8,6 @@
/* Module modprobe variables */
extern bool enable_guc;
extern bool enable_display;
-extern u32 xe_force_lmem_bar_size;
+extern u32 xe_force_vram_bar_size;
extern int xe_guc_log_level;
extern char *xe_param_force_probe;
diff --git a/drivers/gpu/drm/xe/xe_pt.c b/drivers/gpu/drm/xe/xe_pt.c
index 00d9fff53828..64da98152455 100644
--- a/drivers/gpu/drm/xe/xe_pt.c
+++ b/drivers/gpu/drm/xe/xe_pt.c
@@ -61,12 +61,12 @@ u64 gen8_pde_encode(struct xe_bo *bo, u64 bo_offset,
const enum xe_cache_level level)
{
u64 pde;
- bool is_lmem;
+ bool is_vram;
- pde = xe_bo_addr(bo, bo_offset, GEN8_PAGE_SIZE, &is_lmem);
+ pde = xe_bo_addr(bo, bo_offset, GEN8_PAGE_SIZE, &is_vram);
pde |= GEN8_PAGE_PRESENT | GEN8_PAGE_RW;
- XE_WARN_ON(IS_DGFX(xe_bo_device(bo)) && !is_lmem);
+ XE_WARN_ON(IS_DGFX(xe_bo_device(bo)) && !is_vram);
/* FIXME: I don't think the PPAT handling is correct for MTL */
@@ -79,13 +79,13 @@ u64 gen8_pde_encode(struct xe_bo *bo, u64 bo_offset,
}
static dma_addr_t vma_addr(struct xe_vma *vma, u64 offset,
- size_t page_size, bool *is_lmem)
+ size_t page_size, bool *is_vram)
{
if (xe_vma_is_userptr(vma)) {
struct xe_res_cursor cur;
u64 page;
- *is_lmem = false;
+ *is_vram = false;
page = offset >> PAGE_SHIFT;
offset &= (PAGE_SIZE - 1);
@@ -93,7 +93,7 @@ static dma_addr_t vma_addr(struct xe_vma *vma, u64 offset,
&cur);
return xe_res_dma(&cur) + offset;
} else {
- return xe_bo_addr(vma->bo, offset, page_size, is_lmem);
+ return xe_bo_addr(vma->bo, offset, page_size, is_vram);
}
}
diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
index fcac31f11706..a8254a4148f7 100644
--- a/drivers/gpu/drm/xe/xe_vm.c
+++ b/drivers/gpu/drm/xe/xe_vm.c
@@ -3379,7 +3379,7 @@ int xe_vm_invalidate_vma(struct xe_vma *vma)
int xe_analyze_vm(struct drm_printer *p, struct xe_vm *vm, int gt_id)
{
struct rb_node *node;
- bool is_lmem;
+ bool is_vram;
uint64_t addr;
if (!down_read_trylock(&vm->lock)) {
@@ -3387,8 +3387,8 @@ int xe_analyze_vm(struct drm_printer *p, struct xe_vm *vm, int gt_id)
return 0;
}
if (vm->pt_root[gt_id]) {
- addr = xe_bo_addr(vm->pt_root[gt_id]->bo, 0, GEN8_PAGE_SIZE, &is_lmem);
- drm_printf(p, " VM root: A:0x%llx %s\n", addr, is_lmem ? "LMEM" : "SYS");
+ addr = xe_bo_addr(vm->pt_root[gt_id]->bo, 0, GEN8_PAGE_SIZE, &is_vram);
+ drm_printf(p, " VM root: A:0x%llx %s\n", addr, is_vram ? "VRAM" : "SYS");
}
for (node = rb_first(&vm->vmas); node; node = rb_next(node)) {
@@ -3401,11 +3401,11 @@ int xe_analyze_vm(struct drm_printer *p, struct xe_vm *vm, int gt_id)
xe_res_first_sg(vma->userptr.sg, 0, GEN8_PAGE_SIZE, &cur);
addr = xe_res_dma(&cur);
} else {
- addr = xe_bo_addr(vma->bo, 0, GEN8_PAGE_SIZE, &is_lmem);
+ addr = xe_bo_addr(vma->bo, 0, GEN8_PAGE_SIZE, &is_vram);
}
drm_printf(p, " [%016llx-%016llx] S:0x%016llx A:%016llx %s\n",
vma->start, vma->end, vma->end - vma->start + 1ull,
- addr, is_userptr ? "USR" : is_lmem ? "VRAM" : "SYS");
+ addr, is_userptr ? "USR" : is_vram ? "VRAM" : "SYS");
}
up_read(&vm->lock);