From 7b1a8a5fcee4a85be1f540ac0e09761d421e562d Mon Sep 17 00:00:00 2001 From: José Roberto de Souza Date: Thu, 4 Jan 2024 08:18:32 -0800 Subject: drm/xe: Fix definition of intel_wakeref_t MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit i915 defines it as unsigned long so Xe should do the same to avoid compilation warnings: CC [M] drivers/gpu/drm/i915/i915_gem.o CC [M] drivers/gpu/drm/xe/i915-display/intel_display_power_well.o In file included from ./include/drm/drm_mm.h:51, from drivers/gpu/drm/xe/xe_bo_types.h:11, from drivers/gpu/drm/xe/xe_bo.h:11, from ./drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_object.h:11, from ./drivers/gpu/drm/xe/compat-i915-headers/i915_drv.h:15, from drivers/gpu/drm/i915/display/intel_display_power.c:8: drivers/gpu/drm/i915/display/intel_display_power.c: In function ‘print_async_put_domains_state’: drivers/gpu/drm/i915/display/intel_display_power.c:408:29: warning: format ‘%lu’ expects argument of type ‘long unsigned int’, but argument 5 has type ‘int’ [-Wformat=] 408 | drm_dbg(&i915->drm, "async_put_wakeref %lu\n", | ^~~~~~~~~~~~~~~~~~~~~~~~~ 409 | power_domains->async_put_wakeref); | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ | | | int ./include/drm/drm_print.h:410:39: note: in definition of macro ‘drm_dev_dbg’ 410 | __drm_dev_dbg(NULL, dev, cat, fmt, ##__VA_ARGS__) | ^~~ ./include/drm/drm_print.h:510:33: note: in expansion of macro ‘drm_dbg_driver’ 510 | #define drm_dbg(drm, fmt, ...) drm_dbg_driver(drm, fmt, ##__VA_ARGS__) | ^~~~~~~~~~~~~~ drivers/gpu/drm/i915/display/intel_display_power.c:408:9: note: in expansion of macro ‘drm_dbg’ 408 | drm_dbg(&i915->drm, "async_put_wakeref %lu\n", | ^~~~~~~ drivers/gpu/drm/i915/display/intel_display_power.c:408:50: note: format string is defined here 408 | drm_dbg(&i915->drm, "async_put_wakeref %lu\n", | ~~^ | | | long unsigned int | %u CC [M] drivers/gpu/drm/i915/i915_gem_evict.o CC [M] drivers/gpu/drm/i915/i915_gem_gtt.o CC [M] drivers/gpu/drm/xe/i915-display/intel_display_trace.o CC [M] drivers/gpu/drm/xe/i915-display/intel_display_wa.o CC [M] drivers/gpu/drm/i915/i915_query.o Fixes: 44e694958b95 ("drm/xe/display: Implement display support") Cc: Maarten Lankhorst Reviewed-by: Jani Nikula Signed-off-by: José Roberto de Souza (cherry picked from commit fdbadf504375886a0320ac6f84c850322a6b32e1) Signed-off-by: Thomas Hellström --- drivers/gpu/drm/xe/compat-i915-headers/intel_wakeref.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/xe/compat-i915-headers/intel_wakeref.h b/drivers/gpu/drm/xe/compat-i915-headers/intel_wakeref.h index 1c5e30cf10ca..ecb1c0707706 100644 --- a/drivers/gpu/drm/xe/compat-i915-headers/intel_wakeref.h +++ b/drivers/gpu/drm/xe/compat-i915-headers/intel_wakeref.h @@ -5,4 +5,4 @@ #include -typedef bool intel_wakeref_t; +typedef unsigned long intel_wakeref_t; -- cgit v1.2.3 From 56c253daabc8bd9dfbae52c3d9e0dd34977347a6 Mon Sep 17 00:00:00 2001 From: Matthew Brost Date: Thu, 4 Jan 2024 00:00:39 -0800 Subject: drm/xe: Fix exec IOCTL long running exec queue ring full condition MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The intent is to return -EWOULDBLOCK to the user if a long running exec queue is full during the exec IOCTL. -EWOULDBLOCK aliases to -EAGAIN which results in the exec IOCTL doing a retry loop. Fix this by ensuring the retry loop is broken when returning -EWOULDBLOCK. 
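For illustration, a minimal standalone sketch of why the error code alone is not enough (made-up helper names, not the driver code itself):

  #include <errno.h>
  #include <stdbool.h>

  /* On Linux, EWOULDBLOCK and EAGAIN share the same value, so the return
   * code cannot distinguish "LR queue ring full, report to userspace"
   * from "transient contention, retry"; a separate flag is needed.
   */
  static int submit_once(bool *skip_retry)
  {
          bool ring_full = true;          /* assume the LR ring is full */

          if (ring_full) {
                  *skip_retry = true;     /* do not loop on this error */
                  return -EWOULDBLOCK;    /* numerically equal to -EAGAIN */
          }
          return 0;
  }

  static int exec_ioctl_sketch(void)
  {
          bool skip_retry = false;
          int err;

  retry:
          err = submit_once(&skip_retry);
          if (err == -EAGAIN && !skip_retry)
                  goto retry;
          return err;
  }
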
Fixes: 8ae8a2e8dd21 ("drm/xe: Long running job update") Reported-by: Sai Gowtham Ch Signed-off-by: Matthew Brost Reviewed-by: Brian Welty (cherry picked from commit 97d0047cbb17318431eaf37dfe1a6855539340f9) Signed-off-by: Thomas Hellström --- drivers/gpu/drm/xe/xe_exec.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/xe/xe_exec.c b/drivers/gpu/drm/xe/xe_exec.c index d30c0d0689bc..b853feed9ccc 100644 --- a/drivers/gpu/drm/xe/xe_exec.c +++ b/drivers/gpu/drm/xe/xe_exec.c @@ -115,7 +115,7 @@ int xe_exec_ioctl(struct drm_device *dev, void *data, struct drm_file *file) struct xe_sched_job *job; struct dma_fence *rebind_fence; struct xe_vm *vm; - bool write_locked; + bool write_locked, skip_retry = false; ktime_t end = 0; int err = 0; @@ -227,7 +227,8 @@ retry: } if (xe_exec_queue_is_lr(q) && xe_exec_queue_ring_full(q)) { - err = -EWOULDBLOCK; + err = -EWOULDBLOCK; /* Aliased to -EAGAIN */ + skip_retry = true; goto err_exec; } @@ -337,7 +338,7 @@ err_unlock_list: up_write(&vm->lock); else up_read(&vm->lock); - if (err == -EAGAIN) + if (err == -EAGAIN && !skip_retry) goto retry; err_syncs: for (i = 0; i < num_syncs; i++) -- cgit v1.2.3 From 457f4439833487acb18abdd55e95fbb17d43fdca Mon Sep 17 00:00:00 2001 From: Thomas Hellström Date: Fri, 22 Dec 2023 18:59:04 +0100 Subject: drm/xe/vm: Fix an error path MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit If using the VM_BIND_OP_UNMAP_ALL without any bound vmas for the vm, we will end up dereferencing an uninitialized variable and leak a bo lock. Fix this. v2: - Updated commit message (Lucas De Marchi) Reported-by: Dafna Hirschfeld Closes: https://lore.kernel.org/intel-xe/jrwua7ckbiozfcaodx4gg2h4taiuxs53j5zlpf3qzvyhyiyl2d@pbs3plurokrj/ Suggested-by: Dafna Hirschfeld Fixes: b06d47be7c83 ("drm/xe: Port Xe to GPUVA") Signed-off-by: Thomas Hellström Acked-by: Lucas De Marchi Reviewed-by: Lucas De Marchi Link: https://patchwork.freedesktop.org/patch/msgid/20231222175904.16732-1-thomas.hellstrom@linux.intel.com (cherry picked from commit 9d0c1c5618be02c5acda7e6bbb728007b0632984) Signed-off-by: Thomas Hellström --- drivers/gpu/drm/xe/xe_vm.c | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c index 0cfe7289b97e..b0e3cab6a584 100644 --- a/drivers/gpu/drm/xe/xe_vm.c +++ b/drivers/gpu/drm/xe/xe_vm.c @@ -2063,9 +2063,11 @@ vm_bind_ioctl_ops_create(struct xe_vm *vm, struct xe_bo *bo, if (err) return ERR_PTR(err); - vm_bo = drm_gpuvm_bo_find(&vm->gpuvm, obj); - if (!vm_bo) - break; + vm_bo = drm_gpuvm_bo_obtain(&vm->gpuvm, obj); + if (IS_ERR(vm_bo)) { + xe_bo_unlock(bo); + return ERR_CAST(vm_bo); + } ops = drm_gpuvm_bo_unmap_ops_create(vm_bo); drm_gpuvm_bo_put(vm_bo); -- cgit v1.2.3 From 3ec276d06698189506f508f87c0f4f17c11e0251 Mon Sep 17 00:00:00 2001 From: Thomas Hellström Date: Tue, 9 Jan 2024 12:24:02 +0100 Subject: drm/xe: Use __iomem for the regs pointer MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The regs pointer points to IO memory. Annotate it properly and fix the corresponding sparse warning. 
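For background, a minimal sketch of what the annotation buys (illustrative only, not the xe code; sparse is the checker run via "make C=1"):

  #include <linux/io.h>

  /* __iomem marks a pointer into device (MMIO) address space.  Accesses
   * then go through the MMIO accessors, and sparse warns when such a
   * pointer is dereferenced directly or mixed with ordinary pointers.
   */
  static u32 mmio_read_sketch(void __iomem *regs, u32 offset)
  {
          return readl(regs + offset);    /* ok: readl() expects __iomem */
  }

  /* With a plain "void *regs", both the assignment from ioremap() and
   * the readl() argument produce "incorrect type ... (different address
   * spaces)" warnings, which is the class of warning fixed here.
   */
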
Fixes: a4e2f3a299ea ("drm/xe: refactor xe_mmio_probe_tiles to support MMIO extension") Cc: Koby Elbaz Cc: Ofir Bitton Cc: Moti Haimovski Cc: Rodrigo Vivi Signed-off-by: Thomas Hellström Reviewed-by: Lucas De Marchi Link: https://patchwork.freedesktop.org/patch/msgid/20240109112405.108136-2-thomas.hellstrom@linux.intel.com (cherry picked from commit 9d03bf30e78673d827484bbc17a6fd8f5e43a039) Signed-off-by: Thomas Hellström --- drivers/gpu/drm/xe/xe_mmio.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/xe/xe_mmio.c b/drivers/gpu/drm/xe/xe_mmio.c index f660cfb79f50..c8c5d74b6e90 100644 --- a/drivers/gpu/drm/xe/xe_mmio.c +++ b/drivers/gpu/drm/xe/xe_mmio.c @@ -303,7 +303,7 @@ void xe_mmio_probe_tiles(struct xe_device *xe) u8 id, tile_count = xe->info.tile_count; struct xe_gt *gt = xe_root_mmio_gt(xe); struct xe_tile *tile; - void *regs; + void __iomem *regs; u32 mtcfg; if (tile_count == 1) -- cgit v1.2.3 From 77232e6a28447c2942558d05f1c3115bdf95a9e7 Mon Sep 17 00:00:00 2001 From: Thomas Hellström Date: Tue, 9 Jan 2024 12:24:03 +0100 Subject: drm/xe: Annotate xe_mem_region::mapping with __iomem MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The pointer points to IO memory, but the __iomem annotation was incorrectly placed. Annotate it correctly, update its usage accordingly and fix the corresponding sparse error. Fixes: 0887a2e7ab62 ("drm/xe: Make xe_mem_region struct") Cc: Oak Zeng Cc: Michael J. Ruhl Cc: Matthew Brost Cc: Rodrigo Vivi Signed-off-by: Thomas Hellström Reviewed-by: Lucas De Marchi Link: https://patchwork.freedesktop.org/patch/msgid/20240109112405.108136-3-thomas.hellstrom@linux.intel.com (cherry picked from commit 20855b62a30538361e587cfc7c5245f07d4f826a) Signed-off-by: Thomas Hellström --- drivers/gpu/drm/xe/xe_bo.c | 4 ++-- drivers/gpu/drm/xe/xe_device_types.h | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/xe/xe_bo.c b/drivers/gpu/drm/xe/xe_bo.c index 8e4a3b1f6b93..3cd29bd015a0 100644 --- a/drivers/gpu/drm/xe/xe_bo.c +++ b/drivers/gpu/drm/xe/xe_bo.c @@ -442,7 +442,7 @@ static int xe_ttm_io_mem_reserve(struct ttm_device *bdev, if (vram->mapping && mem->placement & TTM_PL_FLAG_CONTIGUOUS) - mem->bus.addr = (u8 *)vram->mapping + + mem->bus.addr = (u8 __force *)vram->mapping + mem->bus.offset; mem->bus.offset += vram->io_start; @@ -734,7 +734,7 @@ static int xe_bo_move(struct ttm_buffer_object *ttm_bo, bool evict, /* Create a new VMAP once kernel BO back in VRAM */ if (!ret && resource_is_vram(new_mem)) { struct xe_mem_region *vram = res_to_mem_region(new_mem); - void *new_addr = vram->mapping + + void __iomem *new_addr = vram->mapping + (new_mem->start << PAGE_SHIFT); if (XE_WARN_ON(new_mem->start == XE_BO_INVALID_OFFSET)) { diff --git a/drivers/gpu/drm/xe/xe_device_types.h b/drivers/gpu/drm/xe/xe_device_types.h index c45ef17b3473..4b38c6bc6c76 100644 --- a/drivers/gpu/drm/xe/xe_device_types.h +++ b/drivers/gpu/drm/xe/xe_device_types.h @@ -97,7 +97,7 @@ struct xe_mem_region { */ resource_size_t actual_physical_size; /** @mapping: pointer to VRAM mappable space */ - void *__iomem mapping; + void __iomem *mapping; }; /** -- cgit v1.2.3 From 5c63e7574739c034e072dea0e0a6fcbe8d538666 Mon Sep 17 00:00:00 2001 From: Thomas Hellström Date: Tue, 9 Jan 2024 12:24:04 +0100 Subject: drm/xe: Annotate multiple mmio pointers with __iomem MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit There are a 
couple of pointers pointing to MMIO space. Annotate them with __iomem and fix the corresponding sparse warnings. Fixes: dd08ebf6c352 ("drm/xe: Introduce a new DRM driver for Intel GPUs") Fixes: 3b0d4a557996 ("drm/xe: Move register MMIO into xe_tile") Fixes: 399a13323f0d ("drm/xe: add 28-bit address support in struct xe_reg") Cc: Rodrigo Vivi Cc: Matthew Brost Cc: Lucas De Marchi Cc: Matt Roper Cc: Koby Elbaz Cc: Ofir Bitton Cc: Moti Haimovski Signed-off-by: Thomas Hellström Reviewed-by: Lucas De Marchi Reviewed-by: Matt Roper Link: https://patchwork.freedesktop.org/patch/msgid/20240109112405.108136-4-thomas.hellstrom@linux.intel.com (cherry picked from commit 9d612ee52c6096bc70d43f54921ba2831ffbf1ad) Signed-off-by: Thomas Hellström --- drivers/gpu/drm/xe/xe_device_types.h | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/xe/xe_device_types.h b/drivers/gpu/drm/xe/xe_device_types.h index 4b38c6bc6c76..5dc9127a2029 100644 --- a/drivers/gpu/drm/xe/xe_device_types.h +++ b/drivers/gpu/drm/xe/xe_device_types.h @@ -146,7 +146,7 @@ struct xe_tile { size_t size; /** @regs: pointer to tile's MMIO space (starting with registers) */ - void *regs; + void __iomem *regs; } mmio; /** @@ -159,7 +159,7 @@ struct xe_tile { size_t size; /** @regs: pointer to tile's additional MMIO-extension space */ - void *regs; + void __iomem *regs; } mmio_ext; /** @mem: memory management info for tile */ @@ -301,7 +301,7 @@ struct xe_device { /** @size: size of MMIO space for device */ size_t size; /** @regs: pointer to MMIO space for device */ - void *regs; + void __iomem *regs; } mmio; /** @mem: memory info for device */ -- cgit v1.2.3 From 98949068eb559a31f162ab37f56a89bf6c3698ad Mon Sep 17 00:00:00 2001 From: Thomas Hellström Date: Tue, 9 Jan 2024 12:24:05 +0100 Subject: drm/xe: Annotate xe_ttm_stolen_mgr::mapping with __iomem MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The pointer points to IO memory, but the __iomem annotation was incorrectly placed. Annotate it correctly, update its usage accordingly and fix the corresponding sparse error. 
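The subtle part in this and the xe_mem_region patch above is where the attribute sits in the declaration; a small before/after contrast for illustration:

  /* before: the attribute follows the '*', so it applies to the pointer
   * variable itself; the pointed-to memory is not treated as __iomem,
   * which is not what sparse needs in order to check the accesses
   */
  void *__iomem mapping;

  /* after: a pointer to __iomem memory, which is what the accessors
   * (readl()/writel(), memcpy_fromio(), ...) expect
   */
  void __iomem *mapping;
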
Fixes: d8b52a02cb40 ("drm/xe: Implement stolen memory.") Cc: Maarten Lankhorst Cc: Matthew Brost Cc: Rodrigo Vivi Signed-off-by: Thomas Hellström Reviewed-by: Lucas De Marchi Link: https://patchwork.freedesktop.org/patch/msgid/20240109112405.108136-5-thomas.hellstrom@linux.intel.com (cherry picked from commit dcddb6f0b06d454c9a3b2b240a43f0e7310c7f7c) Signed-off-by: Thomas Hellström --- drivers/gpu/drm/xe/xe_ttm_stolen_mgr.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/xe/xe_ttm_stolen_mgr.c b/drivers/gpu/drm/xe/xe_ttm_stolen_mgr.c index d2b00d0bf1e2..e5d7d5e2bec1 100644 --- a/drivers/gpu/drm/xe/xe_ttm_stolen_mgr.c +++ b/drivers/gpu/drm/xe/xe_ttm_stolen_mgr.c @@ -31,7 +31,7 @@ struct xe_ttm_stolen_mgr { /* GPU base offset */ resource_size_t stolen_base; - void *__iomem mapping; + void __iomem *mapping; }; static inline struct xe_ttm_stolen_mgr * @@ -275,7 +275,7 @@ static int __xe_ttm_stolen_io_mem_reserve_bar2(struct xe_device *xe, drm_WARN_ON(&xe->drm, !(mem->placement & TTM_PL_FLAG_CONTIGUOUS)); if (mem->placement & TTM_PL_FLAG_CONTIGUOUS && mgr->mapping) - mem->bus.addr = (u8 *)mgr->mapping + mem->bus.offset; + mem->bus.addr = (u8 __force *)mgr->mapping + mem->bus.offset; mem->bus.offset += mgr->io_base; mem->bus.is_iomem = true; -- cgit v1.2.3 From fef257eb6dcb9f39baee9ac44f064cd796ecfd0b Mon Sep 17 00:00:00 2001 From: Brian Welty Date: Fri, 5 Jan 2024 11:04:39 -0800 Subject: drm/xe: Fix guc_exec_queue_set_priority MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We need to set q->priority prior to calling guc_exec_queue_add_msg() as that will call init_policies() and sets the scheduling properties to those stored in the exec_queue. Fixes: dd08ebf6c352 ("drm/xe: Introduce a new DRM driver for Intel GPUs") Signed-off-by: Brian Welty Signed-off-by: Matthew Brost Reviewed-by: Matthew Brost (cherry picked from commit b16483f9f8120b530327879fa3ea576e897946da) Signed-off-by: Thomas Hellström --- drivers/gpu/drm/xe/xe_guc_submit.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/xe/xe_guc_submit.c b/drivers/gpu/drm/xe/xe_guc_submit.c index 21ac68e3246f..5de3ac47c462 100644 --- a/drivers/gpu/drm/xe/xe_guc_submit.c +++ b/drivers/gpu/drm/xe/xe_guc_submit.c @@ -1308,8 +1308,8 @@ static int guc_exec_queue_set_priority(struct xe_exec_queue *q, if (!msg) return -ENOMEM; - guc_exec_queue_add_msg(q, msg, SET_SCHED_PROPS); q->priority = priority; + guc_exec_queue_add_msg(q, msg, SET_SCHED_PROPS); return 0; } -- cgit v1.2.3 From 19c02225242498eea9267d444ee1276016368d49 Mon Sep 17 00:00:00 2001 From: Brian Welty Date: Fri, 5 Jan 2024 11:04:40 -0800 Subject: drm/xe: Fix modifying exec_queue priority in xe_migrate_init MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit After exec_queue has been created, we cannot simply modify q->priority. This needs to be done by the backend via q->ops. However in this case, it would be more efficient to simply pass a flag when creating the exec_queue and set the desired priority upfront during queue creation. To that end: new flag EXEC_QUEUE_FLAG_HIGH_PRIORITY is introduced. The priority field is moved to be with other scheduling properties and is now exec_queue.sched_props.priority. This is no longer set to initial value by the backend, but is now set within __xe_exec_queue_create(). 
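Condensed from the hunks below, the old versus the intended flow in xe_migrate_init() looks roughly like this (sketch only):

  /* old (removed): write the field directly after creation, which
   * bypasses the backend; priority changes are meant to go through
   * q->ops, i.e. guc_exec_queue_set_priority() and its SET_SCHED_PROPS
   * message
   */
  if (xe->info.has_usm)
          m->q->priority = XE_EXEC_QUEUE_PRIORITY_KERNEL;

  /* new: request the priority up front with a creation flag and let
   * __xe_exec_queue_create() derive sched_props.priority from it
   */
  m->q = xe_exec_queue_create(xe, vm, logical_mask, 1, hwe,
                              EXEC_QUEUE_FLAG_KERNEL |
                              EXEC_QUEUE_FLAG_PERMANENT |
                              EXEC_QUEUE_FLAG_HIGH_PRIORITY);
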
Fixes: b4eecedc75c1 ("drm/xe: Fix potential deadlock handling page faults") Signed-off-by: Brian Welty Signed-off-by: Matthew Brost Reviewed-by: Matthew Brost (cherry picked from commit a8004af338f6b3319476ecbed63ea49bf393fc1f) Signed-off-by: Thomas Hellström --- drivers/gpu/drm/xe/xe_exec_queue.c | 5 +++++ drivers/gpu/drm/xe/xe_exec_queue_types.h | 6 ++++-- drivers/gpu/drm/xe/xe_guc_submit.c | 7 +++---- drivers/gpu/drm/xe/xe_migrate.c | 5 ++--- 4 files changed, 14 insertions(+), 9 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/xe/xe_exec_queue.c b/drivers/gpu/drm/xe/xe_exec_queue.c index 44fe8097b7cd..bcfc4127c7c5 100644 --- a/drivers/gpu/drm/xe/xe_exec_queue.c +++ b/drivers/gpu/drm/xe/xe_exec_queue.c @@ -67,6 +67,11 @@ static struct xe_exec_queue *__xe_exec_queue_create(struct xe_device *xe, q->sched_props.timeslice_us = hwe->eclass->sched_props.timeslice_us; q->sched_props.preempt_timeout_us = hwe->eclass->sched_props.preempt_timeout_us; + if (q->flags & EXEC_QUEUE_FLAG_KERNEL && + q->flags & EXEC_QUEUE_FLAG_HIGH_PRIORITY) + q->sched_props.priority = XE_EXEC_QUEUE_PRIORITY_KERNEL; + else + q->sched_props.priority = XE_EXEC_QUEUE_PRIORITY_NORMAL; if (xe_exec_queue_is_parallel(q)) { q->parallel.composite_fence_ctx = dma_fence_context_alloc(1); diff --git a/drivers/gpu/drm/xe/xe_exec_queue_types.h b/drivers/gpu/drm/xe/xe_exec_queue_types.h index 3d7e704ec3d9..8d4b7feb8c30 100644 --- a/drivers/gpu/drm/xe/xe_exec_queue_types.h +++ b/drivers/gpu/drm/xe/xe_exec_queue_types.h @@ -52,8 +52,6 @@ struct xe_exec_queue { struct xe_vm *vm; /** @class: class of this exec queue */ enum xe_engine_class class; - /** @priority: priority of this exec queue */ - enum xe_exec_queue_priority priority; /** * @logical_mask: logical mask of where job submitted to exec queue can run */ @@ -84,6 +82,8 @@ struct xe_exec_queue { #define EXEC_QUEUE_FLAG_VM BIT(4) /* child of VM queue for multi-tile VM jobs */ #define EXEC_QUEUE_FLAG_BIND_ENGINE_CHILD BIT(5) +/* kernel exec_queue only, set priority to highest level */ +#define EXEC_QUEUE_FLAG_HIGH_PRIORITY BIT(6) /** * @flags: flags for this exec queue, should statically setup aside from ban @@ -142,6 +142,8 @@ struct xe_exec_queue { u32 timeslice_us; /** @preempt_timeout_us: preemption timeout in micro-seconds */ u32 preempt_timeout_us; + /** @priority: priority of this exec queue */ + enum xe_exec_queue_priority priority; } sched_props; /** @compute: compute exec queue state */ diff --git a/drivers/gpu/drm/xe/xe_guc_submit.c b/drivers/gpu/drm/xe/xe_guc_submit.c index 5de3ac47c462..54ffcfcdd41f 100644 --- a/drivers/gpu/drm/xe/xe_guc_submit.c +++ b/drivers/gpu/drm/xe/xe_guc_submit.c @@ -421,7 +421,7 @@ static void init_policies(struct xe_guc *guc, struct xe_exec_queue *q) { struct exec_queue_policy policy; struct xe_device *xe = guc_to_xe(guc); - enum xe_exec_queue_priority prio = q->priority; + enum xe_exec_queue_priority prio = q->sched_props.priority; u32 timeslice_us = q->sched_props.timeslice_us; u32 preempt_timeout_us = q->sched_props.preempt_timeout_us; @@ -1231,7 +1231,6 @@ static int guc_exec_queue_init(struct xe_exec_queue *q) err = xe_sched_entity_init(&ge->entity, sched); if (err) goto err_sched; - q->priority = XE_EXEC_QUEUE_PRIORITY_NORMAL; if (xe_exec_queue_is_lr(q)) INIT_WORK(&q->guc->lr_tdr, xe_guc_exec_queue_lr_cleanup); @@ -1301,14 +1300,14 @@ static int guc_exec_queue_set_priority(struct xe_exec_queue *q, { struct xe_sched_msg *msg; - if (q->priority == priority || exec_queue_killed_or_banned(q)) + if (q->sched_props.priority == 
priority || exec_queue_killed_or_banned(q)) return 0; msg = kmalloc(sizeof(*msg), GFP_KERNEL); if (!msg) return -ENOMEM; - q->priority = priority; + q->sched_props.priority = priority; guc_exec_queue_add_msg(q, msg, SET_SCHED_PROPS); return 0; diff --git a/drivers/gpu/drm/xe/xe_migrate.c b/drivers/gpu/drm/xe/xe_migrate.c index adf1dab5eba2..02fca8f9adc2 100644 --- a/drivers/gpu/drm/xe/xe_migrate.c +++ b/drivers/gpu/drm/xe/xe_migrate.c @@ -344,7 +344,8 @@ struct xe_migrate *xe_migrate_init(struct xe_tile *tile) m->q = xe_exec_queue_create(xe, vm, logical_mask, 1, hwe, EXEC_QUEUE_FLAG_KERNEL | - EXEC_QUEUE_FLAG_PERMANENT); + EXEC_QUEUE_FLAG_PERMANENT | + EXEC_QUEUE_FLAG_HIGH_PRIORITY); } else { m->q = xe_exec_queue_create_class(xe, primary_gt, vm, XE_ENGINE_CLASS_COPY, @@ -355,8 +356,6 @@ struct xe_migrate *xe_migrate_init(struct xe_tile *tile) xe_vm_close_and_put(vm); return ERR_CAST(m->q); } - if (xe->info.has_usm) - m->q->priority = XE_EXEC_QUEUE_PRIORITY_KERNEL; mutex_init(&m->job_mutex); -- cgit v1.2.3 From 23ca3d2fe367794d2816530fa6b141339fddc1c6 Mon Sep 17 00:00:00 2001 From: Vinay Belgaumkar Date: Mon, 8 Jan 2024 14:58:42 -0800 Subject: drm/xe: Check skip_guc_pc before setting SLPC flag MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Don't set SLPC GuC feature ctl flag if skip_guc_pc is true. v2: Skip the freq related sysfs creation as well (Badal) v3: Remove unnecessary parenthesis (Lucas) Fixes: 975e4a3795d4 ("drm/xe: Manually setup C6 when skip_guc_pc is set") Fixes: bef52b5c7a19 ("drm/xe: Create a xe_gt_freq component for raw management and sysfs") Reviewed-by: Lucas De Marchi Signed-off-by: Vinay Belgaumkar Link: https://lore.kernel.org/r/20240108225842.966066-1-vinay.belgaumkar@intel.com Signed-off-by: Rodrigo Vivi (cherry picked from commit 69cac0a8f3ef8db4d62441c4a2686ec676c9facd) Signed-off-by: Thomas Hellström --- drivers/gpu/drm/xe/xe_gt_freq.c | 3 +++ drivers/gpu/drm/xe/xe_guc.c | 7 ++++++- 2 files changed, 9 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/xe/xe_gt_freq.c b/drivers/gpu/drm/xe/xe_gt_freq.c index 3adfa6686e7c..e5b0f4ecdbe8 100644 --- a/drivers/gpu/drm/xe/xe_gt_freq.c +++ b/drivers/gpu/drm/xe/xe_gt_freq.c @@ -196,6 +196,9 @@ void xe_gt_freq_init(struct xe_gt *gt) struct xe_device *xe = gt_to_xe(gt); int err; + if (xe->info.skip_guc_pc) + return; + gt->freq = kobject_create_and_add("freq0", gt->sysfs); if (!gt->freq) { drm_warn(&xe->drm, "failed to add freq0 directory to %s\n", diff --git a/drivers/gpu/drm/xe/xe_guc.c b/drivers/gpu/drm/xe/xe_guc.c index 482cb0df9f15..0a61390c64a7 100644 --- a/drivers/gpu/drm/xe/xe_guc.c +++ b/drivers/gpu/drm/xe/xe_guc.c @@ -60,7 +60,12 @@ static u32 guc_ctl_debug_flags(struct xe_guc *guc) static u32 guc_ctl_feature_flags(struct xe_guc *guc) { - return GUC_CTL_ENABLE_SLPC; + u32 flags = 0; + + if (!guc_to_xe(guc)->info.skip_guc_pc) + flags |= GUC_CTL_ENABLE_SLPC; + + return flags; } static u32 guc_ctl_log_params_flags(struct xe_guc *guc) -- cgit v1.2.3 From 190db3b1da8f40131d6153de7469abce16766302 Mon Sep 17 00:00:00 2001 From: "Paul E. 
McKenney" Date: Wed, 10 Jan 2024 06:48:29 -0800 Subject: drm/xe: Fix build bug for GCC 11 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Building drivers/gpu/drm/xe/xe_gt_pagefault.c with GCC 11 results in the following build errors: ./include/linux/fortify-string.h:57:33: error: writing 16 bytes into a region of size 0 [-Werror=stringop-overflow=] 57 | #define __underlying_memcpy __builtin_memcpy | ^ ./include/linux/fortify-string.h:644:9: note: in expansion of macro ‘__underlying_memcpy’ 644 | __underlying_##op(p, q, __fortify_size); \ | ^~~~~~~~~~~~~ ./include/linux/fortify-string.h:689:26: note: in expansion of macro ‘__fortify_memcpy_chk’ 689 | #define memcpy(p, q, s) __fortify_memcpy_chk(p, q, s, \ | ^~~~~~~~~~~~~~~~~~~~ drivers/gpu/drm/xe/xe_gt_pagefault.c:340:17: note: in expansion of macro ‘memcpy’ 340 | memcpy(pf_queue->data + pf_queue->tail, msg, len * sizeof(u32)); | ^~~~~~ In file included from drivers/gpu/drm/xe/xe_device_types.h:17, from drivers/gpu/drm/xe/xe_vm_types.h:16, from drivers/gpu/drm/xe/xe_bo.h:13, from drivers/gpu/drm/xe/xe_gt_pagefault.c:16: drivers/gpu/drm/xe/xe_gt_types.h:102:25: note: at offset [1144, 265324] into destination object ‘tile’ of size 8 102 | struct xe_tile *tile; | ^~~~ Fix these by removing -Wstringop-overflow from drm/xe builds. Closes: https://lore.kernel.org/all/45ad1d0f-a10f-483e-848a-76a30252edbe@paulmck-laptop/ Fixes: 7a8bc11782d3 ("drm/xe: Enable W=1 warnings by default") Suggested-by: Stephen Rothwell Signed-off-by: Paul E. McKenney [ This particular warning is broken on GCC11. In future changes it will be moved to the normal C flags in the top level Makefile (out of Makefile.extrawarn), but accounting for the compiler support. Just remove it out of xe's forced extra warnings for now ] Signed-off-by: Lucas De Marchi (cherry picked from commit a109d19992294736abd4f4232ea639e03eb1f9e7) Signed-off-by: Thomas Hellström --- drivers/gpu/drm/xe/Makefile | 1 - 1 file changed, 1 deletion(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/xe/Makefile b/drivers/gpu/drm/xe/Makefile index 53bd2a8ba1ae..efcf0ab7a1a6 100644 --- a/drivers/gpu/drm/xe/Makefile +++ b/drivers/gpu/drm/xe/Makefile @@ -17,7 +17,6 @@ subdir-ccflags-y += $(call cc-option, -Wunused-const-variable) subdir-ccflags-y += $(call cc-option, -Wpacked-not-aligned) subdir-ccflags-y += $(call cc-option, -Wformat-overflow) subdir-ccflags-y += $(call cc-option, -Wformat-truncation) -subdir-ccflags-y += $(call cc-option, -Wstringop-overflow) subdir-ccflags-y += $(call cc-option, -Wstringop-truncation) # The following turn off the warnings enabled by -Wextra ifeq ($(findstring 2, $(KBUILD_EXTRA_WARN)),) -- cgit v1.2.3 From ffd915e41a4a2277fd8041dc77603df59acf3e01 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Fri, 5 Jan 2024 15:22:23 +0300 Subject: drm/xe/device: clean up on error in probe() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This error path should clean up before returning. Smatch detected this bug: drivers/gpu/drm/xe/xe_device.c:487 xe_device_probe() warn: missing unwind goto? 
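The fix follows the usual kernel unwind idiom; a generic sketch with made-up helper names (not the actual xe probe code):

  int setup_irq(void);
  int probe_vram(void);
  void shutdown_irq(void);

  static int probe_sketch(void)
  {
          int err;

          err = setup_irq();              /* acquired first ... */
          if (err)
                  return err;

          err = probe_vram();
          if (err)
                  goto err_irq_shutdown;  /* ... so any later failure
                                           * jumps to the unwind label
                                           * instead of returning */
          return 0;

  err_irq_shutdown:
          shutdown_irq();
          return err;
  }
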
Fixes: 4cb12b71923b ("drm/xe/xe2: Determine bios enablement for flat ccs on igfx") Signed-off-by: Dan Carpenter Signed-off-by: Matthew Brost Reviewed-by: Matthew Brost (cherry picked from commit c10da95afa68060e13c5f920d96671943a7e54d9) Signed-off-by: Thomas Hellström --- drivers/gpu/drm/xe/xe_device.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/xe/xe_device.c b/drivers/gpu/drm/xe/xe_device.c index d9ae77fe7382..b8d8da546670 100644 --- a/drivers/gpu/drm/xe/xe_device.c +++ b/drivers/gpu/drm/xe/xe_device.c @@ -484,7 +484,7 @@ int xe_device_probe(struct xe_device *xe) err = xe_device_set_has_flat_ccs(xe); if (err) - return err; + goto err_irq_shutdown; err = xe_mmio_probe_vram(xe); if (err) -- cgit v1.2.3 From 616576df35193bbadac31dc42a32d5943e183f45 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Fri, 5 Jan 2024 15:20:35 +0300 Subject: drm/xe/selftests: Fix an error pointer dereference bug MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Check if "bo" is an error pointer before calling xe_bo_lock() on it. Fixes: d6abc18d6693 ("drm/xe/xe2: Modify xe_bo_test for system memory") Signed-off-by: Dan Carpenter Signed-off-by: Matthew Brost Reviewed-by: Matthew Brost (cherry picked from commit 88ec23528b32ddb9ce2e8492f2629b0056353697) Signed-off-by: Thomas Hellström --- drivers/gpu/drm/xe/tests/xe_bo.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/xe/tests/xe_bo.c b/drivers/gpu/drm/xe/tests/xe_bo.c index 412b2e7ce40c..3436fd9cf2b2 100644 --- a/drivers/gpu/drm/xe/tests/xe_bo.c +++ b/drivers/gpu/drm/xe/tests/xe_bo.c @@ -125,14 +125,13 @@ static void ccs_test_run_tile(struct xe_device *xe, struct xe_tile *tile, bo = xe_bo_create_user(xe, NULL, NULL, SZ_1M, DRM_XE_GEM_CPU_CACHING_WC, ttm_bo_type_device, bo_flags); - - xe_bo_lock(bo, false); - if (IS_ERR(bo)) { KUNIT_FAIL(test, "Failed to create bo.\n"); return; } + xe_bo_lock(bo, false); + kunit_info(test, "Verifying that CCS data is cleared on creation.\n"); ret = ccs_test_migrate(tile, bo, false, 0ULL, 0xdeadbeefdeadbeefULL, test); -- cgit v1.2.3 From ec32f4f1bed87f0b87b9b0091231c8685db1138c Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Fri, 5 Jan 2024 15:20:22 +0300 Subject: drm/xe: unlock on error path in xe_vm_add_compute_exec_queue() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Drop the "&vm->lock" before returning. 
Fixes: 24f947d58fe5 ("drm/xe: Use DRM GPUVM helpers for external- and evicted objects") Signed-off-by: Dan Carpenter Signed-off-by: Matthew Brost Reviewed-by: Matthew Brost (cherry picked from commit cf46019e8550a810cc023af7aa020ba43103b44d) Signed-off-by: Thomas Hellström --- drivers/gpu/drm/xe/xe_vm.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c index b0e3cab6a584..10b6995fbf29 100644 --- a/drivers/gpu/drm/xe/xe_vm.c +++ b/drivers/gpu/drm/xe/xe_vm.c @@ -335,13 +335,13 @@ int xe_vm_add_compute_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q) down_write(&vm->lock); err = drm_gpuvm_exec_lock(&vm_exec); if (err) - return err; + goto out_up_write; pfence = xe_preempt_fence_create(q, q->compute.context, ++q->compute.seqno); if (!pfence) { err = -ENOMEM; - goto out_unlock; + goto out_fini; } list_add(&q->compute.link, &vm->preempt.exec_queues); @@ -364,8 +364,9 @@ int xe_vm_add_compute_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q) up_read(&vm->userptr.notifier_lock); -out_unlock: +out_fini: drm_exec_fini(exec); +out_up_write: up_write(&vm->lock); return err; -- cgit v1.2.3 From 7425c43c268f859426d02ccb3f043bdbae31cca9 Mon Sep 17 00:00:00 2001 From: Thomas Hellström Date: Wed, 10 Jan 2024 17:34:15 +0100 Subject: drm/xe/migrate: Fix CCS copy for small VRAM copy chunks MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Since the migrate code is using the identity map for addressing VRAM, copy chunks may become as small as 64K if the VRAM resource is fragmented. However, a chunk size smaller that 1MiB may lead to the *next* chunk's offset into the CCS metadata backup memory may not be page-aligned, and the XY_CTRL_SURF_COPY_BLT command can't handle that, and even if it could, the current code doesn't handle the offset calculaton correctly. To fix this, make sure we align the size of VRAM copy chunks to 1MiB. If the remaining data to copy is smaller than that, that's not a problem, so use the remaining size. If the VRAM copy cunk becomes fragmented due to the size alignment restriction, don't use the identity map, but instead emit PTEs into the page-table like we do for system memory. v2: - Rebase v3: - Future proof somewhat by taking into account the real data size to flat CCS metadata size ratio. (Matt Roper) - Invert a couple of if-statements for better readability. - Fix support for 4K-granularity VRAM sizes. (Tested on DG1). v4: - Fix up code comments - Fix debug printout format typo. v5: - Add a Fixes: tag. 
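As a worked example of the chunk-size choice described above (assuming the common flat-CCS ratio of 256 bytes of main memory per byte of CCS metadata; the exact ratio is whatever xe_device_ccs_bytes() reports for the device):

  /*
   *   xe_device_ccs_bytes(xe, SZ_64K) = SZ_64K / 256 = 256 bytes
   *   min_chunk_size = SZ_4K * SZ_64K / 256 = 1 MiB
   *
   * i.e. 1 MiB of VRAM data corresponds to 4 KiB of CCS metadata, so as
   * long as every chunk except the last is a multiple of min_chunk_size,
   * the offset into the CCS backup memory advances in whole 4 KiB pages
   * and stays page-aligned for XY_CTRL_SURF_COPY_BLT.
   */
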
Cc: Matt Roper Cc: Matthew Auld Cc: Matthew Brost Fixes: e89b384cde62 ("drm/xe/migrate: Update emit_pte to cope with a size level than 4k") Signed-off-by: Thomas Hellström Reviewed-by: Matthew Auld Link: https://patchwork.freedesktop.org/patch/msgid/20240110163415.524165-1-thomas.hellstrom@linux.intel.com (cherry picked from commit ef51d7542d143f3fd9a48d4e2c307563661668aa) Signed-off-by: Thomas Hellström --- drivers/gpu/drm/xe/tests/xe_migrate.c | 2 +- drivers/gpu/drm/xe/xe_migrate.c | 128 +++++++++++++++++++++------------- 2 files changed, 80 insertions(+), 50 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/xe/tests/xe_migrate.c b/drivers/gpu/drm/xe/tests/xe_migrate.c index 7a32faa2f688..a6523df0f1d3 100644 --- a/drivers/gpu/drm/xe/tests/xe_migrate.c +++ b/drivers/gpu/drm/xe/tests/xe_migrate.c @@ -331,7 +331,7 @@ static void xe_migrate_sanity_test(struct xe_migrate *m, struct kunit *test) xe_res_first_sg(xe_bo_sg(pt), 0, pt->size, &src_it); emit_pte(m, bb, NUM_KERNEL_PDE - 1, xe_bo_is_vram(pt), false, - &src_it, XE_PAGE_SIZE, pt); + &src_it, XE_PAGE_SIZE, pt->ttm.resource); run_sanity_job(m, xe, bb, bb->len, "Writing PTE for our fake PT", test); diff --git a/drivers/gpu/drm/xe/xe_migrate.c b/drivers/gpu/drm/xe/xe_migrate.c index 02fca8f9adc2..e05e9e7282b6 100644 --- a/drivers/gpu/drm/xe/xe_migrate.c +++ b/drivers/gpu/drm/xe/xe_migrate.c @@ -62,6 +62,8 @@ struct xe_migrate { * out of the pt_bo. */ struct drm_suballoc_manager vm_update_sa; + /** @min_chunk_size: For dgfx, Minimum chunk size */ + u64 min_chunk_size; }; #define MAX_PREEMPTDISABLE_TRANSFER SZ_8M /* Around 1ms. */ @@ -363,6 +365,19 @@ struct xe_migrate *xe_migrate_init(struct xe_tile *tile) if (err) return ERR_PTR(err); + if (IS_DGFX(xe)) { + if (xe_device_has_flat_ccs(xe)) + /* min chunk size corresponds to 4K of CCS Metadata */ + m->min_chunk_size = SZ_4K * SZ_64K / + xe_device_ccs_bytes(xe, SZ_64K); + else + /* Somewhat arbitrary to avoid a huge amount of blits */ + m->min_chunk_size = SZ_64K; + m->min_chunk_size = roundup_pow_of_two(m->min_chunk_size); + drm_dbg(&xe->drm, "Migrate min chunk size is 0x%08llx\n", + (unsigned long long)m->min_chunk_size); + } + return m; } @@ -374,16 +389,35 @@ static u64 max_mem_transfer_per_pass(struct xe_device *xe) return MAX_PREEMPTDISABLE_TRANSFER; } -static u64 xe_migrate_res_sizes(struct xe_device *xe, struct xe_res_cursor *cur) +static u64 xe_migrate_res_sizes(struct xe_migrate *m, struct xe_res_cursor *cur) { - /* - * For VRAM we use identity mapped pages so we are limited to current - * cursor size. For system we program the pages ourselves so we have no - * such limitation. - */ - return min_t(u64, max_mem_transfer_per_pass(xe), - mem_type_is_vram(cur->mem_type) ? cur->size : - cur->remaining); + struct xe_device *xe = tile_to_xe(m->tile); + u64 size = min_t(u64, max_mem_transfer_per_pass(xe), cur->remaining); + + if (mem_type_is_vram(cur->mem_type)) { + /* + * VRAM we want to blit in chunks with sizes aligned to + * min_chunk_size in order for the offset to CCS metadata to be + * page-aligned. If it's the last chunk it may be smaller. + * + * Another constraint is that we need to limit the blit to + * the VRAM block size, unless size is smaller than + * min_chunk_size. 
+ */ + u64 chunk = max_t(u64, cur->size, m->min_chunk_size); + + size = min_t(u64, size, chunk); + if (size > m->min_chunk_size) + size = round_down(size, m->min_chunk_size); + } + + return size; +} + +static bool xe_migrate_allow_identity(u64 size, const struct xe_res_cursor *cur) +{ + /* If the chunk is not fragmented, allow identity map. */ + return cur->size >= size; } static u32 pte_update_size(struct xe_migrate *m, @@ -396,7 +430,12 @@ static u32 pte_update_size(struct xe_migrate *m, u32 cmds = 0; *L0_pt = pt_ofs; - if (!is_vram) { + if (is_vram && xe_migrate_allow_identity(*L0, cur)) { + /* Offset into identity map. */ + *L0_ofs = xe_migrate_vram_ofs(tile_to_xe(m->tile), + cur->start + vram_region_gpu_offset(res)); + cmds += cmd_size; + } else { /* Clip L0 to available size */ u64 size = min(*L0, (u64)avail_pts * SZ_2M); u64 num_4k_pages = DIV_ROUND_UP(size, XE_PAGE_SIZE); @@ -412,11 +451,6 @@ static u32 pte_update_size(struct xe_migrate *m, /* Each chunk has a single blit command */ cmds += cmd_size; - } else { - /* Offset into identity map. */ - *L0_ofs = xe_migrate_vram_ofs(tile_to_xe(m->tile), - cur->start + vram_region_gpu_offset(res)); - cmds += cmd_size; } return cmds; @@ -426,10 +460,10 @@ static void emit_pte(struct xe_migrate *m, struct xe_bb *bb, u32 at_pt, bool is_vram, bool is_comp_pte, struct xe_res_cursor *cur, - u32 size, struct xe_bo *bo) + u32 size, struct ttm_resource *res) { struct xe_device *xe = tile_to_xe(m->tile); - + struct xe_vm *vm = m->q->vm; u16 pat_index; u32 ptes; u64 ofs = at_pt * XE_PAGE_SIZE; @@ -442,13 +476,6 @@ static void emit_pte(struct xe_migrate *m, else pat_index = xe->pat.idx[XE_CACHE_WB]; - /* - * FIXME: Emitting VRAM PTEs to L0 PTs is forbidden. Currently - * we're only emitting VRAM PTEs during sanity tests, so when - * that's moved to a Kunit test, we should condition VRAM PTEs - * on running tests. - */ - ptes = DIV_ROUND_UP(size, XE_PAGE_SIZE); while (ptes) { @@ -468,20 +495,22 @@ static void emit_pte(struct xe_migrate *m, addr = xe_res_dma(cur) & PAGE_MASK; if (is_vram) { - /* Is this a 64K PTE entry? 
*/ - if ((m->q->vm->flags & XE_VM_FLAG_64K) && - !(cur_ofs & (16 * 8 - 1))) { - xe_tile_assert(m->tile, IS_ALIGNED(addr, SZ_64K)); + if (vm->flags & XE_VM_FLAG_64K) { + u64 va = cur_ofs * XE_PAGE_SIZE / 8; + + xe_assert(xe, (va & (SZ_64K - 1)) == + (addr & (SZ_64K - 1))); + flags |= XE_PTE_PS64; } - addr += vram_region_gpu_offset(bo->ttm.resource); + addr += vram_region_gpu_offset(res); devmem = true; } - addr = m->q->vm->pt_ops->pte_encode_addr(m->tile->xe, - addr, pat_index, - 0, devmem, flags); + addr = vm->pt_ops->pte_encode_addr(m->tile->xe, + addr, pat_index, + 0, devmem, flags); bb->cs[bb->len++] = lower_32_bits(addr); bb->cs[bb->len++] = upper_32_bits(addr); @@ -693,8 +722,8 @@ struct dma_fence *xe_migrate_copy(struct xe_migrate *m, bool usm = xe->info.has_usm; u32 avail_pts = max_mem_transfer_per_pass(xe) / LEVEL0_PAGE_TABLE_ENCODE_SIZE; - src_L0 = xe_migrate_res_sizes(xe, &src_it); - dst_L0 = xe_migrate_res_sizes(xe, &dst_it); + src_L0 = xe_migrate_res_sizes(m, &src_it); + dst_L0 = xe_migrate_res_sizes(m, &dst_it); drm_dbg(&xe->drm, "Pass %u, sizes: %llu & %llu\n", pass++, src_L0, dst_L0); @@ -715,6 +744,7 @@ struct dma_fence *xe_migrate_copy(struct xe_migrate *m, &ccs_ofs, &ccs_pt, 0, 2 * avail_pts, avail_pts); + xe_assert(xe, IS_ALIGNED(ccs_it.start, PAGE_SIZE)); } /* Add copy commands size here */ @@ -727,20 +757,20 @@ struct dma_fence *xe_migrate_copy(struct xe_migrate *m, goto err_sync; } - if (!src_is_vram) - emit_pte(m, bb, src_L0_pt, src_is_vram, true, &src_it, src_L0, - src_bo); - else + if (src_is_vram && xe_migrate_allow_identity(src_L0, &src_it)) xe_res_next(&src_it, src_L0); - - if (!dst_is_vram) - emit_pte(m, bb, dst_L0_pt, dst_is_vram, true, &dst_it, src_L0, - dst_bo); else + emit_pte(m, bb, src_L0_pt, src_is_vram, true, &src_it, src_L0, + src); + + if (dst_is_vram && xe_migrate_allow_identity(src_L0, &dst_it)) xe_res_next(&dst_it, src_L0); + else + emit_pte(m, bb, dst_L0_pt, dst_is_vram, true, &dst_it, src_L0, + dst); if (copy_system_ccs) - emit_pte(m, bb, ccs_pt, false, false, &ccs_it, ccs_size, src_bo); + emit_pte(m, bb, ccs_pt, false, false, &ccs_it, ccs_size, src); bb->cs[bb->len++] = MI_BATCH_BUFFER_END; update_idx = bb->len; @@ -949,7 +979,7 @@ struct dma_fence *xe_migrate_clear(struct xe_migrate *m, bool usm = xe->info.has_usm; u32 avail_pts = max_mem_transfer_per_pass(xe) / LEVEL0_PAGE_TABLE_ENCODE_SIZE; - clear_L0 = xe_migrate_res_sizes(xe, &src_it); + clear_L0 = xe_migrate_res_sizes(m, &src_it); drm_dbg(&xe->drm, "Pass %u, size: %llu\n", pass++, clear_L0); @@ -976,12 +1006,12 @@ struct dma_fence *xe_migrate_clear(struct xe_migrate *m, size -= clear_L0; /* Preemption is enabled again by the ring ops. */ - if (!clear_vram) { - emit_pte(m, bb, clear_L0_pt, clear_vram, true, &src_it, clear_L0, - bo); - } else { + if (clear_vram && xe_migrate_allow_identity(clear_L0, &src_it)) xe_res_next(&src_it, clear_L0); - } + else + emit_pte(m, bb, clear_L0_pt, clear_vram, true, &src_it, clear_L0, + dst); + bb->cs[bb->len++] = MI_BATCH_BUFFER_END; update_idx = bb->len; -- cgit v1.2.3 From 8049e3954aeaaeb488cd4e371526721c7fca297e Mon Sep 17 00:00:00 2001 From: Brian Welty Date: Wed, 10 Jan 2024 16:21:11 -0800 Subject: drm/xe: Fix bounds checking in __xe_bo_placement_for_flags() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Requesting all memory regions on PVC will fill bo->placements up to XE_BO_MAX_PLACEMENTS. 
The subsequent call to try_add_stolen() will trip over the bounds checking even though XE_PL_STOLEN is not expected to be used in this case. This is hit with igt@xe_exec_fault_mode@once-basic-prefetch: xe 0000:8c:00.0: [drm] Assertion `*c < (sizeof(bo->placements) / sizeof((bo->placements)[0]) + ((int)(sizeof(struct { int:(-!!(__builtin_types_compatible_p(typeof((bo->placements)), typeof(&(bo->placements)[0])))); }))))` failed! WARNING: CPU: 30 PID: 6161 at drivers/gpu/drm/xe/xe_bo.c:203 __xe_bo_placement_for_flags+0x218/0x240 [xe] Is fixed here by moving the bounds checks closer to where we actually write into the bo->placement array. Fixes: 8c54ee8a8606 ("drm/xe: Ensure that we don't access the placements array out-of-bounds") Link: https://patchwork.freedesktop.org/patch/msgid/20240111002111.10190-1-brian.welty@intel.com Signed-off-by: Matthew Brost Signed-off-by: Brian Welty Reviewed-by: Matthew Brost (cherry picked from commit 52e3fa3e3ea3ee05e32c1a8d72bb3ae306a4da64) Signed-off-by: Thomas Hellström --- drivers/gpu/drm/xe/xe_bo.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/xe/xe_bo.c b/drivers/gpu/drm/xe/xe_bo.c index 3cd29bd015a0..0b0e262e2166 100644 --- a/drivers/gpu/drm/xe/xe_bo.c +++ b/drivers/gpu/drm/xe/xe_bo.c @@ -125,9 +125,9 @@ static struct xe_mem_region *res_to_mem_region(struct ttm_resource *res) static void try_add_system(struct xe_device *xe, struct xe_bo *bo, u32 bo_flags, u32 *c) { - xe_assert(xe, *c < ARRAY_SIZE(bo->placements)); - if (bo_flags & XE_BO_CREATE_SYSTEM_BIT) { + xe_assert(xe, *c < ARRAY_SIZE(bo->placements)); + bo->placements[*c] = (struct ttm_place) { .mem_type = XE_PL_TT, }; @@ -145,6 +145,8 @@ static void add_vram(struct xe_device *xe, struct xe_bo *bo, struct xe_mem_region *vram; u64 io_size; + xe_assert(xe, *c < ARRAY_SIZE(bo->placements)); + vram = to_xe_ttm_vram_mgr(ttm_manager_type(&xe->ttm, mem_type))->vram; xe_assert(xe, vram && vram->usable_size); io_size = vram->io_size; @@ -175,8 +177,6 @@ static void add_vram(struct xe_device *xe, struct xe_bo *bo, static void try_add_vram(struct xe_device *xe, struct xe_bo *bo, u32 bo_flags, u32 *c) { - xe_assert(xe, *c < ARRAY_SIZE(bo->placements)); - if (bo->props.preferred_gt == XE_GT1) { if (bo_flags & XE_BO_CREATE_VRAM1_BIT) add_vram(xe, bo, bo->placements, bo_flags, XE_PL_VRAM1, c); @@ -193,9 +193,9 @@ static void try_add_vram(struct xe_device *xe, struct xe_bo *bo, static void try_add_stolen(struct xe_device *xe, struct xe_bo *bo, u32 bo_flags, u32 *c) { - xe_assert(xe, *c < ARRAY_SIZE(bo->placements)); - if (bo_flags & XE_BO_CREATE_STOLEN_BIT) { + xe_assert(xe, *c < ARRAY_SIZE(bo->placements)); + bo->placements[*c] = (struct ttm_place) { .mem_type = XE_PL_STOLEN, .flags = bo_flags & (XE_BO_CREATE_PINNED_BIT | -- cgit v1.2.3 From bf3ff145df184698a8a80b33265064638572366f Mon Sep 17 00:00:00 2001 From: Jani Nikula Date: Thu, 11 Jan 2024 12:47:16 +0200 Subject: drm/xe: display support should not depend on EXPERT MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Remove the DRM_XE_DISPLAY config dependency on EXPERT. I can only presume the idea was only experts should be able to disable it, but the effect is the opposite. 
Reported-by: Eero Tamminen Reviewed-by: Francois Dugast Signed-off-by: Jani Nikula Link: https://patchwork.freedesktop.org/patch/msgid/20240111104716.3548744-1-jani.nikula@intel.com (cherry picked from commit 1c7531f50eaa425eca8ff726287b8df3a4a51e55) Signed-off-by: Thomas Hellström --- drivers/gpu/drm/xe/Kconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/xe/Kconfig b/drivers/gpu/drm/xe/Kconfig index 1cced50d8d8c..e36ae1f0d885 100644 --- a/drivers/gpu/drm/xe/Kconfig +++ b/drivers/gpu/drm/xe/Kconfig @@ -47,7 +47,7 @@ config DRM_XE config DRM_XE_DISPLAY bool "Enable display support" - depends on DRM_XE && EXPERT && DRM_XE=m + depends on DRM_XE && DRM_XE=m select FB_IOMEM_HELPERS select I2C select I2C_ALGOBIT -- cgit v1.2.3