Diffstat (limited to 'drivers/gpu/drm/xe')
-rw-r--r-- | drivers/gpu/drm/xe/xe_device.c  |  21
-rw-r--r-- | drivers/gpu/drm/xe/xe_migrate.c |  12
-rw-r--r-- | drivers/gpu/drm/xe/xe_pcode.c   | 115
-rw-r--r-- | drivers/gpu/drm/xe/xe_pcode.h   |   6
-rw-r--r-- | drivers/gpu/drm/xe/xe_pm.c      |  36
5 files changed, 115 insertions, 75 deletions
diff --git a/drivers/gpu/drm/xe/xe_device.c b/drivers/gpu/drm/xe/xe_device.c
index d32ff3857e65..b3b37ed832ca 100644
--- a/drivers/gpu/drm/xe/xe_device.c
+++ b/drivers/gpu/drm/xe/xe_device.c
@@ -389,8 +389,14 @@ mask_err:
         return err;
 }
 
-/*
- * Initialize MMIO resources that don't require any knowledge about tile count.
+/**
+ * xe_device_probe_early: Device early probe
+ * @xe: xe device instance
+ *
+ * Initialize MMIO resources that don't require any
+ * knowledge about tile count. Also initialize pcode
+ *
+ * Return: 0 on success, error code on failure
  */
 int xe_device_probe_early(struct xe_device *xe)
 {
@@ -404,6 +410,10 @@ int xe_device_probe_early(struct xe_device *xe)
         if (err)
                 return err;
 
+        err = xe_pcode_probe_early(xe);
+        if (err)
+                return err;
+
         return 0;
 }
 
@@ -482,11 +492,8 @@ int xe_device_probe(struct xe_device *xe)
         if (err)
                 return err;
 
-        for_each_gt(gt, xe, id) {
-                err = xe_pcode_probe(gt);
-                if (err)
-                        return err;
-        }
+        for_each_gt(gt, xe, id)
+                xe_pcode_init(gt);
 
         err = xe_display_init_noirq(xe);
         if (err)
diff --git a/drivers/gpu/drm/xe/xe_migrate.c b/drivers/gpu/drm/xe/xe_migrate.c
index 2ba4fb9511f6..aca519f5b85d 100644
--- a/drivers/gpu/drm/xe/xe_migrate.c
+++ b/drivers/gpu/drm/xe/xe_migrate.c
@@ -33,7 +33,6 @@
 #include "xe_sync.h"
 #include "xe_trace.h"
 #include "xe_vm.h"
-#include "xe_wa.h"
 /**
  * struct xe_migrate - migrate context.
  */
@@ -299,10 +298,6 @@ static int xe_migrate_prepare_vm(struct xe_tile *tile, struct xe_migrate *m,
 }
 
 /*
- * Due to workaround 16017236439, odd instance hardware copy engines are
- * faster than even instance ones.
- * This function returns the mask involving all fast copy engines and the
- * reserved copy engine to be used as logical mask for migrate engine.
  * Including the reserved copy engine is required to avoid deadlocks due to
  * migrate jobs servicing the faults gets stuck behind the job that faulted.
  */
@@ -316,8 +311,7 @@ static u32 xe_migrate_usm_logical_mask(struct xe_gt *gt)
                 if (hwe->class != XE_ENGINE_CLASS_COPY)
                         continue;
 
-                if (!XE_WA(gt, 16017236439) ||
-                    xe_gt_is_usm_hwe(gt, hwe) || hwe->instance & 1)
+                if (xe_gt_is_usm_hwe(gt, hwe))
                         logical_mask |= BIT(hwe->logical_instance);
         }
 
@@ -368,6 +362,10 @@ struct xe_migrate *xe_migrate_init(struct xe_tile *tile)
                 if (!hwe || !logical_mask)
                         return ERR_PTR(-EINVAL);
 
+                /*
+                 * XXX: Currently only reserving 1 (likely slow) BCS instance on
+                 * PVC, may want to revisit if performance is needed.
+                 */
                 m->q = xe_exec_queue_create(xe, vm, logical_mask, 1, hwe,
                                             EXEC_QUEUE_FLAG_KERNEL |
                                             EXEC_QUEUE_FLAG_PERMANENT |
diff --git a/drivers/gpu/drm/xe/xe_pcode.c b/drivers/gpu/drm/xe/xe_pcode.c
index b324dc2a5deb..81f4ae2ea08f 100644
--- a/drivers/gpu/drm/xe/xe_pcode.c
+++ b/drivers/gpu/drm/xe/xe_pcode.c
@@ -10,6 +10,7 @@
 
 #include <drm/drm_managed.h>
 
+#include "xe_device.h"
 #include "xe_gt.h"
 #include "xe_mmio.h"
 #include "xe_pcode_api.h"
@@ -43,8 +44,6 @@ static int pcode_mailbox_status(struct xe_gt *gt)
                 [PCODE_ERROR_MASK] = {-EPROTO, "Unknown"},
         };
 
-        lockdep_assert_held(&gt->pcode.lock);
-
         err = xe_mmio_read32(gt, PCODE_MAILBOX) & PCODE_ERROR_MASK;
         if (err) {
                 drm_err(&gt_to_xe(gt)->drm, "PCODE Mailbox failed: %d %s", err,
@@ -55,17 +54,15 @@ static int pcode_mailbox_status(struct xe_gt *gt)
         return 0;
 }
 
-static int pcode_mailbox_rw(struct xe_gt *gt, u32 mbox, u32 *data0, u32 *data1,
-                            unsigned int timeout_ms, bool return_data,
-                            bool atomic)
+static int __pcode_mailbox_rw(struct xe_gt *gt, u32 mbox, u32 *data0, u32 *data1,
+                              unsigned int timeout_ms, bool return_data,
+                              bool atomic)
 {
         int err;
 
         if (gt_to_xe(gt)->info.skip_pcode)
                 return 0;
 
-        lockdep_assert_held(&gt->pcode.lock);
-
         if ((xe_mmio_read32(gt, PCODE_MAILBOX) & PCODE_READY) != 0)
                 return -EAGAIN;
 
@@ -87,6 +84,18 @@ static int pcode_mailbox_rw(struct xe_gt *gt, u32 mbox, u32 *data0, u32 *data1,
         return pcode_mailbox_status(gt);
 }
 
+static int pcode_mailbox_rw(struct xe_gt *gt, u32 mbox, u32 *data0, u32 *data1,
+                            unsigned int timeout_ms, bool return_data,
+                            bool atomic)
+{
+        if (gt_to_xe(gt)->info.skip_pcode)
+                return 0;
+
+        lockdep_assert_held(&gt->pcode.lock);
+
+        return __pcode_mailbox_rw(gt, mbox, data0, data1, timeout_ms, return_data, atomic);
+}
+
 int xe_pcode_write_timeout(struct xe_gt *gt, u32 mbox, u32 data, int timeout)
 {
         int err;
@@ -109,15 +118,19 @@ int xe_pcode_read(struct xe_gt *gt, u32 mbox, u32 *val, u32 *val1)
         return err;
 }
 
-static int xe_pcode_try_request(struct xe_gt *gt, u32 mbox,
-                                u32 request, u32 reply_mask, u32 reply,
-                                u32 *status, bool atomic, int timeout_us)
+static int pcode_try_request(struct xe_gt *gt, u32 mbox,
+                             u32 request, u32 reply_mask, u32 reply,
+                             u32 *status, bool atomic, int timeout_us, bool locked)
 {
         int slept, wait = 10;
 
         for (slept = 0; slept < timeout_us; slept += wait) {
-                *status = pcode_mailbox_rw(gt, mbox, &request, NULL, 1, true,
-                                           atomic);
+                if (locked)
+                        *status = pcode_mailbox_rw(gt, mbox, &request, NULL, 1, true,
+                                                   atomic);
+                else
+                        *status = __pcode_mailbox_rw(gt, mbox, &request, NULL, 1, true,
+                                                     atomic);
                 if ((*status == 0) && ((request & reply_mask) == reply))
                         return 0;
 
@@ -158,8 +171,8 @@ int xe_pcode_request(struct xe_gt *gt, u32 mbox, u32 request,
 
         mutex_lock(&gt->pcode.lock);
 
-        ret = xe_pcode_try_request(gt, mbox, request, reply_mask, reply, &status,
-                                   false, timeout_base_ms * 1000);
+        ret = pcode_try_request(gt, mbox, request, reply_mask, reply, &status,
+                                false, timeout_base_ms * 1000, true);
         if (!ret)
                 goto out;
 
@@ -177,8 +190,8 @@ int xe_pcode_request(struct xe_gt *gt, u32 mbox, u32 request,
                         "PCODE timeout, retrying with preemption disabled\n");
                 drm_WARN_ON_ONCE(&gt_to_xe(gt)->drm, timeout_base_ms > 1);
                 preempt_disable();
-                ret = xe_pcode_try_request(gt, mbox, request, reply_mask, reply, &status,
-                                           true, timeout_base_ms * 1000);
+                ret = pcode_try_request(gt, mbox, request, reply_mask, reply, &status,
+                                        true, 50 * 1000, true);
                 preempt_enable();
 
 out:
@@ -238,59 +251,71 @@ unlock:
 }
 
 /**
- * xe_pcode_init - Ensure PCODE is initialized
- * @gt: gt instance
+ * xe_pcode_ready - Ensure PCODE is initialized
+ * @xe: xe instance
+ * @locked: true if lock held, false otherwise
  *
- * This function ensures that PCODE is properly initialized. To be called during
- * probe and resume paths.
+ * PCODE init mailbox is polled only on root gt of root tile
+ * as the root tile provides the initialization is complete only
+ * after all the tiles have completed the initialization.
+ * Called only on early probe without locks and with locks in
+ * resume path.
  *
- * It returns 0 on success, and -error number on failure.
+ * Returns 0 on success, and -error number on failure.
  */
-int xe_pcode_init(struct xe_gt *gt)
+int xe_pcode_ready(struct xe_device *xe, bool locked)
 {
         u32 status, request = DGFX_GET_INIT_STATUS;
+        struct xe_gt *gt = xe_root_mmio_gt(xe);
         int timeout_us = 180000000; /* 3 min */
         int ret;
 
-        if (gt_to_xe(gt)->info.skip_pcode)
+        if (xe->info.skip_pcode)
                 return 0;
 
-        if (!IS_DGFX(gt_to_xe(gt)))
+        if (!IS_DGFX(xe))
                 return 0;
 
-        mutex_lock(&gt->pcode.lock);
-        ret = xe_pcode_try_request(gt, DGFX_PCODE_STATUS, request,
-                                   DGFX_INIT_STATUS_COMPLETE,
-                                   DGFX_INIT_STATUS_COMPLETE,
-                                   &status, false, timeout_us);
-        mutex_unlock(&gt->pcode.lock);
+        if (locked)
+                mutex_lock(&gt->pcode.lock);
+
+        ret = pcode_try_request(gt, DGFX_PCODE_STATUS, request,
+                                DGFX_INIT_STATUS_COMPLETE,
+                                DGFX_INIT_STATUS_COMPLETE,
+                                &status, false, timeout_us, locked);
+
+        if (locked)
+                mutex_unlock(&gt->pcode.lock);
 
         if (ret)
-                drm_err(&gt_to_xe(gt)->drm,
+                drm_err(&xe->drm,
                         "PCODE initialization timedout after: 3 min\n");
 
         return ret;
 }
 
 /**
- * xe_pcode_probe - Prepare xe_pcode and also ensure PCODE is initialized.
+ * xe_pcode_init: initialize components of PCODE
  * @gt: gt instance
  *
- * This function initializes the xe_pcode component, and when needed, it ensures
- * that PCODE has properly performed its initialization and it is really ready
- * to go. To be called once only during probe.
- *
- * It returns 0 on success, and -error number on failure.
+ * This function initializes the xe_pcode component.
+ * To be called once only during probe.
  */
-int xe_pcode_probe(struct xe_gt *gt)
+void xe_pcode_init(struct xe_gt *gt)
 {
         drmm_mutex_init(&gt_to_xe(gt)->drm, &gt->pcode.lock);
+}
 
-        if (gt_to_xe(gt)->info.skip_pcode)
-                return 0;
-
-        if (!IS_DGFX(gt_to_xe(gt)))
-                return 0;
-
-        return xe_pcode_init(gt);
+/**
+ * xe_pcode_probe_early: initializes PCODE
+ * @xe: xe instance
+ *
+ * This function checks the initialization status of PCODE
+ * To be called once only during early probe without locks.
+ *
+ * Returns 0 on success, error code otherwise
+ */
+int xe_pcode_probe_early(struct xe_device *xe)
+{
+        return xe_pcode_ready(xe, false);
 }
diff --git a/drivers/gpu/drm/xe/xe_pcode.h b/drivers/gpu/drm/xe/xe_pcode.h
index 08cb1d047cba..3f54c6d2a57d 100644
--- a/drivers/gpu/drm/xe/xe_pcode.h
+++ b/drivers/gpu/drm/xe/xe_pcode.h
@@ -8,9 +8,11 @@
 #include <linux/types.h>
 
 struct xe_gt;
+struct xe_device;
 
-int xe_pcode_probe(struct xe_gt *gt);
-int xe_pcode_init(struct xe_gt *gt);
+void xe_pcode_init(struct xe_gt *gt);
+int xe_pcode_probe_early(struct xe_device *xe);
+int xe_pcode_ready(struct xe_device *xe, bool locked);
 int xe_pcode_init_min_freq_table(struct xe_gt *gt, u32 min_gt_freq,
                                  u32 max_gt_freq);
 int xe_pcode_read(struct xe_gt *gt, u32 mbox, u32 *val, u32 *val1);
diff --git a/drivers/gpu/drm/xe/xe_pm.c b/drivers/gpu/drm/xe/xe_pm.c
index 53b3b0b019ac..944cf4d76099 100644
--- a/drivers/gpu/drm/xe/xe_pm.c
+++ b/drivers/gpu/drm/xe/xe_pm.c
@@ -54,13 +54,15 @@ int xe_pm_suspend(struct xe_device *xe)
         u8 id;
         int err;
 
+        drm_dbg(&xe->drm, "Suspending device\n");
+
         for_each_gt(gt, xe, id)
                 xe_gt_suspend_prepare(gt);
 
         /* FIXME: Super racey... */
         err = xe_bo_evict_all(xe);
         if (err)
-                return err;
+                goto err;
 
         xe_display_pm_suspend(xe);
 
@@ -68,7 +70,7 @@
                 err = xe_gt_suspend(gt);
                 if (err) {
                         xe_display_pm_resume(xe);
-                        return err;
+                        goto err;
                 }
         }
 
@@ -76,7 +78,11 @@
 
         xe_display_pm_suspend_late(xe);
 
+        drm_dbg(&xe->drm, "Device suspended\n");
         return 0;
+err:
+        drm_dbg(&xe->drm, "Device suspend failed %d\n", err);
+        return err;
 }
 
 /**
@@ -92,14 +98,14 @@ int xe_pm_resume(struct xe_device *xe)
         u8 id;
         int err;
 
+        drm_dbg(&xe->drm, "Resuming device\n");
+
         for_each_tile(tile, xe, id)
                 xe_wa_apply_tile_workarounds(tile);
 
-        for_each_gt(gt, xe, id) {
-                err = xe_pcode_init(gt);
-                if (err)
-                        return err;
-        }
+        err = xe_pcode_ready(xe, true);
+        if (err)
+                return err;
 
         xe_display_pm_resume_early(xe);
 
@@ -109,7 +115,7 @@
          */
         err = xe_bo_restore_kernel(xe);
         if (err)
-                return err;
+                goto err;
 
         xe_irq_resume(xe);
 
@@ -120,9 +126,13 @@
 
         err = xe_bo_restore_user(xe);
         if (err)
-                return err;
+                goto err;
 
+        drm_dbg(&xe->drm, "Device resumed\n");
         return 0;
+err:
+        drm_dbg(&xe->drm, "Device resume failed %d\n", err);
+        return err;
 }
 
 static bool xe_pm_pci_d3cold_capable(struct xe_device *xe)
@@ -310,11 +320,9 @@ int xe_pm_runtime_resume(struct xe_device *xe)
                 xe->d3cold.power_lost = xe_guc_in_reset(&gt->uc.guc);
 
         if (xe->d3cold.allowed && xe->d3cold.power_lost) {
-                for_each_gt(gt, xe, id) {
-                        err = xe_pcode_init(gt);
-                        if (err)
-                                goto out;
-                }
+                err = xe_pcode_ready(xe, true);
+                if (err)
+                        goto out;
 
                 /*
                  * This only restores pinned memory which is the memory
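Reader's note (not part of the patch): the pcode rework above splits the old xe_pcode_probe()/xe_pcode_init() pair into three entry points. xe_pcode_init() now only sets up the per-GT mutex, xe_pcode_probe_early() polls the DGFX init status once on the root GT of the root tile without taking that lock (the drmm-managed mutex does not exist yet at early probe), and xe_pcode_ready(xe, true) is reused with locking from the system and runtime resume paths. The standalone sketch below illustrates only that locked/unlocked split; the struct, the fake mailbox read, and the return values are made up for illustration and are not the driver's code.

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for a GT: a flag instead of gt->pcode.lock, a flag instead of
 * the DGFX init-status mailbox. Purely illustrative. */
struct fake_gt {
        int lock_held;
        int init_complete;
};

/* Raw accessor: no locking expectations, usable before the mutex exists
 * (mirrors __pcode_mailbox_rw() in the diff). */
static int __pcode_mailbox_rw(struct fake_gt *gt)
{
        return gt->init_complete ? 0 : -11; /* stands in for -EAGAIN */
}

/* Locked accessor: callers must already hold the pcode lock
 * (the driver asserts this with lockdep_assert_held()). */
static int pcode_mailbox_rw(struct fake_gt *gt)
{
        if (!gt->lock_held)
                return -1; /* would be a lockdep splat in the driver */
        return __pcode_mailbox_rw(gt);
}

/* Mirrors pcode_try_request(): one poll loop shared by both paths,
 * selecting the accessor based on whether the caller holds the lock. */
static int pcode_try_request(struct fake_gt *gt, bool locked)
{
        return locked ? pcode_mailbox_rw(gt) : __pcode_mailbox_rw(gt);
}

/* Mirrors xe_pcode_ready(xe, locked): poll only the root GT, and take
 * the lock only when the caller runs after driver init (resume paths). */
static int pcode_ready(struct fake_gt *root_gt, bool locked)
{
        int ret;

        if (locked)
                root_gt->lock_held = 1;
        ret = pcode_try_request(root_gt, locked);
        if (locked)
                root_gt->lock_held = 0;
        return ret;
}

int main(void)
{
        struct fake_gt root = { .init_complete = 1 };

        /* early probe: xe_pcode_probe_early() -> xe_pcode_ready(xe, false) */
        printf("probe_early: %d\n", pcode_ready(&root, false));
        /* resume: xe_pm_resume()/xe_pm_runtime_resume() -> xe_pcode_ready(xe, true) */
        printf("resume:      %d\n", pcode_ready(&root, true));
        return 0;
}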