Diffstat (limited to 'drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c')
 drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c | 140 ++++++++++++++++++++++++++----
 1 file changed, 130 insertions(+), 10 deletions(-)
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c
index 1803a633ed64..f28a3a83742d 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c
@@ -13,6 +13,30 @@
#include "intel_guc_ct.h"
#include "intel_guc_print.h"
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GUC)
+enum {
+ CT_DEAD_ALIVE = 0,
+ CT_DEAD_SETUP,
+ CT_DEAD_WRITE,
+ CT_DEAD_DEADLOCK,
+ CT_DEAD_H2G_HAS_ROOM,
+ CT_DEAD_READ,
+ CT_DEAD_PROCESS_FAILED,
+};
+
+static void ct_dead_ct_worker_func(struct work_struct *w);
+
+#define CT_DEAD(ct, reason) \
+ do { \
+ if (!(ct)->dead_ct_reported) { \
+ (ct)->dead_ct_reason |= 1 << CT_DEAD_##reason; \
+ queue_work(system_unbound_wq, &(ct)->dead_ct_worker); \
+ } \
+ } while (0)
+#else
+#define CT_DEAD(ct, reason) do { } while (0)
+#endif
+
static inline struct intel_guc *ct_to_guc(struct intel_guc_ct *ct)
{
return container_of(ct, struct intel_guc, ct);
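The CT_DEAD() machinery added above latches the first failure: each call site ORs in a reason bit and kicks a worker, and once the worker has reported (dead_ct_reported is set), later calls become no-ops until intel_guc_ct_enable() re-arms the latch. Below is a minimal userspace sketch of that latch-and-report-once pattern; the names (struct chan, CHAN_DEAD) are illustrative and not part of i915, and the report runs inline here where i915 defers it to a worker:

    #include <stdbool.h>
    #include <stdio.h>

    enum { DEAD_SETUP, DEAD_WRITE, DEAD_READ };

    struct chan {
            bool dead_reported;        /* latch: report at most once */
            unsigned int dead_reason;  /* bitmask of DEAD_* bits */
    };

    static void chan_report_dead(struct chan *c)
    {
            if (c->dead_reported)
                    return;
            c->dead_reported = true;
            fprintf(stderr, "channel is dead - reason=0x%X\n", c->dead_reason);
    }

    /* i915 queues a worker instead of reporting inline, because CT_DEAD()
     * can fire in atomic context where a full error capture cannot run. */
    #define CHAN_DEAD(c, reason)                                    \
            do {                                                    \
                    if (!(c)->dead_reported) {                      \
                            (c)->dead_reason |= 1u << DEAD_##reason; \
                            chan_report_dead(c);                    \
                    }                                               \
            } while (0)

    int main(void)
    {
            struct chan c = { 0 };

            CHAN_DEAD(&c, WRITE);  /* reported */
            CHAN_DEAD(&c, READ);   /* latched, silently dropped */
            return 0;
    }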
@@ -93,6 +117,9 @@ void intel_guc_ct_init_early(struct intel_guc_ct *ct)
spin_lock_init(&ct->requests.lock);
INIT_LIST_HEAD(&ct->requests.pending);
INIT_LIST_HEAD(&ct->requests.incoming);
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GUC)
+ INIT_WORK(&ct->dead_ct_worker, ct_dead_ct_worker_func);
+#endif
INIT_WORK(&ct->requests.worker, ct_incoming_request_worker_func);
tasklet_setup(&ct->receive_tasklet, ct_receive_tasklet_func);
init_waitqueue_head(&ct->wq);
@@ -319,11 +346,16 @@ int intel_guc_ct_enable(struct intel_guc_ct *ct)
ct->enabled = true;
ct->stall_time = KTIME_MAX;
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GUC)
+ ct->dead_ct_reported = false;
+ ct->dead_ct_reason = CT_DEAD_ALIVE;
+#endif
return 0;
err_out:
CT_PROBE_ERROR(ct, "Failed to enable CTB (%pe)\n", ERR_PTR(err));
+ CT_DEAD(ct, SETUP);
return err;
}
@@ -344,6 +376,24 @@ void intel_guc_ct_disable(struct intel_guc_ct *ct)
}
}
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
+static void ct_track_lost_and_found(struct intel_guc_ct *ct, u32 fence, u32 action)
+{
+ unsigned int lost = fence % ARRAY_SIZE(ct->requests.lost_and_found);
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GUC)
+ unsigned long entries[SZ_32];
+ unsigned int n;
+
+ n = stack_trace_save(entries, ARRAY_SIZE(entries), 1);
+
+ /* May be called under spinlock, so avoid sleeping */
+ ct->requests.lost_and_found[lost].stack = stack_depot_save(entries, n, GFP_NOWAIT);
+#endif
+ ct->requests.lost_and_found[lost].fence = fence;
+ ct->requests.lost_and_found[lost].action = action;
+}
+#endif
+
static u32 ct_get_next_fence(struct intel_guc_ct *ct)
{
/* For now it's trivial */
@@ -394,11 +444,11 @@ static int ct_write(struct intel_guc_ct *ct,
FIELD_PREP(GUC_CTB_MSG_0_NUM_DWORDS, len) |
FIELD_PREP(GUC_CTB_MSG_0_FENCE, fence);
- type = (flags & INTEL_GUC_CT_SEND_NB) ? GUC_HXG_TYPE_EVENT :
+ type = (flags & INTEL_GUC_CT_SEND_NB) ? GUC_HXG_TYPE_FAST_REQUEST :
GUC_HXG_TYPE_REQUEST;
hxg = FIELD_PREP(GUC_HXG_MSG_0_TYPE, type) |
- FIELD_PREP(GUC_HXG_EVENT_MSG_0_ACTION |
- GUC_HXG_EVENT_MSG_0_DATA0, action[0]);
+ FIELD_PREP(GUC_HXG_REQUEST_MSG_0_ACTION |
+ GUC_HXG_REQUEST_MSG_0_DATA0, action[0]);
CT_DEBUG(ct, "writing (tail %u) %*ph %*ph %*ph\n",
tail, 4, &header, 4, &hxg, 4 * (len - 1), &action[1]);
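Two things are worth noting in this hunk: non-blocking sends are now encoded as GUC_HXG_TYPE_FAST_REQUEST instead of GUC_HXG_TYPE_EVENT, so each one still carries a fence that an unexpected firmware reply can later be matched against (this is what the lost-and-found tracking below consumes); and the header dwords are assembled with FIELD_PREP(). For illustration, here is a stand-alone re-implementation of the FIELD_PREP()/FIELD_GET() pair, which in the kernel live in <linux/bitfield.h> with extra compile-time mask checking; the MSG_* masks are made up for the example, not the real GuC ABI values:

    #include <stdint.h>
    #include <stdio.h>

    /* mask must be a contiguous run of set bits; __builtin_ctz() finds its shift */
    #define FIELD_PREP(mask, val) ((((uint32_t)(val)) << __builtin_ctz(mask)) & (mask))
    #define FIELD_GET(mask, reg)  ((((uint32_t)(reg)) & (mask)) >> __builtin_ctz(mask))

    /* illustrative masks only - not the real GuC ABI layout */
    #define MSG_FENCE 0xffff0000u
    #define MSG_LEN   0x000001ffu

    int main(void)
    {
            uint32_t header = FIELD_PREP(MSG_FENCE, 42) | FIELD_PREP(MSG_LEN, 3);

            printf("fence=%u len=%u\n",
                   FIELD_GET(MSG_FENCE, header), FIELD_GET(MSG_LEN, header));
            return 0;
    }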
@@ -415,6 +465,11 @@ static int ct_write(struct intel_guc_ct *ct,
}
GEM_BUG_ON(tail > size);
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
+ ct_track_lost_and_found(ct, fence,
+ FIELD_GET(GUC_HXG_EVENT_MSG_0_ACTION, action[0]));
+#endif
+
/*
* make sure H2G buffer update and LRC tail update (if this triggering a
* submission) are visible before updating the descriptor tail
@@ -434,6 +489,7 @@ static int ct_write(struct intel_guc_ct *ct,
corrupted:
CT_ERROR(ct, "Corrupted descriptor head=%u tail=%u status=%#x\n",
desc->head, desc->tail, desc->status);
+ CT_DEAD(ct, WRITE);
ctb->broken = true;
return -EPIPE;
}
@@ -504,6 +560,7 @@ static inline bool ct_deadlocked(struct intel_guc_ct *ct)
CT_ERROR(ct, "Head: %u\n (Dwords)", ct->ctbs.recv.desc->head);
CT_ERROR(ct, "Tail: %u\n (Dwords)", ct->ctbs.recv.desc->tail);
+ CT_DEAD(ct, DEADLOCK);
ct->ctbs.send.broken = true;
}
@@ -552,6 +609,7 @@ static inline bool h2g_has_room(struct intel_guc_ct *ct, u32 len_dw)
head, ctb->size);
desc->status |= GUC_CTB_STATUS_OVERFLOW;
ctb->broken = true;
+ CT_DEAD(ct, H2G_HAS_ROOM);
return false;
}
@@ -640,7 +698,7 @@ static int ct_send(struct intel_guc_ct *ct,
GEM_BUG_ON(!ct->enabled);
GEM_BUG_ON(!len);
- GEM_BUG_ON(len & ~GUC_CT_MSG_LEN_MASK);
+ GEM_BUG_ON(len > GUC_CTB_HXG_MSG_MAX_LEN - GUC_CTB_HDR_LEN);
GEM_BUG_ON(!response_buf && response_buf_size);
might_sleep();
@@ -902,15 +960,59 @@ static int ct_read(struct intel_guc_ct *ct, struct ct_incoming_msg **msg)
/* now update descriptor */
WRITE_ONCE(desc->head, head);
+ /*
+ * Wa_22016122933: Making sure the head update is
+ * visible to GuC right away
+ */
+ intel_guc_write_barrier(ct_to_guc(ct));
+
return available - len;
corrupted:
CT_ERROR(ct, "Corrupted descriptor head=%u tail=%u status=%#x\n",
desc->head, desc->tail, desc->status);
ctb->broken = true;
+ CT_DEAD(ct, READ);
return -EPIPE;
}
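The intel_guc_write_barrier() call added after the head update implements Wa_22016122933: the new head must become visible to the GuC promptly, before the firmware next examines the descriptor, rather than lingering in a write buffer. The publish-then-order shape is loosely analogous to a release store on a ring-buffer head, sketched here with C11 atomics purely for illustration (the real barrier is device-specific, not a CPU memory-ordering primitive):

    #include <stdatomic.h>
    #include <stdint.h>

    #define RING_SIZE 256
    static uint32_t ring[RING_SIZE];
    static _Atomic uint32_t head;

    /* Consumer: read an entry, then publish the advanced head. The release
     * store keeps the buffer access ordered before the producer can observe
     * the new head value. */
    static uint32_t consume_one(uint32_t old_head)
    {
            uint32_t v = ring[old_head % RING_SIZE];

            atomic_store_explicit(&head, (old_head + 1) % RING_SIZE,
                                  memory_order_release);
            return v;
    }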
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
+static bool ct_check_lost_and_found(struct intel_guc_ct *ct, u32 fence)
+{
+ unsigned int n;
+ char *buf = NULL;
+ bool found = false;
+
+ lockdep_assert_held(&ct->requests.lock);
+
+ for (n = 0; n < ARRAY_SIZE(ct->requests.lost_and_found); n++) {
+ if (ct->requests.lost_and_found[n].fence != fence)
+ continue;
+ found = true;
+
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GUC)
+ buf = kmalloc(SZ_4K, GFP_NOWAIT);
+ if (buf && stack_depot_snprint(ct->requests.lost_and_found[n].stack,
+ buf, SZ_4K, 0)) {
+ CT_ERROR(ct, "Fence %u was used by action %#04x sent at\n%s",
+ fence, ct->requests.lost_and_found[n].action, buf);
+ break;
+ }
+#endif
+ CT_ERROR(ct, "Fence %u was used by action %#04x\n",
+ fence, ct->requests.lost_and_found[n].action);
+ break;
+ }
+ kfree(buf);
+ return found;
+}
+#else
+static bool ct_check_lost_and_found(struct intel_guc_ct *ct, u32 fence)
+{
+ return false;
+}
+#endif
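Together, ct_track_lost_and_found() and ct_check_lost_and_found() form a small fence-indexed ring: every H2G write records its fence and action, and an unsolicited response can then be attributed to the request that used that fence. A compact userspace model of the same idea, with an assumed ring size (the real size comes from ARRAY_SIZE(ct->requests.lost_and_found)):

    #include <stdio.h>

    #define LNF_SIZE 32 /* assumption; slots are recycled as fences wrap */

    struct lnf_entry { unsigned int fence, action; };
    static struct lnf_entry lnf[LNF_SIZE];

    static void lnf_track(unsigned int fence, unsigned int action)
    {
            lnf[fence % LNF_SIZE] = (struct lnf_entry){ fence, action };
    }

    static int lnf_check(unsigned int fence)
    {
            struct lnf_entry *e = &lnf[fence % LNF_SIZE];

            if (e->fence != fence)
                    return 0; /* slot reused or never written: no clue available */
            fprintf(stderr, "Fence %u was used by action %#04x\n", fence, e->action);
            return 1;
    }

    int main(void)
    {
            lnf_track(7, 0x3002);
            return !lnf_check(7); /* prints: Fence 7 was used by action 0x3002 */
    }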
+
static int ct_handle_response(struct intel_guc_ct *ct, struct ct_incoming_msg *response)
{
u32 len = FIELD_GET(GUC_CTB_MSG_0_NUM_DWORDS, response->msg[0]);
@@ -952,12 +1054,13 @@ static int ct_handle_response(struct intel_guc_ct *ct, struct ct_incoming_msg *r
break;
}
if (!found) {
- CT_ERROR(ct, "Unsolicited response (fence %u)\n", fence);
- CT_ERROR(ct, "Could not find fence=%u, last_fence=%u\n", fence,
- ct->requests.last_fence);
- list_for_each_entry(req, &ct->requests.pending, link)
- CT_ERROR(ct, "request %u awaits response\n",
- req->fence);
+ CT_ERROR(ct, "Unsolicited response message: len %u, data %#x (fence %u, last %u)\n",
+ len, hxg[0], fence, ct->requests.last_fence);
+ if (!ct_check_lost_and_found(ct, fence)) {
+ list_for_each_entry(req, &ct->requests.pending, link)
+ CT_ERROR(ct, "request %u awaits response\n",
+ req->fence);
+ }
err = -ENOKEY;
}
spin_unlock_irqrestore(&ct->requests.lock, flags);
@@ -1057,6 +1160,7 @@ static bool ct_process_incoming_requests(struct intel_guc_ct *ct)
if (unlikely(err)) {
CT_ERROR(ct, "Failed to process CT message (%pe) %*ph\n",
ERR_PTR(err), 4 * request->size, request->msg);
+ CT_DEAD(ct, PROCESS_FAILED);
ct_free_msg(request);
}
@@ -1233,3 +1337,19 @@ void intel_guc_ct_print_info(struct intel_guc_ct *ct,
drm_printf(p, "Tail: %u\n",
ct->ctbs.recv.desc->tail);
}
+
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GUC)
+static void ct_dead_ct_worker_func(struct work_struct *w)
+{
+ struct intel_guc_ct *ct = container_of(w, struct intel_guc_ct, dead_ct_worker);
+ struct intel_guc *guc = ct_to_guc(ct);
+
+ if (ct->dead_ct_reported)
+ return;
+
+ ct->dead_ct_reported = true;
+
+ guc_info(guc, "CTB is dead - reason=0x%X\n", ct->dead_ct_reason);
+ intel_klog_error_capture(guc_to_gt(guc), (intel_engine_mask_t)~0U);
+}
+#endif
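Finally, the report itself runs from a worker on system_unbound_wq because CT_DEAD() can fire under spinlocks or from tasklet context, while intel_klog_error_capture() needs a sleepable context. A pthread-based sketch of the same record-cheaply, report-later shape (hypothetical names, not i915 code):

    #include <pthread.h>
    #include <stdio.h>

    static unsigned int dead_reason; /* written before the worker is kicked */

    static void *dead_worker(void *arg)
    {
            (void)arg;
            /* heavy, sleepable work goes here (error capture, logging) */
            fprintf(stderr, "channel is dead - reason=0x%X\n", dead_reason);
            return NULL;
    }

    int main(void)
    {
            pthread_t t;

            dead_reason = 1u << 2;                       /* record why, cheaply */
            pthread_create(&t, NULL, dead_worker, NULL); /* defer the report */
            pthread_join(t, NULL);
            return 0;
    }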