author     Michal Wajdeczko <michal.wajdeczko@intel.com>   2021-06-03 08:16:27 +0300
committer  Daniel Vetter <daniel.vetter@ffwll.ch>          2021-06-04 11:41:51 +0300
commit     65dd4ed0f4e1ce2ccf8ddc66a6ee026b20f0c24c (patch)
tree       d5a5e915954720616791633781d5d6ea5c8c63cf /drivers/gpu/drm/i915
parent     2e496ac200c13ab1de6dc504a2566c612b493a4e (diff)
download   linux-65dd4ed0f4e1ce2ccf8ddc66a6ee026b20f0c24c.tar.xz
drm/i915/guc: Don't receive all G2H messages in irq handler
In the irq handler, try to receive just a single G2H message and let any remaining messages be received from the tasklet.

Signed-off-by: Michal Wajdeczko <michal.wajdeczko@intel.com>
Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Reviewed-by: Matthew Brost <matthew.brost@intel.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Link: https://patchwork.freedesktop.org/patch/msgid/20210603051630.2635-18-matthew.brost@intel.com
Diffstat (limited to 'drivers/gpu/drm/i915')
 -rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c | 67
 -rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_guc_ct.h |  3
 2 files changed, 50 insertions(+), 20 deletions(-)
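For orientation, a minimal, self-contained sketch of the pattern this patch switches to: the event (irq) handler consumes at most one G2H message and, if more are pending, defers them to a high-priority tasklet that reschedules itself. All demo_* names below are hypothetical illustrations, not part of the i915 driver; the actual changes follow in the diff.

/*
 * Minimal sketch of the irq-handler/tasklet split introduced by this
 * patch. All demo_* names are hypothetical; the real code lives in
 * intel_guc_ct.c and uses its own CT buffer bookkeeping.
 */
#include <linux/interrupt.h>
#include <linux/spinlock.h>

struct demo_ct {
	spinlock_t lock;
	struct tasklet_struct receive_tasklet;
	unsigned int pending;		/* stand-in for messages in the CTB */
};

/* Pop one message under the lock; return how many are still pending. */
static int demo_receive_one(struct demo_ct *ct)
{
	unsigned long flags;
	int remaining;

	spin_lock_irqsave(&ct->lock, flags);
	remaining = ct->pending ? --ct->pending : 0;
	spin_unlock_irqrestore(&ct->lock, flags);

	/* A real implementation would dispatch the message here. */
	return remaining;
}

static void demo_receive_tasklet_func(struct tasklet_struct *t)
{
	struct demo_ct *ct = from_tasklet(ct, t, receive_tasklet);

	/* Handle one message per run; reschedule while work remains. */
	if (demo_receive_one(ct) > 0)
		tasklet_hi_schedule(&ct->receive_tasklet);
}

/* irq handler path: take a single message, defer the rest. */
static void demo_event_handler(struct demo_ct *ct)
{
	if (demo_receive_one(ct) > 0)
		tasklet_hi_schedule(&ct->receive_tasklet);
}

static void demo_init(struct demo_ct *ct)
{
	spin_lock_init(&ct->lock);
	ct->pending = 0;
	tasklet_setup(&ct->receive_tasklet, demo_receive_tasklet_func);
}

static void demo_fini(struct demo_ct *ct)
{
	tasklet_kill(&ct->receive_tasklet);
}

The same shape is visible in the diff: ct_read() now reports how much data is still available, ct_try_receive_message() turns that into a tasklet_hi_schedule(), and intel_guc_ct_event_handler() no longer loops.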
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c
index 1745f89baeae..6e83151ff1b9 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c
@@ -81,6 +81,7 @@ enum { CTB_SEND = 0, CTB_RECV = 1 };
enum { CTB_OWNER_HOST = 0 };
+static void ct_receive_tasklet_func(struct tasklet_struct *t);
static void ct_incoming_request_worker_func(struct work_struct *w);
/**
@@ -95,6 +96,7 @@ void intel_guc_ct_init_early(struct intel_guc_ct *ct)
INIT_LIST_HEAD(&ct->requests.pending);
INIT_LIST_HEAD(&ct->requests.incoming);
INIT_WORK(&ct->requests.worker, ct_incoming_request_worker_func);
+ tasklet_setup(&ct->receive_tasklet, ct_receive_tasklet_func);
}
static inline const char *guc_ct_buffer_type_to_str(u32 type)
@@ -244,6 +246,7 @@ void intel_guc_ct_fini(struct intel_guc_ct *ct)
{
GEM_BUG_ON(ct->enabled);
+ tasklet_kill(&ct->receive_tasklet);
i915_vma_unpin_and_release(&ct->vma, I915_VMA_RELEASE_MAP);
memset(ct, 0, sizeof(*ct));
}
@@ -651,7 +654,7 @@ static int ct_read(struct intel_guc_ct *ct, u32 *data)
CT_DEBUG(ct, "received %*ph\n", 4 * len, data);
desc->head = head * 4;
- return 0;
+ return available - len;
corrupted:
CT_ERROR(ct, "Corrupted descriptor addr=%#x head=%u tail=%u size=%u\n",
@@ -687,10 +690,10 @@ static int ct_handle_response(struct intel_guc_ct *ct, const u32 *msg)
u32 status;
u32 datalen;
struct ct_request *req;
+ unsigned long flags;
bool found = false;
GEM_BUG_ON(!ct_header_is_response(header));
- GEM_BUG_ON(!in_irq());
/* Response payload shall at least include fence and status */
if (unlikely(len < 2)) {
@@ -710,7 +713,7 @@ static int ct_handle_response(struct intel_guc_ct *ct, const u32 *msg)
CT_DEBUG(ct, "response fence %u status %#x\n", fence, status);
- spin_lock(&ct->requests.lock);
+ spin_lock_irqsave(&ct->requests.lock, flags);
list_for_each_entry(req, &ct->requests.pending, link) {
if (unlikely(fence != req->fence)) {
CT_DEBUG(ct, "request %u awaits response\n",
@@ -729,7 +732,7 @@ static int ct_handle_response(struct intel_guc_ct *ct, const u32 *msg)
found = true;
break;
}
- spin_unlock(&ct->requests.lock);
+ spin_unlock_irqrestore(&ct->requests.lock, flags);
if (!found)
CT_ERROR(ct, "Unsolicited response %*ph\n", msgsize, msg);
@@ -843,31 +846,55 @@ static int ct_handle_request(struct intel_guc_ct *ct, const u32 *msg)
return 0;
}
+static int ct_receive(struct intel_guc_ct *ct)
+{
+ u32 msg[GUC_CT_MSG_LEN_MASK + 1]; /* one extra dw for the header */
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&ct->ctbs.recv.lock, flags);
+ ret = ct_read(ct, msg);
+ spin_unlock_irqrestore(&ct->ctbs.recv.lock, flags);
+ if (ret < 0)
+ return ret;
+
+ if (ct_header_is_response(msg[0]))
+ ct_handle_response(ct, msg);
+ else
+ ct_handle_request(ct, msg);
+
+ return ret;
+}
+
+static void ct_try_receive_message(struct intel_guc_ct *ct)
+{
+ int ret;
+
+ if (GEM_WARN_ON(!ct->enabled))
+ return;
+
+ ret = ct_receive(ct);
+ if (ret > 0)
+ tasklet_hi_schedule(&ct->receive_tasklet);
+}
+
+static void ct_receive_tasklet_func(struct tasklet_struct *t)
+{
+ struct intel_guc_ct *ct = from_tasklet(ct, t, receive_tasklet);
+
+ ct_try_receive_message(ct);
+}
+
/*
* When we're communicating with the GuC over CT, GuC uses events
* to notify us about new messages being posted on the RECV buffer.
*/
void intel_guc_ct_event_handler(struct intel_guc_ct *ct)
{
- u32 msg[GUC_CT_MSG_LEN_MASK + 1]; /* one extra dw for the header */
- unsigned long flags;
- int err = 0;
-
if (unlikely(!ct->enabled)) {
WARN(1, "Unexpected GuC event received while CT disabled!\n");
return;
}
- do {
- spin_lock_irqsave(&ct->ctbs.recv.lock, flags);
- err = ct_read(ct, msg);
- spin_unlock_irqrestore(&ct->ctbs.recv.lock, flags);
- if (err)
- break;
-
- if (ct_header_is_response(msg[0]))
- err = ct_handle_response(ct, msg);
- else
- err = ct_handle_request(ct, msg);
- } while (!err);
+ ct_try_receive_message(ct);
}
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.h
index bc52dc479a14..cb222f202301 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.h
@@ -6,6 +6,7 @@
#ifndef _INTEL_GUC_CT_H_
#define _INTEL_GUC_CT_H_
+#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
@@ -55,6 +56,8 @@ struct intel_guc_ct {
struct intel_guc_ct_buffer recv;
} ctbs;
+ struct tasklet_struct receive_tasklet;
+
struct {
u32 last_fence; /* last fence used to send request */