author		Chris Wilson <chris@chris-wilson.co.uk>	2018-10-01 17:47:55 +0300
committer	Chris Wilson <chris@chris-wilson.co.uk>	2018-10-01 22:34:24 +0300
commit		e9eaf82d97a2b05460ff5ef6a3e07446f7d049fe
tree		438f33d4ca7e450b661260806a97f4a2d0ad9bbd /drivers/gpu/drm/i915/i915_scheduler.c
parent		e2f3496e93be3238de2e2e6bfc83b3a83c084ce5
download	linux-e9eaf82d97a2b05460ff5ef6a3e07446f7d049fe.tar.xz
drm/i915: Priority boost for waiting clients
Latency is in the eye of the beholder. In the case where a client stops
and waits for the gpu, give that request chain a small priority boost
(not so large that it overtakes higher priority clients, to preserve the
external ordering) so that ideally the wait completes earlier.

v2: Tvrtko recommends keeping the boost-from-user-stall as small as
possible and allowing new client flows to be preferred for
interactivity over stalls.

Testcase: igt/gem_sync/switch-default
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Cc: Dmitry Rogozhkin <dmitry.v.rogozhkin@intel.com>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20181001144755.7978-3-chris@chris-wilson.co.uk
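For context, the new i915_schedule_bump_priority() entry point is driven from
the request-wait path; the i915_request.c half of this patch is outside this
file's diff. Below is a minimal sketch of how the wait path is expected to
apply the boost, assuming the I915_WAIT_PRIORITY flag and I915_PRIORITY_WAIT
value from elsewhere in this series; boost_waited_request() is a hypothetical
helper name used only for illustration.

/*
 * Hedged sketch, not part of this file's diff: the wait path is
 * expected to bump the waited-upon request chain roughly like this.
 * boost_waited_request() is a hypothetical name; I915_WAIT_PRIORITY
 * and I915_PRIORITY_WAIT are assumed from the rest of the series.
 */
static void boost_waited_request(struct i915_request *rq, unsigned int flags)
{
        /* Only boost when the caller opted in to priority bumping */
        if (flags & I915_WAIT_PRIORITY)
                i915_schedule_bump_priority(rq, I915_PRIORITY_WAIT);
}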
Diffstat (limited to 'drivers/gpu/drm/i915/i915_scheduler.c')
-rw-r--r--	drivers/gpu/drm/i915/i915_scheduler.c | 34 ++++++++++++++++++++++++++++------
1 file changed, 28 insertions(+), 6 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_scheduler.c b/drivers/gpu/drm/i915/i915_scheduler.c
index de9a2ba7c3bc..340faea6c08a 100644
--- a/drivers/gpu/drm/i915/i915_scheduler.c
+++ b/drivers/gpu/drm/i915/i915_scheduler.c
@@ -239,7 +239,8 @@ sched_lock_engine(struct i915_sched_node *node, struct intel_engine_cs *locked)
         return engine;
 }
 
-void i915_schedule(struct i915_request *rq, const struct i915_sched_attr *attr)
+static void __i915_schedule(struct i915_request *rq,
+                            const struct i915_sched_attr *attr)
 {
         struct list_head *uninitialized_var(pl);
         struct intel_engine_cs *engine, *last;
@@ -248,6 +249,8 @@ void i915_schedule(struct i915_request *rq, const struct i915_sched_attr *attr)
         const int prio = attr->priority;
         LIST_HEAD(dfs);
 
+        /* Needed in order to use the temporary link inside i915_dependency */
+        lockdep_assert_held(&schedule_lock);
         GEM_BUG_ON(prio == I915_PRIORITY_INVALID);
 
         if (i915_request_completed(rq))
@@ -256,9 +259,6 @@ void i915_schedule(struct i915_request *rq, const struct i915_sched_attr *attr)
         if (prio <= READ_ONCE(rq->sched.attr.priority))
                 return;
 
-        /* Needed in order to use the temporary link inside i915_dependency */
-        spin_lock(&schedule_lock);
-
         stack.signaler = &rq->sched;
         list_add(&stack.dfs_link, &dfs);
 
@@ -312,7 +312,7 @@ void i915_schedule(struct i915_request *rq, const struct i915_sched_attr *attr)
                 rq->sched.attr = *attr;
 
                 if (stack.dfs_link.next == stack.dfs_link.prev)
-                        goto out_unlock;
+                        return;
 
                 __list_del_entry(&stack.dfs_link);
         }
@@ -371,7 +371,29 @@ void i915_schedule(struct i915_request *rq, const struct i915_sched_attr *attr)
         }
 
         spin_unlock_irq(&engine->timeline.lock);
+}
 
-out_unlock:
+void i915_schedule(struct i915_request *rq, const struct i915_sched_attr *attr)
+{
+        spin_lock(&schedule_lock);
+        __i915_schedule(rq, attr);
         spin_unlock(&schedule_lock);
 }
+
+void i915_schedule_bump_priority(struct i915_request *rq, unsigned int bump)
+{
+        struct i915_sched_attr attr;
+
+        GEM_BUG_ON(bump & ~I915_PRIORITY_MASK);
+
+        if (READ_ONCE(rq->sched.attr.priority) == I915_PRIORITY_INVALID)
+                return;
+
+        spin_lock_bh(&schedule_lock);
+
+        attr = rq->sched.attr;
+        attr.priority |= bump;
+        __i915_schedule(rq, &attr);
+
+        spin_unlock_bh(&schedule_lock);
+}
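Two details of the new i915_schedule_bump_priority() are worth noting: it
takes schedule_lock with spin_lock_bh(), presumably so the boost can also be
applied safely from bottom-half context, and it ORs the bump into
attr.priority rather than adding it, so only the internal low-order bits
change. A minimal sketch of the priority layout this relies on follows, with
illustrative values assumed from this series rather than taken from this diff.

#include <linux/bits.h>

/*
 * Hedged sketch: user priority occupies the high bits, while the bits
 * below I915_USER_PRIORITY_SHIFT are reserved for internal boosts such
 * as the wait boost. The exact values here are assumptions, not part
 * of this diff.
 */
#define I915_USER_PRIORITY_SHIFT 2
#define I915_USER_PRIORITY(x)    ((x) << I915_USER_PRIORITY_SHIFT)
#define I915_PRIORITY_COUNT      BIT(I915_USER_PRIORITY_SHIFT)
#define I915_PRIORITY_MASK       (I915_PRIORITY_COUNT - 1)
#define I915_PRIORITY_WAIT       BIT(0)

Because the bump only ever sets bits inside I915_PRIORITY_MASK, a boosted
request moves ahead of its peers at the same user priority but can never
overtake a request with a higher user priority, preserving the external
ordering the commit message describes; GEM_BUG_ON(bump & ~I915_PRIORITY_MASK)
enforces that invariant.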