path: root/drivers/gpu/drm/i915/i915_sw_fence.c
author	Chris Wilson <chris@chris-wilson.co.uk>	2020-02-11 17:48:31 +0300
committer	Chris Wilson <chris@chris-wilson.co.uk>	2020-02-12 00:58:39 +0300
commit	42fb60de3129ef998bb18266ee06530669021e55 (patch)
tree	97df241bc3c870323e38572ac2e507d41230cd3e /drivers/gpu/drm/i915/i915_sw_fence.c
parent	0b02f97f40d8e33306f4d724e90b1aff97c3bd46 (diff)
download	linux-42fb60de3129ef998bb18266ee06530669021e55.tar.xz
drm/i915/gem: Don't leak non-persistent requests on changing engines
If we have a set of active engines marked as being non-persistent, we lose track of those if the user replaces those engines with I915_CONTEXT_PARAM_ENGINES. Part of our uABI contract is that non-persistent requests are terminated if they are no longer being tracked by the user's context (in order to prevent a lost request causing an untracked and so unstoppable GPU hang), so we need to apply the same context cancellation upon changing engines.

v2: Track stale engines[] so we only reap at context closure.
v3: Tvrtko spotted races with closing contexts and set-engines, so add a veneer of kill-everything paranoia to clean up after losing a race.

Fixes: a0e047156cde ("drm/i915/gem: Make context persistence optional")
Testcase: igt/gem_ctx_persistence/replace
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200211144831.1011498-1-chris@chris-wilson.co.uk
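The mechanism described in v2/v3 can be pictured using just the sw-fence API touched by this patch: replaced engines[] sets are parked on a stale list whose reaping is gated by a fence that fires at context closure, and adding to that list only succeeds while the fence is still pending, so a set-engines racing with context closure falls back to killing the requests immediately. A loose sketch under those assumptions follows; struct stale_engines and park_stale() are hypothetical names for illustration, not the patch's actual code.

#include <linux/list.h>
#include <linux/spinlock.h>

#include "i915_sw_fence.h"

/*
 * Hypothetical container for replaced engines[] sets; the real patch keeps
 * equivalent state inside the GEM context, these names are illustrative only.
 */
struct stale_engines {
	spinlock_t lock;
	struct list_head list;
	struct i915_sw_fence fence;	/* signaled once the context is closed */
};

/* Park a replaced engines[] set so it is reaped when the context closes. */
static int park_stale(struct stale_engines *stale, struct list_head *engines)
{
	/* Fence already fired: the context is closing, caller must reap now. */
	if (!i915_sw_fence_await(&stale->fence))
		return -EBUSY;

	spin_lock_irq(&stale->lock);
	list_add_tail(engines, &stale->list);
	spin_unlock_irq(&stale->lock);

	/* Balance the await taken above. */
	i915_sw_fence_complete(&stale->fence);
	return 0;
}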
Diffstat (limited to 'drivers/gpu/drm/i915/i915_sw_fence.c')
-rw-r--r--	drivers/gpu/drm/i915/i915_sw_fence.c | 17
1 file changed, 14 insertions, 3 deletions
diff --git a/drivers/gpu/drm/i915/i915_sw_fence.c b/drivers/gpu/drm/i915/i915_sw_fence.c
index 51ba97daf2a0..a3d38e089b6e 100644
--- a/drivers/gpu/drm/i915/i915_sw_fence.c
+++ b/drivers/gpu/drm/i915/i915_sw_fence.c
@@ -211,10 +211,21 @@ void i915_sw_fence_complete(struct i915_sw_fence *fence)
 	__i915_sw_fence_complete(fence, NULL);
 }
 
-void i915_sw_fence_await(struct i915_sw_fence *fence)
+bool i915_sw_fence_await(struct i915_sw_fence *fence)
 {
-	debug_fence_assert(fence);
-	WARN_ON(atomic_inc_return(&fence->pending) <= 1);
+	int pending;
+
+	/*
+	 * It is only safe to add a new await to the fence while it has
+	 * not yet been signaled (i.e. there are still existing signalers).
+	 */
+	pending = atomic_read(&fence->pending);
+	do {
+		if (pending < 1)
+			return false;
+	} while (!atomic_try_cmpxchg(&fence->pending, &pending, pending + 1));
+
+	return true;
 }
 
 void __i915_sw_fence_init(struct i915_sw_fence *fence,
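Changing i915_sw_fence_await() to return bool lets a caller discover that the fence has already signaled and back off, instead of taking an await that could never be balanced by i915_sw_fence_complete(). A minimal, hypothetical caller sketch; queue_work_on_fence() is illustrative and not part of the patch.

#include "i915_sw_fence.h"

/*
 * Hypothetical caller: pin the fence open before queuing work against it.
 * If the fence has already signaled we lost the race and must not add a
 * new await; every successful await must be balanced by a complete.
 */
static int queue_work_on_fence(struct i915_sw_fence *fence)
{
	if (!i915_sw_fence_await(fence))
		return -EBUSY;

	/* ... queue work that later calls i915_sw_fence_complete(fence) ... */

	return 0;
}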