author     Chris Wilson <chris@chris-wilson.co.uk>   2016-12-23 17:57:56 +0300
committer  Chris Wilson <chris@chris-wilson.co.uk>   2016-12-23 19:07:11 +0300
commit     bdeb978506a7cf59ee75ed61a7b6a3506d03a1e4 (patch)
tree       4d88a9af0a0222f261e95b2189d9fc71386b8066 /drivers/gpu/drm/i915/i915_drv.h
parent     28f412e02b15be73669d9882f8041d8e8b54f197 (diff)
download   linux-bdeb978506a7cf59ee75ed61a7b6a3506d03a1e4.tar.xz
drm/i915: Repeat flush of idle work during suspend
The idle work handler is self-arming - if it detects that it needs to run
again it will queue itself from its work handler. Take greater care when
trying to drain the idle work, and double check that it is flushed.

The free worker has a similar issue where it is armed by an RCU task which
may be running concurrently with us. This should hopefully help with the
sporadic WARN_ON(dev_priv->gt.awake) from i915_gem_suspend.

v2: Reuse drain_freed_objects.
v3: Don't try to flush the freed objects from the shrinker, as it may
    already be underneath the struct_mutex.
v4: Use do/while and comment upon the excess rcu_barrier() in
    drain_freed_objects().

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/20161223145804.6605-2-chris@chris-wilson.co.uk
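As a reading aid (not part of the patch), here is a minimal, self-contained sketch of the self-arming deferred-free pattern the commit message describes, assuming a generic llist + work_struct design; all demo_* names are invented for illustration and are not i915 symbols. It shows why a single flush_work() is not enough: an RCU callback can re-queue the worker while (or after) it is being flushed, so the drain has to loop.

#include <linux/kernel.h>
#include <linux/llist.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct demo_object {
	struct rcu_head rcu;
	struct llist_node freed;
};

static void demo_free_work_fn(struct work_struct *work);

static LLIST_HEAD(demo_free_list);
static DECLARE_WORK(demo_free_work, demo_free_work_fn);

/* Worker: release everything queued so far.  While it runs, further RCU
 * callbacks may add new objects and re-arm this work. */
static void demo_free_work_fn(struct work_struct *work)
{
	struct llist_node *list = llist_del_all(&demo_free_list);
	struct demo_object *obj, *next;

	llist_for_each_entry_safe(obj, next, list, freed)
		kfree(obj);
}

/* RCU callback: runs after a grace period and defers the real free to the
 * worker - this is the "self-arming" step. */
static void demo_object_free_rcu(struct rcu_head *rcu)
{
	struct demo_object *obj = container_of(rcu, struct demo_object, rcu);

	if (llist_add(&obj->freed, &demo_free_list))
		schedule_work(&demo_free_work);
}

static void demo_object_free(struct demo_object *obj)
{
	/* Readers may still hold RCU-protected pointers; free after a
	 * grace period, from process context. */
	call_rcu(&obj->rcu, demo_object_free_rcu);
}

/* Same shape as the i915_gem_drain_freed_objects() added by this patch:
 * rcu_barrier() forces pending call_rcu() callbacks to run (possibly arming
 * the work), flush_work() waits for the worker, and the loop repeats until
 * flush_work() reports there was nothing left to flush. */
static void demo_drain_freed_objects(void)
{
	do {
		rcu_barrier();
	} while (flush_work(&demo_free_work));
}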
Diffstat (limited to 'drivers/gpu/drm/i915/i915_drv.h')
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h | 13
1 file changed, 13 insertions(+), 0 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index bd3915ac0133..32e2d7431025 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -3208,6 +3208,19 @@ i915_gem_object_create_from_data(struct drm_i915_private *dev_priv,
 void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file);
 void i915_gem_free_object(struct drm_gem_object *obj);
 
+static inline void i915_gem_drain_freed_objects(struct drm_i915_private *i915)
+{
+	/* A single pass should suffice to release all the freed objects (along
+	 * most call paths), but be a little more paranoid in that freeing
+	 * the objects does take a little amount of time, during which the RCU
+	 * callbacks could have added new objects into the freed list, and
+	 * armed the work again.
+	 */
+	do {
+		rcu_barrier();
+	} while (flush_work(&i915->mm.free_work));
+}
+
 struct i915_vma * __must_check
 i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
 			 const struct i915_ggtt_view *view,
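Design note on the added helper: rcu_barrier() runs first so that every call_rcu() callback already queued has executed and had its chance to arm mm.free_work; flush_work() then waits for the worker itself. Because those callbacks can fire while the worker runs, a single pass can still leave the work armed, so the loop repeats until flush_work() returns false, i.e. nothing was pending. A hypothetical caller, purely for illustration (the real i915 suspend/cleanup paths are more involved), might look like:

/* Illustrative only - not the actual i915 suspend code. */
static void example_cleanup(struct drm_i915_private *i915)
{
	i915_gem_drain_freed_objects(i915);
	WARN_ON(i915->gt.awake);	/* the warning this series aims to silence */
}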