author	Chris Wilson <chris@chris-wilson.co.uk>	2016-10-28 15:58:37 +0300
committer	Chris Wilson <chris@chris-wilson.co.uk>	2016-10-28 22:53:47 +0300
commit	1233e2db199dea015391db03d3478b3392201c41 (patch)
tree	264eee9509dcecd6cdbeeb44200e5ce1af54b0c6 /drivers/gpu/drm/i915/i915_gem_shrinker.c
parent	03ac84f1830ec0b90f622500591eb3cc554ee479 (diff)
download	linux-1233e2db199dea015391db03d3478b3392201c41.tar.xz
drm/i915: Move object backing storage manipulation to its own locking
Break the allocation of the backing storage away from struct_mutex into a per-object lock. This allows parallel page allocation, provided we can do so outside of struct_mutex (i.e. set-domain-ioctl, pwrite, GTT fault), i.e. before execbuf! The increased cost of the atomic counters is hidden behind i915_vma_pin() for the typical case of execbuf, i.e. as the object is typically bound between execbufs, the page_pin_count is static. The cost will be felt around set-domain and pwrite, but is offset by the improvement from reduced struct_mutex contention.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/20161028125858.23563-14-chris@chris-wilson.co.uk
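As an illustration of the locking model this patch introduces (not part of the patch itself): backing-storage setup and teardown are serialised by the per-object obj->mm.lock, and the pin count becomes an atomic so that the common already-pinned path never touches struct_mutex. The sketch below shows that pattern in minimal form; allocate_backing_pages() is a hypothetical stand-in for the driver's real page-allocation path.

/*
 * Minimal sketch of the per-object pin pattern (illustrative only):
 * the fast path relies on the atomic pin count, while the slow path
 * takes the per-object mm.lock rather than the device struct_mutex.
 */
static int example_pin_pages(struct drm_i915_gem_object *obj)
{
	int err = 0;

	/* Fast path: pages already allocated and pinned, no lock needed. */
	if (atomic_inc_not_zero(&obj->mm.pages_pin_count))
		return 0;

	mutex_lock(&obj->mm.lock);	/* per-object lock, not struct_mutex */
	if (!obj->mm.pages)
		err = allocate_backing_pages(obj);	/* hypothetical helper */
	if (err == 0)
		atomic_inc(&obj->mm.pages_pin_count);
	mutex_unlock(&obj->mm.lock);

	return err;
}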
Diffstat (limited to 'drivers/gpu/drm/i915/i915_gem_shrinker.c')
-rw-r--r--	drivers/gpu/drm/i915/i915_gem_shrinker.c	51
1 file changed, 34 insertions(+), 17 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_gem_shrinker.c b/drivers/gpu/drm/i915/i915_gem_shrinker.c
index f95061faeae6..c8a4c40ec2c2 100644
--- a/drivers/gpu/drm/i915/i915_gem_shrinker.c
+++ b/drivers/gpu/drm/i915/i915_gem_shrinker.c
@@ -48,6 +48,20 @@ static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task)
#endif
}
+static bool i915_gem_shrinker_lock(struct drm_device *dev, bool *unlock)
+{
+ if (!mutex_trylock(&dev->struct_mutex)) {
+ if (!mutex_is_locked_by(&dev->struct_mutex, current))
+ return false;
+
+ *unlock = false;
+ } else {
+ *unlock = true;
+ }
+
+ return true;
+}
+
static bool any_vma_pinned(struct drm_i915_gem_object *obj)
{
struct i915_vma *vma;
@@ -66,6 +80,9 @@ static bool swap_available(void)
static bool can_release_pages(struct drm_i915_gem_object *obj)
{
+ if (!obj->mm.pages)
+ return false;
+
/* Only shmemfs objects are backed by swap */
if (!obj->base.filp)
return false;
@@ -78,7 +95,7 @@ static bool can_release_pages(struct drm_i915_gem_object *obj)
* to the GPU, simply unbinding from the GPU is not going to succeed
* in releasing our pin count on the pages themselves.
*/
- if (obj->mm.pages_pin_count > obj->bind_count)
+ if (atomic_read(&obj->mm.pages_pin_count) > obj->bind_count)
return false;
if (any_vma_pinned(obj))
@@ -95,7 +112,7 @@ static bool unsafe_drop_pages(struct drm_i915_gem_object *obj)
{
if (i915_gem_object_unbind(obj) == 0)
__i915_gem_object_put_pages(obj);
- return !obj->mm.pages;
+ return !READ_ONCE(obj->mm.pages);
}
/**
@@ -135,6 +152,10 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
{ NULL, 0 },
}, *phase;
unsigned long count = 0;
+ bool unlock;
+
+ if (!i915_gem_shrinker_lock(&dev_priv->drm, &unlock))
+ return 0;
trace_i915_gem_shrink(dev_priv, target, flags);
i915_gem_retire_requests(dev_priv);
@@ -199,8 +220,14 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
i915_gem_object_get(obj);
- if (unsafe_drop_pages(obj))
- count += obj->base.size >> PAGE_SHIFT;
+ if (unsafe_drop_pages(obj)) {
+ mutex_lock(&obj->mm.lock);
+ if (!obj->mm.pages) {
+ __i915_gem_object_invalidate(obj);
+ count += obj->base.size >> PAGE_SHIFT;
+ }
+ mutex_unlock(&obj->mm.lock);
+ }
i915_gem_object_put(obj);
}
@@ -211,6 +238,9 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
intel_runtime_pm_put(dev_priv);
i915_gem_retire_requests(dev_priv);
+ if (unlock)
+ mutex_unlock(&dev_priv->drm.struct_mutex);
+
/* expedite the RCU grace period to free some request slabs */
synchronize_rcu_expedited();
@@ -244,19 +274,6 @@ unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv)
return freed;
}
-static bool i915_gem_shrinker_lock(struct drm_device *dev, bool *unlock)
-{
- if (!mutex_trylock(&dev->struct_mutex)) {
- if (!mutex_is_locked_by(&dev->struct_mutex, current))
- return false;
-
- *unlock = false;
- } else
- *unlock = true;
-
- return true;
-}
-
static unsigned long
i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
{