author     Chris Wilson <chris@chris-wilson.co.uk>  2019-02-28 13:20:33 +0300
committer  Chris Wilson <chris@chris-wilson.co.uk>  2019-02-28 14:07:56 +0300
commit     32eb6bcfdda9dad240cf6a22fda2b3418b1a1b8e (patch)
tree       b8d37fcf09299b0f0edd9317479336e04a921156 /drivers/gpu/drm/i915/i915_scheduler.c
parent     bd2be1418659abd7b1cdecc7d23d86314b0e3496 (diff)
download   linux-32eb6bcfdda9dad240cf6a22fda2b3418b1a1b8e.tar.xz
drm/i915: Make request allocation caches global
As kmem_caches share the same properties (size, allocation/free behaviour) for all potential devices, we can use global caches. While this potentially has worse fragmentation behaviour (one can argue that different devices would have different activity lifetimes, but you can also argue that activity is temporal across the system), it is the default behaviour of the system at large to amalgamate matching caches.

The benefit for us is much reduced pointer dancing along the frequent allocation paths.

v2: Defer shrinking until after a global grace period to futureproof multiple consumers of the slab caches, similar to the current strategy of avoiding shrinking too early.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190228102035.5857-1-chris@chris-wilson.co.uk
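The shape of the change is easier to see in isolation than in the diff below. Here is a minimal sketch of the pattern, assuming hypothetical my_* names rather than the actual i915 symbols: one file-local cache shared by every device instance, with alloc/free wrappers that no longer need a device pointer.

#include <linux/list.h>
#include <linux/slab.h>

struct my_dep {
	struct list_head link;
	unsigned long flags;
};

/* One cache for the whole module, not one hung off each device struct */
static struct kmem_cache *slab_my_deps;

int __init my_caches_init(void)
{
	/* KMEM_CACHE() derives the cache name and object size from the type */
	slab_my_deps = KMEM_CACHE(my_dep, SLAB_HWCACHE_ALIGN);
	return slab_my_deps ? 0 : -ENOMEM;
}

void my_caches_exit(void)
{
	kmem_cache_destroy(slab_my_deps);
}

/* Allocation paths take no device argument: the cache is a static address */
static struct my_dep *my_dep_alloc(void)
{
	return kmem_cache_alloc(slab_my_deps, GFP_KERNEL);
}

static void my_dep_free(struct my_dep *dep)
{
	kmem_cache_free(slab_my_deps, dep);
}

This is what "much reduced pointer dancing" buys: kmem_cache_alloc(global.slab_dependencies, ...) loads a static address, where the old path had to chase engine->i915->priorities, or be handed an i915 pointer solely to reach the cache, on every allocation.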
Diffstat (limited to 'drivers/gpu/drm/i915/i915_scheduler.c')
-rw-r--r--  drivers/gpu/drm/i915/i915_scheduler.c | 66
1 file changed, 52 insertions(+), 14 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_scheduler.c b/drivers/gpu/drm/i915/i915_scheduler.c
index 38efefd22dce..0dd720593f9c 100644
--- a/drivers/gpu/drm/i915/i915_scheduler.c
+++ b/drivers/gpu/drm/i915/i915_scheduler.c
@@ -10,6 +10,11 @@
#include "i915_request.h"
#include "i915_scheduler.h"
+static struct i915_global_scheduler {
+ struct kmem_cache *slab_dependencies;
+ struct kmem_cache *slab_priorities;
+} global;
+
static DEFINE_SPINLOCK(schedule_lock);
static const struct i915_request *
@@ -37,16 +42,15 @@ void i915_sched_node_init(struct i915_sched_node *node)
}
static struct i915_dependency *
-i915_dependency_alloc(struct drm_i915_private *i915)
+i915_dependency_alloc(void)
{
- return kmem_cache_alloc(i915->dependencies, GFP_KERNEL);
+ return kmem_cache_alloc(global.slab_dependencies, GFP_KERNEL);
}
static void
-i915_dependency_free(struct drm_i915_private *i915,
- struct i915_dependency *dep)
+i915_dependency_free(struct i915_dependency *dep)
{
- kmem_cache_free(i915->dependencies, dep);
+ kmem_cache_free(global.slab_dependencies, dep);
}
bool __i915_sched_node_add_dependency(struct i915_sched_node *node,
@@ -73,25 +77,23 @@ bool __i915_sched_node_add_dependency(struct i915_sched_node *node,
return ret;
}
-int i915_sched_node_add_dependency(struct drm_i915_private *i915,
- struct i915_sched_node *node,
+int i915_sched_node_add_dependency(struct i915_sched_node *node,
struct i915_sched_node *signal)
{
struct i915_dependency *dep;
- dep = i915_dependency_alloc(i915);
+ dep = i915_dependency_alloc();
if (!dep)
return -ENOMEM;
if (!__i915_sched_node_add_dependency(node, signal, dep,
I915_DEPENDENCY_ALLOC))
- i915_dependency_free(i915, dep);
+ i915_dependency_free(dep);
return 0;
}
-void i915_sched_node_fini(struct drm_i915_private *i915,
- struct i915_sched_node *node)
+void i915_sched_node_fini(struct i915_sched_node *node)
{
struct i915_dependency *dep, *tmp;
@@ -111,7 +113,7 @@ void i915_sched_node_fini(struct drm_i915_private *i915,
list_del(&dep->wait_link);
if (dep->flags & I915_DEPENDENCY_ALLOC)
- i915_dependency_free(i915, dep);
+ i915_dependency_free(dep);
}
/* Remove ourselves from everyone who depends upon us */
@@ -121,7 +123,7 @@ void i915_sched_node_fini(struct drm_i915_private *i915,
list_del(&dep->signal_link);
if (dep->flags & I915_DEPENDENCY_ALLOC)
- i915_dependency_free(i915, dep);
+ i915_dependency_free(dep);
}
spin_unlock(&schedule_lock);
@@ -198,7 +200,7 @@ find_priolist:
if (prio == I915_PRIORITY_NORMAL) {
p = &execlists->default_priolist;
} else {
- p = kmem_cache_alloc(engine->i915->priorities, GFP_ATOMIC);
+ p = kmem_cache_alloc(global.slab_priorities, GFP_ATOMIC);
/* Convert an allocation failure to a priority bump */
if (unlikely(!p)) {
prio = I915_PRIORITY_NORMAL; /* recurses just once */
@@ -424,3 +426,39 @@ void i915_schedule_bump_priority(struct i915_request *rq, unsigned int bump)
spin_unlock_bh(&schedule_lock);
}
+
+void __i915_priolist_free(struct i915_priolist *p)
+{
+ kmem_cache_free(global.slab_priorities, p);
+}
+
+int __init i915_global_scheduler_init(void)
+{
+ global.slab_dependencies = KMEM_CACHE(i915_dependency,
+ SLAB_HWCACHE_ALIGN);
+ if (!global.slab_dependencies)
+ return -ENOMEM;
+
+ global.slab_priorities = KMEM_CACHE(i915_priolist,
+ SLAB_HWCACHE_ALIGN);
+ if (!global.slab_priorities)
+ goto err_priorities;
+
+ return 0;
+
+err_priorities:
+ kmem_cache_destroy(global.slab_dependencies);
+ return -ENOMEM;
+}
+
+void i915_global_scheduler_shrink(void)
+{
+ kmem_cache_shrink(global.slab_dependencies);
+ kmem_cache_shrink(global.slab_priorities);
+}
+
+void i915_global_scheduler_exit(void)
+{
+ kmem_cache_destroy(global.slab_dependencies);
+ kmem_cache_destroy(global.slab_priorities);
+}
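The v2 note about deferring the shrink is handled on the caller's side (i915_globals.c elsewhere in this series), not in this file. A minimal sketch of the idea, assuming a hypothetical my_globals_park() hook: wait out a global RCU grace period so that in-flight deferred frees have drained back into the caches before asking the slab allocator to release empty pages.

#include <linux/rcupdate.h>

/* Hypothetical caller-side hook; not the actual i915_globals.c code */
static void my_globals_park(void)
{
	/* Let pending call_rcu()/kfree_rcu() frees complete first... */
	rcu_barrier();

	/* ...so the shrink below actually finds empty slabs to return */
	i915_global_scheduler_shrink();
}

Shrinking before the grace period would run while objects are still pinned by pending RCU callbacks and mostly find nothing to release, which mirrors the "avoiding shrinking too early" strategy the commit message cites.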