path: root/drivers/gpu/drm/i915/i915_active.c
author    Daniel Vetter <daniel.vetter@ffwll.ch>  2021-07-27 15:10:28 +0300
committer Daniel Vetter <daniel.vetter@ffwll.ch>  2021-07-28 17:45:57 +0300
commit    512ba03e35ccb2897d19d0207ef6bd55a9564fd1 (patch)
tree      12f6329f268ff3b6bc67a356e38cbf15f9dc4163 /drivers/gpu/drm/i915/i915_active.c
parent    6d5de3275609c6022d6677808968b7adcdee5e66 (diff)
drm/i915: move i915_active slab to direct module init/exit
With the global kmem_cache shrink infrastructure gone there's nothing special left and we can convert them over.

I'm doing this split up into one patch per file because there's quite a bit of noise from changing the static global.slab_cache to just a slab_cache.

v2: Make slab static (Jason, 0day)

Reviewed-by: Jason Ekstrand <jason@jlekstrand.net>
Cc: Jason Ekstrand <jason@jlekstrand.net>
Signed-off-by: Daniel Vetter <daniel.vetter@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20210727121037.2041102-2-daniel.vetter@ffwll.ch
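For reference, a minimal sketch of the pattern each converted file ends up with: a file-local static kmem_cache plus dedicated module init/exit hooks, instead of an i915_global registration. The struct and function names below are illustrative placeholders, not taken verbatim from the driver.

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/slab.h>

/* Stand-in for the real active_node; fields elided. */
struct example_node {
	unsigned long payload;
};

static struct kmem_cache *slab_cache;

int __init example_module_init(void)
{
	/* KMEM_CACHE() derives the cache name and object size from the struct. */
	slab_cache = KMEM_CACHE(example_node, SLAB_HWCACHE_ALIGN);
	if (!slab_cache)
		return -ENOMEM;

	return 0;
}

void example_module_exit(void)
{
	kmem_cache_destroy(slab_cache);
}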
Diffstat (limited to 'drivers/gpu/drm/i915/i915_active.c')
-rw-r--r--  drivers/gpu/drm/i915/i915_active.c | 31
1 file changed, 11 insertions(+), 20 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_active.c b/drivers/gpu/drm/i915/i915_active.c
index 91723123ae9f..3103c1e1fd14 100644
--- a/drivers/gpu/drm/i915/i915_active.c
+++ b/drivers/gpu/drm/i915/i915_active.c
@@ -13,7 +13,6 @@
#include "i915_drv.h"
#include "i915_active.h"
-#include "i915_globals.h"
/*
* Active refs memory management
@@ -22,10 +21,7 @@
* they idle (when we know the active requests are inactive) and allocate the
* nodes from a local slab cache to hopefully reduce the fragmentation.
*/
-static struct i915_global_active {
- struct i915_global base;
- struct kmem_cache *slab_cache;
-} global;
+static struct kmem_cache *slab_cache;
struct active_node {
struct rb_node node;
@@ -174,7 +170,7 @@ __active_retire(struct i915_active *ref)
/* Finally free the discarded timeline tree */
rbtree_postorder_for_each_entry_safe(it, n, &root, node) {
GEM_BUG_ON(i915_active_fence_isset(&it->base));
- kmem_cache_free(global.slab_cache, it);
+ kmem_cache_free(slab_cache, it);
}
}
@@ -322,7 +318,7 @@ active_instance(struct i915_active *ref, u64 idx)
* XXX: We should preallocate this before i915_active_ref() is ever
* called, but we cannot call into fs_reclaim() anyway, so use GFP_ATOMIC.
*/
- node = kmem_cache_alloc(global.slab_cache, GFP_ATOMIC);
+ node = kmem_cache_alloc(slab_cache, GFP_ATOMIC);
if (!node)
goto out;
@@ -788,7 +784,7 @@ void i915_active_fini(struct i915_active *ref)
mutex_destroy(&ref->mutex);
if (ref->cache)
- kmem_cache_free(global.slab_cache, ref->cache);
+ kmem_cache_free(slab_cache, ref->cache);
}
static inline bool is_idle_barrier(struct active_node *node, u64 idx)
@@ -908,7 +904,7 @@ int i915_active_acquire_preallocate_barrier(struct i915_active *ref,
node = reuse_idle_barrier(ref, idx);
rcu_read_unlock();
if (!node) {
- node = kmem_cache_alloc(global.slab_cache, GFP_KERNEL);
+ node = kmem_cache_alloc(slab_cache, GFP_KERNEL);
if (!node)
goto unwind;
@@ -956,7 +952,7 @@ unwind:
atomic_dec(&ref->count);
intel_engine_pm_put(barrier_to_engine(node));
- kmem_cache_free(global.slab_cache, node);
+ kmem_cache_free(slab_cache, node);
}
return -ENOMEM;
}
@@ -1176,21 +1172,16 @@ struct i915_active *i915_active_create(void)
#include "selftests/i915_active.c"
#endif
-static void i915_global_active_exit(void)
+void i915_active_module_exit(void)
{
- kmem_cache_destroy(global.slab_cache);
+ kmem_cache_destroy(slab_cache);
}
-static struct i915_global_active global = { {
- .exit = i915_global_active_exit,
-} };
-
-int __init i915_global_active_init(void)
+int __init i915_active_module_init(void)
{
- global.slab_cache = KMEM_CACHE(active_node, SLAB_HWCACHE_ALIGN);
- if (!global.slab_cache)
+ slab_cache = KMEM_CACHE(active_node, SLAB_HWCACHE_ALIGN);
+ if (!slab_cache)
return -ENOMEM;
- i915_global_register(&global.base);
return 0;
}
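With the i915_globals registration gone, the new i915_active_module_init()/i915_active_module_exit() hooks are meant to be called directly from the driver's module init/exit path. Below is a hedged sketch of how such wiring typically looks; the table and wrapper names are assumptions for illustration, not part of this patch (the diffstat above is limited to i915_active.c, so the caller-side change is not shown here).

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>

#include "i915_active.h"	/* assumed to declare the new module init/exit hooks */

/* Illustrative init/exit table; the real driver lists more subsystems. */
static const struct {
	int (*init)(void);
	void (*exit)(void);
} example_init_funcs[] = {
	{ i915_active_module_init, i915_active_module_exit },
};

static int __init example_i915_init(void)
{
	int err, i;

	for (i = 0; i < ARRAY_SIZE(example_init_funcs); i++) {
		err = example_init_funcs[i].init();
		if (err) {
			/* Unwind whatever has already been initialised. */
			while (i--)
				example_init_funcs[i].exit();
			return err;
		}
	}

	return 0;
}

static void __exit example_i915_exit(void)
{
	int i;

	/* Tear down in reverse order of initialisation. */
	for (i = ARRAY_SIZE(example_init_funcs); i--;)
		example_init_funcs[i].exit();
}

module_init(example_i915_init);
module_exit(example_i915_exit);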