summary refs log tree commit diff
path: root/drivers/gpu/drm/i915/i915_globals.c
diff options
context:
space:
mode:
authorChris Wilson <chris@chris-wilson.co.uk>2019-02-28 13:20:34 +0300
committerChris Wilson <chris@chris-wilson.co.uk>2019-02-28 14:08:02 +0300
commit13f1bfd3b3329b19950f95964580a84795ce7be9 (patch)
treed83dc5c9149e02d0cdcb13526493af68ec20c07f /drivers/gpu/drm/i915/i915_globals.c
parent32eb6bcfdda9dad240cf6a22fda2b3418b1a1b8e (diff)
downloadlinux-13f1bfd3b3329b19950f95964580a84795ce7be9.tar.xz
drm/i915: Make object/vma allocation caches global
As our allocations are not device specific, we can move our slab caches to a global scope. Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk> Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com> Link: https://patchwork.freedesktop.org/patch/msgid/20190228102035.5857-2-chris@chris-wilson.co.uk
Diffstat (limited to 'drivers/gpu/drm/i915/i915_globals.c')
-rw-r--r--drivers/gpu/drm/i915/i915_globals.c29
1 file changed, 28 insertions(+), 1 deletion(-)
diff --git a/drivers/gpu/drm/i915/i915_globals.c b/drivers/gpu/drm/i915/i915_globals.c
index 7fd1b3945a04..cfd0bc462f58 100644
--- a/drivers/gpu/drm/i915/i915_globals.c
+++ b/drivers/gpu/drm/i915/i915_globals.c
@@ -8,9 +8,12 @@
#include <linux/workqueue.h>
#include "i915_active.h"
+#include "i915_gem_context.h"
+#include "i915_gem_object.h"
#include "i915_globals.h"
#include "i915_request.h"
#include "i915_scheduler.h"
+#include "i915_vma.h"
int __init i915_globals_init(void)
{
@@ -20,18 +23,36 @@ int __init i915_globals_init(void)
if (err)
return err;
- err = i915_global_request_init();
+ err = i915_global_context_init();
if (err)
goto err_active;
+ err = i915_global_objects_init();
+ if (err)
+ goto err_context;
+
+ err = i915_global_request_init();
+ if (err)
+ goto err_objects;
+
err = i915_global_scheduler_init();
if (err)
goto err_request;
+ err = i915_global_vma_init();
+ if (err)
+ goto err_scheduler;
+
return 0;
+err_scheduler:
+ i915_global_scheduler_exit();
err_request:
i915_global_request_exit();
+err_objects:
+ i915_global_objects_exit();
+err_context:
+ i915_global_context_exit();
err_active:
i915_global_active_exit();
return err;
@@ -45,8 +66,11 @@ static void i915_globals_shrink(void)
* with the aim of reducing fragmentation.
*/
i915_global_active_shrink();
+ i915_global_context_shrink();
+ i915_global_objects_shrink();
i915_global_request_shrink();
i915_global_scheduler_shrink();
+ i915_global_vma_shrink();
}
static atomic_t active;
@@ -104,8 +128,11 @@ void __exit i915_globals_exit(void)
rcu_barrier();
flush_scheduled_work();
+ i915_global_vma_exit();
i915_global_scheduler_exit();
i915_global_request_exit();
+ i915_global_objects_exit();
+ i915_global_context_exit();
i915_global_active_exit();
/* And ensure that our DESTROY_BY_RCU slabs are truly destroyed */