Diffstat (limited to 'drivers/gpu/drm/i915/i915_active.c')
 drivers/gpu/drm/i915/i915_active.c | 96 ++++++++++++++++++++++++++++++++++++
 1 file changed, 96 insertions(+), 0 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_active.c b/drivers/gpu/drm/i915/i915_active.c
index 863ae12707ba..293e5bcc4b6c 100644
--- a/drivers/gpu/drm/i915/i915_active.c
+++ b/drivers/gpu/drm/i915/i915_active.c
@@ -4,6 +4,8 @@
  * Copyright © 2019 Intel Corporation
  */
 
+#include "gt/intel_engine_pm.h"
+
 #include "i915_drv.h"
 #include "i915_active.h"
 #include "i915_globals.h"
@@ -157,6 +159,7 @@ void i915_active_init(struct drm_i915_private *i915,
         ref->retire = retire;
         ref->tree = RB_ROOT;
         i915_active_request_init(&ref->last, NULL, last_retire);
+        init_llist_head(&ref->barriers);
         ref->count = 0;
 }
 
@@ -263,6 +266,99 @@ void i915_active_fini(struct i915_active *ref)
 }
 #endif
 
+int i915_active_acquire_preallocate_barrier(struct i915_active *ref,
+                                            struct intel_engine_cs *engine)
+{
+        struct drm_i915_private *i915 = engine->i915;
+        struct llist_node *pos, *next;
+        unsigned long tmp;
+        int err;
+
+        GEM_BUG_ON(!engine->mask);
+        for_each_engine_masked(engine, i915, engine->mask, tmp) {
+                struct intel_context *kctx = engine->kernel_context;
+                struct active_node *node;
+
+                node = kmem_cache_alloc(global.slab_cache, GFP_KERNEL);
+                if (unlikely(!node)) {
+                        err = -ENOMEM;
+                        goto unwind;
+                }
+
+                i915_active_request_init(&node->base,
+                                         (void *)engine, node_retire);
+                node->timeline = kctx->ring->timeline->fence_context;
+                node->ref = ref;
+                ref->count++;
+
+                intel_engine_pm_get(engine);
+                llist_add((struct llist_node *)&node->base.link,
+                          &ref->barriers);
+        }
+
+        return 0;
+
+unwind:
+        llist_for_each_safe(pos, next, llist_del_all(&ref->barriers)) {
+                struct active_node *node;
+
+                node = container_of((struct list_head *)pos,
+                                    typeof(*node), base.link);
+                engine = (void *)rcu_access_pointer(node->base.request);
+
+                intel_engine_pm_put(engine);
+                kmem_cache_free(global.slab_cache, node);
+        }
+        return err;
+}
+
+void i915_active_acquire_barrier(struct i915_active *ref)
+{
+        struct llist_node *pos, *next;
+
+        i915_active_acquire(ref);
+
+        llist_for_each_safe(pos, next, llist_del_all(&ref->barriers)) {
+                struct intel_engine_cs *engine;
+                struct active_node *node;
+                struct rb_node **p, *parent;
+
+                node = container_of((struct list_head *)pos,
+                                    typeof(*node), base.link);
+
+                engine = (void *)rcu_access_pointer(node->base.request);
+                RCU_INIT_POINTER(node->base.request, ERR_PTR(-EAGAIN));
+
+                parent = NULL;
+                p = &ref->tree.rb_node;
+                while (*p) {
+                        parent = *p;
+                        if (rb_entry(parent,
+                                     struct active_node,
+                                     node)->timeline < node->timeline)
+                                p = &parent->rb_right;
+                        else
+                                p = &parent->rb_left;
+                }
+                rb_link_node(&node->node, parent, p);
+                rb_insert_color(&node->node, &ref->tree);
+
+                llist_add((struct llist_node *)&node->base.link,
+                          &engine->barrier_tasks);
+                intel_engine_pm_put(engine);
+        }
+        i915_active_release(ref);
+}
+
+void i915_request_add_barriers(struct i915_request *rq)
+{
+        struct intel_engine_cs *engine = rq->engine;
+        struct llist_node *node, *next;
+
+        llist_for_each_safe(node, next, llist_del_all(&engine->barrier_tasks))
+                list_add_tail((struct list_head *)node, &rq->active_list);
+}
+
 int i915_active_request_set(struct i915_active_request *active,
                             struct i915_request *rq)
 {
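
For orientation, below is a sketch of how a caller might drive the three entry points this patch adds. It is not part of the diff: the helper names keep_active_until_idle() and flush_barriers() are invented for illustration, and the request-side handoff assumes a kernel-context request built with i915_request_create(), in the style of the engine power-management code of this era. The ordering, though, follows from the code above: preallocation may sleep (GFP_KERNEL), while publication only manipulates the llist and rbtree.

/*
 * Illustrative caller only -- not part of this patch. The helper names
 * are hypothetical; the i915_active / intel_engine_cs types and the
 * three barrier entry points are the ones added by the diff above.
 */
#include "gt/intel_engine_pm.h"
#include "i915_active.h"
#include "i915_request.h"

static int keep_active_until_idle(struct i915_active *ref,
                                  struct intel_engine_cs *engine)
{
        int err;

        /*
         * Step 1: preallocate one active_node per engine in engine->mask.
         * This may sleep and may fail, so it runs before any atomic
         * section; on failure it unwinds its own engine-pm references.
         */
        err = i915_active_acquire_preallocate_barrier(ref, engine);
        if (err)
                return err;

        /*
         * Step 2: publish the nodes. Each one is inserted into ref->tree
         * keyed by the kernel context's timeline and queued on
         * engine->barrier_tasks, so ref stays busy (ref->count elevated)
         * until the engine next runs its kernel context.
         */
        i915_active_acquire_barrier(ref);
        return 0;
}

/*
 * Later, whoever builds the next kernel-context request splices the
 * pending barriers onto it; retiring that request retires the barrier
 * nodes and finally drops the references they hold on ref.
 */
static int flush_barriers(struct intel_engine_cs *engine)
{
        struct i915_request *rq;

        rq = i915_request_create(engine->kernel_context);
        if (IS_ERR(rq))
                return PTR_ERR(rq);

        i915_request_add_barriers(rq);
        i915_request_add(rq);
        return 0;
}

The two-phase split is the point of the API: i915_active_acquire_preallocate_barrier() does all the sleeping allocation up front, so i915_active_acquire_barrier() can publish the nodes with nothing but llist and rbtree operations.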