Diffstat (limited to 'drivers/gpu/drm/xe/xe_sched_job.c')
-rw-r--r--	drivers/gpu/drm/xe/xe_sched_job.c	74
1 file changed, 37 insertions(+), 37 deletions(-)
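
The patch is a mechanical rename of struct xe_engine to struct xe_exec_queue in the scheduler job paths (job->engine becomes job->q, and the ENGINE_FLAG_* bits become EXEC_QUEUE_FLAG_*). As a rough illustration of the renamed predicate, the standalone sketch below mirrors the new xe_sched_job_is_migration() check using mock struct definitions and placeholder flag values; these are stand-ins for illustration only, not the driver's real types or headers.

/* Standalone sketch: mock types and made-up flag values, not the xe driver's. */
#include <stdbool.h>
#include <stdio.h>

#define XE_VM_FLAG_MIGRATION	(1 << 0)	/* placeholder value */
#define EXEC_QUEUE_FLAG_WA	(1 << 1)	/* placeholder value */

struct xe_vm {
	unsigned long flags;
};

struct xe_exec_queue {
	struct xe_vm *vm;
	unsigned long flags;
};

/* Mirrors the post-rename check from the diff: a queue counts as a migration
 * queue when it is bound to a migration VM and is not a workaround (WA) queue. */
static bool xe_sched_job_is_migration(struct xe_exec_queue *q)
{
	return q->vm && (q->vm->flags & XE_VM_FLAG_MIGRATION) &&
		!(q->flags & EXEC_QUEUE_FLAG_WA);
}

int main(void)
{
	struct xe_vm vm = { .flags = XE_VM_FLAG_MIGRATION };
	struct xe_exec_queue migration = { .vm = &vm, .flags = 0 };
	struct xe_exec_queue wa = { .vm = &vm, .flags = EXEC_QUEUE_FLAG_WA };

	printf("migration queue: %d\n", xe_sched_job_is_migration(&migration));
	printf("wa queue:        %d\n", xe_sched_job_is_migration(&wa));
	return 0;
}
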
diff --git a/drivers/gpu/drm/xe/xe_sched_job.c b/drivers/gpu/drm/xe/xe_sched_job.c
index 9944858de4d2..de2851d24c96 100644
--- a/drivers/gpu/drm/xe/xe_sched_job.c
+++ b/drivers/gpu/drm/xe/xe_sched_job.c
@@ -57,58 +57,58 @@ static struct xe_sched_job *job_alloc(bool parallel)
 				 xe_sched_job_slab, GFP_KERNEL);
 }
 
-bool xe_sched_job_is_migration(struct xe_engine *e)
+bool xe_sched_job_is_migration(struct xe_exec_queue *q)
 {
-	return e->vm && (e->vm->flags & XE_VM_FLAG_MIGRATION) &&
-		!(e->flags & ENGINE_FLAG_WA);
+	return q->vm && (q->vm->flags & XE_VM_FLAG_MIGRATION) &&
+		!(q->flags & EXEC_QUEUE_FLAG_WA);
 }
 
 static void job_free(struct xe_sched_job *job)
 {
-	struct xe_engine *e = job->engine;
-	bool is_migration = xe_sched_job_is_migration(e);
+	struct xe_exec_queue *q = job->q;
+	bool is_migration = xe_sched_job_is_migration(q);
 
-	kmem_cache_free(xe_engine_is_parallel(job->engine) || is_migration ?
+	kmem_cache_free(xe_exec_queue_is_parallel(job->q) || is_migration ?
 			xe_sched_job_parallel_slab : xe_sched_job_slab, job);
 }
 
 static struct xe_device *job_to_xe(struct xe_sched_job *job)
 {
-	return gt_to_xe(job->engine->gt);
+	return gt_to_xe(job->q->gt);
 }
 
-struct xe_sched_job *xe_sched_job_create(struct xe_engine *e,
+struct xe_sched_job *xe_sched_job_create(struct xe_exec_queue *q,
 					 u64 *batch_addr)
 {
 	struct xe_sched_job *job;
 	struct dma_fence **fences;
-	bool is_migration = xe_sched_job_is_migration(e);
+	bool is_migration = xe_sched_job_is_migration(q);
 	int err;
 	int i, j;
 	u32 width;
 
 	/* Migration and kernel engines have their own locking */
-	if (!(e->flags & (ENGINE_FLAG_KERNEL | ENGINE_FLAG_VM |
-			  ENGINE_FLAG_WA))) {
-		lockdep_assert_held(&e->vm->lock);
-		if (!xe_vm_no_dma_fences(e->vm))
-			xe_vm_assert_held(e->vm);
+	if (!(q->flags & (EXEC_QUEUE_FLAG_KERNEL | EXEC_QUEUE_FLAG_VM |
+			  EXEC_QUEUE_FLAG_WA))) {
+		lockdep_assert_held(&q->vm->lock);
+		if (!xe_vm_no_dma_fences(q->vm))
+			xe_vm_assert_held(q->vm);
 	}
 
-	job = job_alloc(xe_engine_is_parallel(e) || is_migration);
+	job = job_alloc(xe_exec_queue_is_parallel(q) || is_migration);
 	if (!job)
 		return ERR_PTR(-ENOMEM);
 
-	job->engine = e;
+	job->q = q;
 	kref_init(&job->refcount);
-	xe_engine_get(job->engine);
+	xe_exec_queue_get(job->q);
 
-	err = drm_sched_job_init(&job->drm, e->entity, 1, NULL);
+	err = drm_sched_job_init(&job->drm, q->entity, 1, NULL);
 	if (err)
 		goto err_free;
 
-	if (!xe_engine_is_parallel(e)) {
-		job->fence = xe_lrc_create_seqno_fence(e->lrc);
+	if (!xe_exec_queue_is_parallel(q)) {
+		job->fence = xe_lrc_create_seqno_fence(q->lrc);
 		if (IS_ERR(job->fence)) {
 			err = PTR_ERR(job->fence);
 			goto err_sched_job;
@@ -116,38 +116,38 @@ struct xe_sched_job *xe_sched_job_create(struct xe_engine *e,
 	} else {
 		struct dma_fence_array *cf;
 
-		fences = kmalloc_array(e->width, sizeof(*fences), GFP_KERNEL);
+		fences = kmalloc_array(q->width, sizeof(*fences), GFP_KERNEL);
 		if (!fences) {
 			err = -ENOMEM;
 			goto err_sched_job;
 		}
 
-		for (j = 0; j < e->width; ++j) {
-			fences[j] = xe_lrc_create_seqno_fence(e->lrc + j);
+		for (j = 0; j < q->width; ++j) {
+			fences[j] = xe_lrc_create_seqno_fence(q->lrc + j);
 			if (IS_ERR(fences[j])) {
 				err = PTR_ERR(fences[j]);
 				goto err_fences;
 			}
 		}
 
-		cf = dma_fence_array_create(e->width, fences,
-					    e->parallel.composite_fence_ctx,
-					    e->parallel.composite_fence_seqno++,
+		cf = dma_fence_array_create(q->width, fences,
+					    q->parallel.composite_fence_ctx,
+					    q->parallel.composite_fence_seqno++,
 					    false);
 		if (!cf) {
-			--e->parallel.composite_fence_seqno;
+			--q->parallel.composite_fence_seqno;
 			err = -ENOMEM;
 			goto err_fences;
 		}
 
 		/* Sanity check */
-		for (j = 0; j < e->width; ++j)
+		for (j = 0; j < q->width; ++j)
 			XE_WARN_ON(cf->base.seqno != fences[j]->seqno);
 
 		job->fence = &cf->base;
 	}
 
-	width = e->width;
+	width = q->width;
 	if (is_migration)
 		width = 2;
 
@@ -155,7 +155,7 @@ struct xe_sched_job *xe_sched_job_create(struct xe_engine *e,
 		job->batch_addr[i] = batch_addr[i];
 
 	/* All other jobs require a VM to be open which has a ref */
-	if (unlikely(e->flags & ENGINE_FLAG_KERNEL))
+	if (unlikely(q->flags & EXEC_QUEUE_FLAG_KERNEL))
 		xe_device_mem_access_get(job_to_xe(job));
 	xe_device_assert_mem_access(job_to_xe(job));
 
@@ -164,14 +164,14 @@ struct xe_sched_job *xe_sched_job_create(struct xe_engine *e,
 
 err_fences:
 	for (j = j - 1; j >= 0; --j) {
-		--e->lrc[j].fence_ctx.next_seqno;
+		--q->lrc[j].fence_ctx.next_seqno;
 		dma_fence_put(fences[j]);
 	}
 	kfree(fences);
 err_sched_job:
 	drm_sched_job_cleanup(&job->drm);
 err_free:
-	xe_engine_put(e);
+	xe_exec_queue_put(q);
 	job_free(job);
 	return ERR_PTR(err);
 }
@@ -188,9 +188,9 @@ void xe_sched_job_destroy(struct kref *ref)
 	struct xe_sched_job *job =
 		container_of(ref, struct xe_sched_job, refcount);
 
-	if (unlikely(job->engine->flags & ENGINE_FLAG_KERNEL))
+	if (unlikely(job->q->flags & EXEC_QUEUE_FLAG_KERNEL))
 		xe_device_mem_access_put(job_to_xe(job));
-	xe_engine_put(job->engine);
+	xe_exec_queue_put(job->q);
 	dma_fence_put(job->fence);
 	drm_sched_job_cleanup(&job->drm);
 	job_free(job);
@@ -222,12 +222,12 @@ void xe_sched_job_set_error(struct xe_sched_job *job, int error)
 	trace_xe_sched_job_set_error(job);
 
 	dma_fence_enable_sw_signaling(job->fence);
-	xe_hw_fence_irq_run(job->engine->fence_irq);
+	xe_hw_fence_irq_run(job->q->fence_irq);
 }
 
 bool xe_sched_job_started(struct xe_sched_job *job)
 {
-	struct xe_lrc *lrc = job->engine->lrc;
+	struct xe_lrc *lrc = job->q->lrc;
 
 	return !__dma_fence_is_later(xe_sched_job_seqno(job),
 				     xe_lrc_start_seqno(lrc),
@@ -236,7 +236,7 @@ bool xe_sched_job_started(struct xe_sched_job *job)
 
 bool xe_sched_job_completed(struct xe_sched_job *job)
 {
-	struct xe_lrc *lrc = job->engine->lrc;
+	struct xe_lrc *lrc = job->q->lrc;
 
 	/*
 	 * Can safely check just LRC[0] seqno as that is last seqno written when