 drivers/gpu/drm/scheduler/sched_main.c | 18 +++++++++---------
 1 file changed, 9 insertions(+), 9 deletions(-)
diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
index 98b2ad54fc70..fb64b35451f5 100644
--- a/drivers/gpu/drm/scheduler/sched_main.c
+++ b/drivers/gpu/drm/scheduler/sched_main.c
@@ -448,7 +448,7 @@ static void drm_sched_job_timedout(struct work_struct *work)
sched = container_of(work, struct drm_gpu_scheduler, work_tdr.work);
- /* Protects against concurrent deletion in drm_sched_get_cleanup_job */
+ /* Protects against concurrent deletion in drm_sched_get_finished_job */
spin_lock(&sched->job_list_lock);
job = list_first_entry_or_null(&sched->pending_list,
struct drm_sched_job, list);
@@ -500,9 +500,9 @@ void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad)
/*
* Reinsert back the bad job here - now it's safe as
- * drm_sched_get_cleanup_job cannot race against us and release the
+ * drm_sched_get_finished_job cannot race against us and release the
* bad job at this point - we parked (waited for) any in progress
- * (earlier) cleanups and drm_sched_get_cleanup_job will not be called
+ * (earlier) cleanups and drm_sched_get_finished_job will not be called
* now until the scheduler thread is unparked.
*/
if (bad && bad->sched == sched)
@@ -960,7 +960,7 @@ drm_sched_select_entity(struct drm_gpu_scheduler *sched)
}
/**
- * drm_sched_get_cleanup_job - fetch the next finished job to be destroyed
+ * drm_sched_get_finished_job - fetch the next finished job to be destroyed
*
* @sched: scheduler instance
*
@@ -968,7 +968,7 @@ drm_sched_select_entity(struct drm_gpu_scheduler *sched)
* ready for it to be destroyed.
*/
static struct drm_sched_job *
-drm_sched_get_cleanup_job(struct drm_gpu_scheduler *sched)
+drm_sched_get_finished_job(struct drm_gpu_scheduler *sched)
{
struct drm_sched_job *job, *next;
@@ -1059,14 +1059,14 @@ static void drm_sched_free_job_work(struct work_struct *w)
{
struct drm_gpu_scheduler *sched =
container_of(w, struct drm_gpu_scheduler, work_free_job);
- struct drm_sched_job *cleanup_job;
+ struct drm_sched_job *job;
if (READ_ONCE(sched->pause_submit))
return;
- cleanup_job = drm_sched_get_cleanup_job(sched);
- if (cleanup_job) {
- sched->ops->free_job(cleanup_job);
+ job = drm_sched_get_finished_job(sched);
+ if (job) {
+ sched->ops->free_job(job);
drm_sched_free_job_queue_if_done(sched);
drm_sched_run_job_queue_if_ready(sched);
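
For context, the drm_sched_free_job_work() handler after this rename reads roughly as below. The last hunk is cut off before the end of the function, so the closing lines here are an assumption rather than part of the quoted diff:

static void drm_sched_free_job_work(struct work_struct *w)
{
	struct drm_gpu_scheduler *sched =
		container_of(w, struct drm_gpu_scheduler, work_free_job);
	struct drm_sched_job *job;

	/* Nothing to do while submission is paused. */
	if (READ_ONCE(sched->pause_submit))
		return;

	/* Pop the next finished job, if any, and hand it to the driver. */
	job = drm_sched_get_finished_job(sched);
	if (job) {
		sched->ops->free_job(job);

		drm_sched_free_job_queue_if_done(sched);
		drm_sched_run_job_queue_if_ready(sched);
	}
}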