author    Christian König <christian.koenig@amd.com>    2015-08-20 18:01:01 +0300
committer Alex Deucher <alexander.deucher@amd.com>      2015-08-25 17:44:23 +0300
commit    e688b728228b951f41175e3a7c0738708d045969 (patch)
tree      f390bdc496fb0445c9e8e75c3614fc2f339c95e8 /drivers/gpu/drm/amd
parent    f49565982194d601f4f3a99892d0f3c765aec104 (diff)
drm/amdgpu: reorder scheduler functions
Keep run queue, entity and scheduler handling together.

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Chunming Zhou <david1.zhou@amd.com>
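The two relocated helpers encode the scheduler's submission policy: jobs are throttled against hw_submission_limit, and the kernel run queue is always tried before the normal run queue. The following is a minimal standalone sketch of that policy under the stated assumptions: the struct definitions are simplified stand-ins rather than the real amdgpu structures, and amd_sched_rq_select_entity is stubbed out (in the real file it picks the next runnable entity from the queue):

#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>

/* Simplified stand-ins for the amdgpu scheduler structures. */
struct amd_sched_rq { int unused; };
struct amd_sched_entity { int unused; };

struct amd_gpu_scheduler {
        atomic_uint hw_rq_count;          /* jobs currently in flight on the hw ring */
        unsigned int hw_submission_limit; /* max jobs allowed in flight */
        struct amd_sched_rq kernel_rq;    /* high-priority (kernel) run queue */
        struct amd_sched_rq sched_rq;     /* normal run queue */
};

/* Stub for illustration only; the real helper walks the run
 * queue and returns the next entity with pending submissions. */
static struct amd_sched_entity *
amd_sched_rq_select_entity(struct amd_sched_rq *rq)
{
        (void)rq;
        return NULL;
}

/* True while the hw ring still has room for another job. */
static bool amd_sched_ready(struct amd_gpu_scheduler *sched)
{
        return atomic_load(&sched->hw_rq_count) < sched->hw_submission_limit;
}

/* The kernel run queue is consulted before the normal run queue,
 * so kernel submissions always win when both have work. */
static struct amd_sched_entity *
amd_sched_select_context(struct amd_gpu_scheduler *sched)
{
        struct amd_sched_entity *entity;

        if (!amd_sched_ready(sched))
                return NULL;

        entity = amd_sched_rq_select_entity(&sched->kernel_rq);
        if (entity == NULL)
                entity = amd_sched_rq_select_entity(&sched->sched_rq);

        return entity;
}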
Diffstat (limited to 'drivers/gpu/drm/amd')
-rw-r--r--  drivers/gpu/drm/amd/scheduler/gpu_scheduler.c  56
1 file changed, 28 insertions(+), 28 deletions(-)
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
index 435d70499d3e..5fb189ca7fbe 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
@@ -94,34 +94,6 @@ amd_sched_rq_select_entity(struct amd_sched_rq *rq)
}
/**
- * Return true if we can push more jobs to the hw.
- */
-static bool amd_sched_ready(struct amd_gpu_scheduler *sched)
-{
- return atomic_read(&sched->hw_rq_count) <
- sched->hw_submission_limit;
-}
-
-/**
- * Select next entity containing real IB submissions
-*/
-static struct amd_sched_entity *
-amd_sched_select_context(struct amd_gpu_scheduler *sched)
-{
- struct amd_sched_entity *tmp;
-
- if (!amd_sched_ready(sched))
- return NULL;
-
- /* Kernel run queue has higher priority than normal run queue*/
- tmp = amd_sched_rq_select_entity(&sched->kernel_rq);
- if (tmp == NULL)
- tmp = amd_sched_rq_select_entity(&sched->sched_rq);
-
- return tmp;
-}
-
-/**
* Init a context entity used by scheduler when submit to HW ring.
*
* @sched The pointer to the scheduler
@@ -263,6 +235,34 @@ int amd_sched_push_job(struct amd_sched_job *sched_job)
return 0;
}
+/**
+ * Return true if we can push more jobs to the hw.
+ */
+static bool amd_sched_ready(struct amd_gpu_scheduler *sched)
+{
+ return atomic_read(&sched->hw_rq_count) <
+ sched->hw_submission_limit;
+}
+
+/**
+ * Select next entity containing real IB submissions
+*/
+static struct amd_sched_entity *
+amd_sched_select_context(struct amd_gpu_scheduler *sched)
+{
+ struct amd_sched_entity *tmp;
+
+ if (!amd_sched_ready(sched))
+ return NULL;
+
+ /* Kernel run queue has higher priority than normal run queue*/
+ tmp = amd_sched_rq_select_entity(&sched->kernel_rq);
+ if (tmp == NULL)
+ tmp = amd_sched_rq_select_entity(&sched->sched_rq);
+
+ return tmp;
+}
+
static void amd_sched_process_job(struct fence *f, struct fence_cb *cb)
{
struct amd_sched_job *sched_job =