author     Arunpravin Paneer Selvam <Arunpravin.PaneerSelvam@amd.com>   2022-06-06 10:36:30 +0300
committer  Alex Deucher <alexander.deucher@amd.com>                     2022-06-06 21:41:25 +0300
commit     b07d1d73b09ef40e91ace51a2e167391676a8175 (patch)
tree       8af1842448ceb9054135c62d2fb6b47ff4e932ed /drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
parent     3d8785f6c04a953868384db455bb2fdd0b22c11c (diff)
download   linux-b07d1d73b09ef40e91ace51a2e167391676a8175.tar.xz
drm/amd/amdgpu: Enable high priority gfx queue
Starting from SIENNA CICHLID, the ASIC supports two gfx pipes, enabling two
graphics queues, one on each pipe: pipe0 queue0 is the normal priority queue
and pipe1 queue0 is the high priority queue.

Only one queue per pipe is visible to SPI; SPI looks at the priority value
assigned to CP_GFX_HQD_QUEUE_PRIORITY in each queue's HQD/MQD.

Contexts created with AMDGPU_CTX_PRIORITY_HIGH submit their jobs to the high
priority queue on GFX pipe1. Note that a low priority workload can starve if
a high priority workload is always available.

v2:
  - remove unnecessary check (Nirmoy)
  - make pipe1 hardware support a separate patch (Nirmoy)
  - remove duplicate code (Shashank)
  - add CSA support for second gfx pipe (Alex)

v3 (Christian):
  - fix incorrect indentation
  - merge COMPUTE and GFX switch cases as both call the same function

v4:
  - rebase w/ latest code base

Signed-off-by: Arunpravin Paneer Selvam <Arunpravin.PaneerSelvam@amd.com>
Acked-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
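
For illustration only, not part of this patch: userspace reaches the high
priority gfx queue by creating its GPU context with AMDGPU_CTX_PRIORITY_HIGH.
The minimal sketch below uses libdrm's amdgpu_cs_ctx_create2(); the helper
name and the assumption that an amdgpu_device_handle has already been
initialized (e.g. via amdgpu_device_initialize()) are mine, and raising the
priority above normal may require DRM master or CAP_SYS_NICE.

/* Illustrative sketch, not part of this patch: create a context whose jobs
 * land on the high priority gfx queue (pipe1 queue0 on ASICs with two gfx
 * pipes).  Assumes libdrm_amdgpu and an already-initialized device handle.
 */
#include <amdgpu.h>
#include <amdgpu_drm.h>
#include <stdio.h>

static int create_high_priority_ctx(amdgpu_device_handle dev,
                                    amdgpu_context_handle *ctx)
{
        /* May fail with -EACCES without DRM master or CAP_SYS_NICE. */
        int r = amdgpu_cs_ctx_create2(dev, AMDGPU_CTX_PRIORITY_HIGH, ctx);

        if (r)
                fprintf(stderr, "high priority ctx create failed: %d\n", r);
        return r;
}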
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c')
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c | 55
1 file changed, 44 insertions(+), 11 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
index 16699158e00d..00c69f0a9f52 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
@@ -142,7 +142,12 @@ void amdgpu_gfx_parse_disable_cu(unsigned *mask, unsigned max_se, unsigned max_s
        }
}

-static bool amdgpu_gfx_is_multipipe_capable(struct amdgpu_device *adev)
+static bool amdgpu_gfx_is_graphics_multipipe_capable(struct amdgpu_device *adev)
+{
+       return amdgpu_async_gfx_ring && adev->gfx.me.num_pipe_per_me > 1;
+}
+
+static bool amdgpu_gfx_is_compute_multipipe_capable(struct amdgpu_device *adev)
{
        if (amdgpu_compute_multipipe != -1) {
                DRM_INFO("amdgpu: forcing compute pipe policy %d\n",
@@ -158,6 +163,28 @@ static bool amdgpu_gfx_is_multipipe_capable(struct amdgpu_device *adev)
        return adev->gfx.mec.num_mec > 1;
}

+bool amdgpu_gfx_is_high_priority_graphics_queue(struct amdgpu_device *adev,
+                                                struct amdgpu_ring *ring)
+{
+       int queue = ring->queue;
+       int pipe = ring->pipe;
+
+       /* Policy: use pipe1 queue0 as high priority graphics queue if we
+        * have more than one gfx pipe.
+        */
+       if (amdgpu_gfx_is_graphics_multipipe_capable(adev) &&
+           adev->gfx.num_gfx_rings > 1 && pipe == 1 && queue == 0) {
+               int me = ring->me;
+               int bit;
+
+               bit = amdgpu_gfx_me_queue_to_bit(adev, me, pipe, queue);
+               if (ring == &adev->gfx.gfx_ring[bit])
+                       return true;
+       }
+
+       return false;
+}
+
bool amdgpu_gfx_is_high_priority_compute_queue(struct amdgpu_device *adev,
                                               struct amdgpu_ring *ring)
{
@@ -174,7 +201,7 @@ bool amdgpu_gfx_is_high_priority_compute_queue(struct amdgpu_device *adev,
void amdgpu_gfx_compute_queue_acquire(struct amdgpu_device *adev)
{
        int i, queue, pipe;
-       bool multipipe_policy = amdgpu_gfx_is_multipipe_capable(adev);
+       bool multipipe_policy = amdgpu_gfx_is_compute_multipipe_capable(adev);
        int max_queues_per_mec = min(adev->gfx.mec.num_pipe_per_mec *
                                     adev->gfx.mec.num_queue_per_pipe,
                                     adev->gfx.num_compute_rings);
@@ -200,18 +227,24 @@ void amdgpu_gfx_compute_queue_acquire(struct amdgpu_device *adev)
void amdgpu_gfx_graphics_queue_acquire(struct amdgpu_device *adev)
{
-       int i, queue, me;
-
-       for (i = 0; i < AMDGPU_MAX_GFX_QUEUES; ++i) {
-               queue = i % adev->gfx.me.num_queue_per_pipe;
-               me = (i / adev->gfx.me.num_queue_per_pipe)
-                     / adev->gfx.me.num_pipe_per_me;
+       int i, queue, pipe;
+       bool multipipe_policy = amdgpu_gfx_is_graphics_multipipe_capable(adev);
+       int max_queues_per_me = adev->gfx.me.num_pipe_per_me *
+                               adev->gfx.me.num_queue_per_pipe;

-               if (me >= adev->gfx.me.num_me)
-                       break;
+       if (multipipe_policy) {
                /* policy: amdgpu owns the first queue per pipe at this stage
                 * will extend to mulitple queues per pipe later */
-               if (me == 0 && queue < 1)
+               for (i = 0; i < max_queues_per_me; i++) {
+                       pipe = i % adev->gfx.me.num_pipe_per_me;
+                       queue = (i / adev->gfx.me.num_pipe_per_me) %
+                               adev->gfx.me.num_queue_per_pipe;
+
+                       set_bit(pipe * adev->gfx.me.num_queue_per_pipe + queue,
+                               adev->gfx.me.queue_bitmap);
+               }
+       } else {
+               for (i = 0; i < max_queues_per_me; ++i)
                        set_bit(i, adev->gfx.me.queue_bitmap);
        }
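
Side note on the acquire loop above: with the multipipe policy the walk is
pipe-major, so consecutive i values land on different pipes and bit
(pipe * num_queue_per_pipe + queue) is set for the first queue of each gfx
pipe. That is the bitmap amdgpu_gfx_is_high_priority_graphics_queue() checks
against, via amdgpu_gfx_me_queue_to_bit(), when it matches pipe1 queue0.
Below is a standalone sketch of that mapping using assumed values (2 pipes,
1 queue per pipe) and a hypothetical me_info struct standing in for
adev->gfx.me; it is not code from the patch.

/* Standalone sketch (not from the patch) of the pipe-major walk in
 * amdgpu_gfx_graphics_queue_acquire() under the multipipe policy.
 * me_info and its values are hypothetical stand-ins for adev->gfx.me;
 * with 2 pipes and 1 queue per pipe this marks bit 0 (pipe0 queue0,
 * normal priority) and bit 1 (pipe1 queue0, high priority).
 */
#include <stdio.h>

struct me_info {
        int num_pipe_per_me;    /* assumed 2 */
        int num_queue_per_pipe; /* assumed 1 */
};

int main(void)
{
        struct me_info me = { .num_pipe_per_me = 2, .num_queue_per_pipe = 1 };
        int max_queues_per_me = me.num_pipe_per_me * me.num_queue_per_pipe;
        int i;

        for (i = 0; i < max_queues_per_me; i++) {
                int pipe = i % me.num_pipe_per_me;
                int queue = (i / me.num_pipe_per_me) % me.num_queue_per_pipe;

                printf("set bit %d (pipe %d, queue %d)\n",
                       pipe * me.num_queue_per_pipe + queue, pipe, queue);
        }
        return 0;
}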