author	Christian König <christian.koenig@amd.com>	2018-01-31 16:24:45 +0300
committer	Alex Deucher <alexander.deucher@amd.com>	2018-02-19 22:19:17 +0300
commit	58592a095c981a002137221205411f538b9f0fb9 (patch)
tree	a5f1e053f250a1dd25bc732ac4f9c24e7965074d /drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
parent	25ddf75bb3aa5e1f47eb9c04f0d50bf37269702b (diff)
drm/amdgpu: restructure amdgpu_vmid_grab
Now that we have the different cases for grabbing a VMID in separate functions, restructure the top level function to only have one place where VMIDs are assigned to jobs.

Signed-off-by: Christian König <christian.koenig@amd.com>
Acked-by: Chunming Zhou <david1.zhou@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c')
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c	89
1 file changed, 42 insertions(+), 47 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
index 0a9789604c77..156e026046b5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
@@ -263,33 +263,34 @@ static int amdgpu_vmid_grab_reserved(struct amdgpu_vm *vm,
struct amdgpu_ring *ring,
struct amdgpu_sync *sync,
struct dma_fence *fence,
- struct amdgpu_job *job)
+ struct amdgpu_job *job,
+ struct amdgpu_vmid **id)
{
struct amdgpu_device *adev = ring->adev;
unsigned vmhub = ring->funcs->vmhub;
uint64_t fence_context = adev->fence_context + ring->idx;
- struct amdgpu_vmid *id = vm->reserved_vmid[vmhub];
- struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
struct dma_fence *updates = sync->last_vm_update;
bool needs_flush = vm->use_cpu_for_update;
int r = 0;
- if (updates && id->flushed_updates &&
- updates->context == id->flushed_updates->context &&
- !dma_fence_is_later(updates, id->flushed_updates))
+ *id = vm->reserved_vmid[vmhub];
+ if (updates && (*id)->flushed_updates &&
+ updates->context == (*id)->flushed_updates->context &&
+ !dma_fence_is_later(updates, (*id)->flushed_updates))
updates = NULL;
- if (id->owner != vm->entity.fence_context ||
- job->vm_pd_addr != id->pd_gpu_addr ||
- updates || !id->last_flush ||
- (id->last_flush->context != fence_context &&
- !dma_fence_is_signaled(id->last_flush))) {
+ if ((*id)->owner != vm->entity.fence_context ||
+ job->vm_pd_addr != (*id)->pd_gpu_addr ||
+ updates || !(*id)->last_flush ||
+ ((*id)->last_flush->context != fence_context &&
+ !dma_fence_is_signaled((*id)->last_flush))) {
struct dma_fence *tmp;
/* to prevent one context starved by another context */
- id->pd_gpu_addr = 0;
- tmp = amdgpu_sync_peek_fence(&id->active, ring);
+ (*id)->pd_gpu_addr = 0;
+ tmp = amdgpu_sync_peek_fence(&(*id)->active, ring);
if (tmp) {
+ *id = NULL;
r = amdgpu_sync_fence(adev, sync, tmp, false);
return r;
}
@@ -299,24 +300,15 @@ static int amdgpu_vmid_grab_reserved(struct amdgpu_vm *vm,
/* Good we can use this VMID. Remember this submission as
* user of the VMID.
*/
- r = amdgpu_sync_fence(ring->adev, &id->active, fence, false);
+ r = amdgpu_sync_fence(ring->adev, &(*id)->active, fence, false);
if (r)
return r;
if (updates) {
- dma_fence_put(id->flushed_updates);
- id->flushed_updates = dma_fence_get(updates);
+ dma_fence_put((*id)->flushed_updates);
+ (*id)->flushed_updates = dma_fence_get(updates);
}
- id->pd_gpu_addr = job->vm_pd_addr;
- id->owner = vm->entity.fence_context;
job->vm_needs_flush = needs_flush;
- if (needs_flush) {
- dma_fence_put(id->last_flush);
- id->last_flush = NULL;
- }
- job->vmid = id - id_mgr->ids;
- job->pasid = vm->pasid;
- trace_amdgpu_vm_grab_id(vm, ring, job);
return 0;
}
@@ -411,7 +403,6 @@ int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
struct amdgpu_device *adev = ring->adev;
unsigned vmhub = ring->funcs->vmhub;
struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
- struct dma_fence *updates = sync->last_vm_update;
struct amdgpu_vmid *id, *idle;
int r = 0;
@@ -421,37 +412,41 @@ int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
goto error;
if (vm->reserved_vmid[vmhub]) {
- r = amdgpu_vmid_grab_reserved(vm, ring, sync, fence, job);
- mutex_unlock(&id_mgr->lock);
- return r;
- }
+ r = amdgpu_vmid_grab_reserved(vm, ring, sync, fence, job, &id);
+ if (r || !id)
+ goto error;
+ } else {
+ r = amdgpu_vmid_grab_used(vm, ring, sync, fence, job, &id);
+ if (r)
+ goto error;
- r = amdgpu_vmid_grab_used(vm, ring, sync, fence, job, &id);
- if (r)
- goto error;
+ if (!id) {
+ struct dma_fence *updates = sync->last_vm_update;
- if (!id) {
- /* Still no ID to use? Then use the idle one found earlier */
- id = idle;
+ /* Still no ID to use? Then use the idle one found earlier */
+ id = idle;
- /* Remember this submission as user of the VMID */
- r = amdgpu_sync_fence(ring->adev, &id->active, fence, false);
- if (r)
- goto error;
+ /* Remember this submission as user of the VMID */
+ r = amdgpu_sync_fence(ring->adev, &id->active,
+ fence, false);
+ if (r)
+ goto error;
- id->pd_gpu_addr = job->vm_pd_addr;
- dma_fence_put(id->flushed_updates);
- id->flushed_updates = dma_fence_get(updates);
- id->owner = vm->entity.fence_context;
- job->vm_needs_flush = true;
+ dma_fence_put(id->flushed_updates);
+ id->flushed_updates = dma_fence_get(updates);
+ job->vm_needs_flush = true;
+ }
+
+ list_move_tail(&id->list, &id_mgr->ids_lru);
}
+ id->pd_gpu_addr = job->vm_pd_addr;
+ id->owner = vm->entity.fence_context;
+
if (job->vm_needs_flush) {
dma_fence_put(id->last_flush);
id->last_flush = NULL;
}
- list_move_tail(&id->list, &id_mgr->ids_lru);
-
job->vmid = id - id_mgr->ids;
job->pasid = vm->pasid;
trace_amdgpu_vm_grab_id(vm, ring, job);
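
For readability, here is a condensed sketch of the top-level amdgpu_vmid_grab() as it reads after this patch, pieced together from the hunks above. The initial idle-VMID lookup and the locking/error path are not part of this diff, so the amdgpu_vmid_grab_idle() call and the error label below are assumptions based on the surrounding series; the single assignment block near the bottom is the point of the restructuring.

/*
 * Condensed sketch of amdgpu_vmid_grab() after this patch, not the verbatim
 * upstream function.  The idle-VMID lookup and the locking/error path are
 * assumed from context; the rest mirrors the hunks above.
 */
int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
		     struct amdgpu_sync *sync, struct dma_fence *fence,
		     struct amdgpu_job *job)
{
	struct amdgpu_device *adev = ring->adev;
	unsigned vmhub = ring->funcs->vmhub;
	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
	struct amdgpu_vmid *id, *idle;
	int r = 0;

	mutex_lock(&id_mgr->lock);

	/* Assumed helper from earlier in this series: pick an idle VMID as a
	 * fallback, or sync to a busy one and leave idle == NULL. */
	r = amdgpu_vmid_grab_idle(vm, ring, sync, fence, &idle);
	if (r || !idle)
		goto error;

	if (vm->reserved_vmid[vmhub]) {
		/* Reserved path; may leave id == NULL when we have to wait. */
		r = amdgpu_vmid_grab_reserved(vm, ring, sync, fence, job, &id);
		if (r || !id)
			goto error;
	} else {
		/* Try to reuse a VMID this VM used before. */
		r = amdgpu_vmid_grab_used(vm, ring, sync, fence, job, &id);
		if (r)
			goto error;

		if (!id) {
			/* Still no ID to use? Then use the idle one. */
			id = idle;

			/* Remember this submission as user of the VMID. */
			r = amdgpu_sync_fence(ring->adev, &id->active,
					      fence, false);
			if (r)
				goto error;

			dma_fence_put(id->flushed_updates);
			id->flushed_updates = dma_fence_get(sync->last_vm_update);
			job->vm_needs_flush = true;
		}

		list_move_tail(&id->list, &id_mgr->ids_lru);
	}

	/* The one place left where a VMID is assigned to the job. */
	id->pd_gpu_addr = job->vm_pd_addr;
	id->owner = vm->entity.fence_context;

	if (job->vm_needs_flush) {
		dma_fence_put(id->last_flush);
		id->last_flush = NULL;
	}
	job->vmid = id - id_mgr->ids;
	job->pasid = vm->pasid;
	trace_amdgpu_vm_grab_id(vm, ring, job);

error:
	mutex_unlock(&id_mgr->lock);
	return r;
}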