author		Danilo Krummrich <dakr@redhat.com>	2023-08-04 21:23:47 +0300
committer	Danilo Krummrich <dakr@redhat.com>	2023-08-04 21:34:36 +0300
commit		7f2a0b50b2b20308a19602b51c647566c62e144c (patch)
tree		84ca56f21274bceb5ca1f37466f433d788485014 /drivers/gpu/drm/nouveau/nouveau_dmem.c
parent		fbc0ced450060bbce807b35885fe4be8d19b1e22 (diff)
drm/nouveau: fence: separate fence alloc and emit
The new (VM_BIND) UAPI exports DMA fences through DRM syncobjs. Hence, in
order to emit fences within DMA fence signalling critical sections (e.g. as
typically done in the DRM GPU scheduler's run_job() callback) we need to
separate fence allocation and fence emitting.

Reviewed-by: Dave Airlie <airlied@redhat.com>
Signed-off-by: Danilo Krummrich <dakr@redhat.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20230804182406.5222-8-dakr@redhat.com
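For orientation before the diff: every call site below moves from a single
fused call to a two-step allocate-then-emit sequence. A minimal sketch of the
new shape, assuming (as the hunks suggest) that nouveau_fence_new() now takes
only the fence pointer and returns 0 on success; "chan" stands in for whichever
channel the caller emits on (here, the dmem migration channel):

	struct nouveau_fence *fence = NULL;

	/* Old, fused API: nouveau_fence_new(chan, false, &fence); */

	/* New, split API: allocate first, emit only on success. The emit
	 * step performs no allocation, so it may run inside a DMA fence
	 * signalling critical section. */
	if (!nouveau_fence_new(&fence))
		nouveau_fence_emit(fence, chan);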
Diffstat (limited to 'drivers/gpu/drm/nouveau/nouveau_dmem.c')
-rw-r--r--	drivers/gpu/drm/nouveau/nouveau_dmem.c | 9 ++++++---
1 file changed, 6 insertions(+), 3 deletions(-)
diff --git a/drivers/gpu/drm/nouveau/nouveau_dmem.c b/drivers/gpu/drm/nouveau/nouveau_dmem.c
index 789857faa048..4ad40e42cae1 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dmem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dmem.c
@@ -209,7 +209,8 @@ static vm_fault_t nouveau_dmem_migrate_to_ram(struct vm_fault *vmf)
goto done;
}
- nouveau_fence_new(dmem->migrate.chan, false, &fence);
+ if (!nouveau_fence_new(&fence))
+ nouveau_fence_emit(fence, dmem->migrate.chan);
migrate_vma_pages(&args);
nouveau_dmem_fence_done(&fence);
dma_unmap_page(drm->dev->dev, dma_addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
@@ -402,7 +403,8 @@ nouveau_dmem_evict_chunk(struct nouveau_dmem_chunk *chunk)
}
}
- nouveau_fence_new(chunk->drm->dmem->migrate.chan, false, &fence);
+ if (!nouveau_fence_new(&fence))
+ nouveau_fence_emit(fence, chunk->drm->dmem->migrate.chan);
migrate_device_pages(src_pfns, dst_pfns, npages);
nouveau_dmem_fence_done(&fence);
migrate_device_finalize(src_pfns, dst_pfns, npages);
@@ -675,7 +677,8 @@ static void nouveau_dmem_migrate_chunk(struct nouveau_drm *drm,
addr += PAGE_SIZE;
}
- nouveau_fence_new(drm->dmem->migrate.chan, false, &fence);
+ if (!nouveau_fence_new(&fence))
+ nouveau_fence_emit(fence, drm->dmem->migrate.chan);
migrate_vma_pages(args);
nouveau_dmem_fence_done(&fence);
nouveau_pfns_map(svmm, args->vma->vm_mm, args->start, pfns, i);
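As a usage note, a hypothetical sketch of the scheduler case the commit message
motivates: with the split API a driver can allocate the fence at job-creation
time, in ordinary process context, and defer the non-allocating emit into the
run_job() callback, which executes inside a DMA fence signalling critical
section where memory allocation is not safe. The my_job type and functions
below are illustrative, not nouveau code:

	struct my_job {
		struct nouveau_channel *chan;
		struct nouveau_fence *fence;
	};

	/* Job setup: ordinary process context; allocation may sleep or fail. */
	static int my_job_init(struct my_job *job)
	{
		return nouveau_fence_new(&job->fence);
	}

	/* run_job()-style callback: DMA fence signalling critical section;
	 * only the non-allocating emit step happens here. */
	static void my_job_run(struct my_job *job)
	{
		nouveau_fence_emit(job->fence, job->chan);
	}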