author    Danilo Krummrich <dakr@redhat.com>  2023-08-30 01:38:02 +0300
committer Danilo Krummrich <dakr@redhat.com>  2023-08-31 01:46:23 +0300
commit    978474dc8278f661930e02e08d292a45a45fa01a (patch)
tree      142d03f066ad67bf867c8e7926452a7fff48db75 /drivers/gpu/drm/nouveau/nouveau_dmem.c
parent    4b2fd81f2af7147e844ecec0c5c07a16bca6b86e (diff)
drm/nouveau: fence: fix undefined fence state after emit
nouveau_fence_emit() can fail before and after initializing the dma-fence and hence before and after initializing the dma-fence's kref. In order to avoid nouveau_fence_emit() potentially failing before dma-fence initialization, pass the channel to nouveau_fence_new() already and perform the required check before even allocating the fence.

While at it, restore the original behavior of nouveau_fence_new() and add nouveau_fence_create() for separate (pre-)allocation instead. Always splitting up allocation and emit wasn't a good idea in the first place. Hence, limit it to the places where we actually need to pre-allocate.

Fixes: 7f2a0b50b2b2 ("drm/nouveau: fence: separate fence alloc and emit")
Signed-off-by: Danilo Krummrich <dakr@redhat.com>
Reviewed-by: Dave Airlie <airlied@redhat.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20230829223847.4406-1-dakr@redhat.com
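For context, a minimal sketch of the two calling patterns the message above describes. This is illustrative only: it assumes the post-patch prototypes nouveau_fence_new(struct nouveau_fence **, struct nouveau_channel *), nouveau_fence_create(struct nouveau_fence **, struct nouveau_channel *) and nouveau_fence_emit(struct nouveau_fence *), which are not shown in this excerpt, and "chan" stands in for whichever struct nouveau_channel * the caller holds.

	/* Illustrative sketch, not part of the patch. */

	/* Common case: the channel is checked and the fence allocated and
	 * emitted in one call, so a failure can no longer leave a
	 * half-initialized dma-fence behind. */
	struct nouveau_fence *fence = NULL;
	int ret;

	ret = nouveau_fence_new(&fence, chan);
	if (ret)
		return ret;

	/* Pre-allocation case: allocate up front with nouveau_fence_create(),
	 * queue the work to be fenced, then emit. */
	ret = nouveau_fence_create(&fence, chan);
	if (ret)
		return ret;
	/* ... queue the work to be fenced ... */
	ret = nouveau_fence_emit(fence);
	if (ret)
		nouveau_fence_unref(&fence);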
Diffstat (limited to 'drivers/gpu/drm/nouveau/nouveau_dmem.c')
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_dmem.c  9
1 file changed, 3 insertions(+), 6 deletions(-)
diff --git a/drivers/gpu/drm/nouveau/nouveau_dmem.c b/drivers/gpu/drm/nouveau/nouveau_dmem.c
index 61e84562094a..12feecf71e75 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dmem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dmem.c
@@ -209,8 +209,7 @@ static vm_fault_t nouveau_dmem_migrate_to_ram(struct vm_fault *vmf)
 		goto done;
 	}
 
-	if (!nouveau_fence_new(&fence))
-		nouveau_fence_emit(fence, dmem->migrate.chan);
+	nouveau_fence_new(&fence, dmem->migrate.chan);
 	migrate_vma_pages(&args);
 	nouveau_dmem_fence_done(&fence);
 	dma_unmap_page(drm->dev->dev, dma_addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
@@ -403,8 +402,7 @@ nouveau_dmem_evict_chunk(struct nouveau_dmem_chunk *chunk)
 		}
 	}
 
-	if (!nouveau_fence_new(&fence))
-		nouveau_fence_emit(fence, chunk->drm->dmem->migrate.chan);
+	nouveau_fence_new(&fence, chunk->drm->dmem->migrate.chan);
 	migrate_device_pages(src_pfns, dst_pfns, npages);
 	nouveau_dmem_fence_done(&fence);
 	migrate_device_finalize(src_pfns, dst_pfns, npages);
@@ -677,8 +675,7 @@ static void nouveau_dmem_migrate_chunk(struct nouveau_drm *drm,
 		addr += PAGE_SIZE;
 	}
 
-	if (!nouveau_fence_new(&fence))
-		nouveau_fence_emit(fence, drm->dmem->migrate.chan);
+	nouveau_fence_new(&fence, drm->dmem->migrate.chan);
 	migrate_vma_pages(args);
 	nouveau_dmem_fence_done(&fence);
 	nouveau_pfns_map(svmm, args->vma->vm_mm, args->start, pfns, i);