author		Christian König <christian.koenig@amd.com>	2021-06-15 16:15:01 +0300
committer	Christian König <christian.koenig@amd.com>	2021-10-06 10:48:25 +0300
commit		96601e8a4755d333a8d2e826134d5220ae2e8c24 (patch)
tree		98785a3b1bc86f4e8240c582f8b294658727a67c /drivers/dma-buf
parent		c921ff373b469ad7907cde219fa700909f59cac4 (diff)
download	linux-96601e8a4755d333a8d2e826134d5220ae2e8c24.tar.xz
dma-buf: use new iterator in dma_resv_copy_fences
This makes the function much simpler since the complex retry logic is now handled elsewhere.

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Link: https://patchwork.freedesktop.org/patch/msgid/20211005113742.1101-5-christian.koenig@amd.com
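For context, the retry handling now lives in the dma_resv_iter helpers: dma_resv_for_each_fence_unlocked() restarts the walk internally when the reservation object changes under the caller and reports this through dma_resv_iter_is_restarted(), so a caller only resets its local state instead of open-coding the rcu_read_lock()/goto retry dance. A minimal caller-side sketch of that pattern (not part of this patch; the counting helper below is hypothetical):

static unsigned int count_unsignaled_fences(struct dma_resv *obj)
{
	struct dma_resv_iter cursor;
	struct dma_fence *fence;
	unsigned int count = 0;

	/* true: walk the exclusive fence and all shared fences */
	dma_resv_iter_begin(&cursor, obj, true);
	dma_resv_for_each_fence_unlocked(&cursor, fence) {
		/* the object was modified concurrently, drop local state and start over */
		if (dma_resv_iter_is_restarted(&cursor))
			count = 0;

		if (!dma_fence_is_signaled(fence))
			count++;
	}
	dma_resv_iter_end(&cursor);

	return count;
}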
Diffstat (limited to 'drivers/dma-buf')
-rw-r--r--	drivers/dma-buf/dma-resv.c	84
1 file changed, 32 insertions, 52 deletions
diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c
index 3cbcf66a137e..08e5b8c381f4 100644
--- a/drivers/dma-buf/dma-resv.c
+++ b/drivers/dma-buf/dma-resv.c
@@ -432,74 +432,54 @@ EXPORT_SYMBOL(dma_resv_iter_next_unlocked);
  */
 int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src)
 {
-	struct dma_resv_list *src_list, *dst_list;
-	struct dma_fence *old, *new;
-	unsigned int i;
+	struct dma_resv_iter cursor;
+	struct dma_resv_list *list;
+	struct dma_fence *f, *excl;
 
 	dma_resv_assert_held(dst);
 
-	rcu_read_lock();
-	src_list = dma_resv_shared_list(src);
+	list = NULL;
+	excl = NULL;
 
-retry:
-	if (src_list) {
-		unsigned int shared_count = src_list->shared_count;
+	dma_resv_iter_begin(&cursor, src, true);
+	dma_resv_for_each_fence_unlocked(&cursor, f) {
 
-		rcu_read_unlock();
+		if (dma_resv_iter_is_restarted(&cursor)) {
+			dma_resv_list_free(list);
+			dma_fence_put(excl);
 
-		dst_list = dma_resv_list_alloc(shared_count);
-		if (!dst_list)
-			return -ENOMEM;
+			if (cursor.fences) {
+				unsigned int cnt = cursor.fences->shared_count;
 
-		rcu_read_lock();
-		src_list = dma_resv_shared_list(src);
-		if (!src_list || src_list->shared_count > shared_count) {
-			kfree(dst_list);
-			goto retry;
-		}
-
-		dst_list->shared_count = 0;
-		for (i = 0; i < src_list->shared_count; ++i) {
-			struct dma_fence __rcu **dst;
-			struct dma_fence *fence;
+				list = dma_resv_list_alloc(cnt);
+				if (!list) {
+					dma_resv_iter_end(&cursor);
+					return -ENOMEM;
+				}
 
-			fence = rcu_dereference(src_list->shared[i]);
-			if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
-				     &fence->flags))
-				continue;
+				list->shared_count = 0;
 
-			if (!dma_fence_get_rcu(fence)) {
-				dma_resv_list_free(dst_list);
-				src_list = dma_resv_shared_list(src);
-				goto retry;
+			} else {
+				list = NULL;
 			}
-
-			if (dma_fence_is_signaled(fence)) {
-				dma_fence_put(fence);
-				continue;
-			}
-
-			dst = &dst_list->shared[dst_list->shared_count++];
-			rcu_assign_pointer(*dst, fence);
+			excl = NULL;
 		}
-	} else {
-		dst_list = NULL;
-	}
 
-	new = dma_fence_get_rcu_safe(&src->fence_excl);
-	rcu_read_unlock();
-
-	src_list = dma_resv_shared_list(dst);
-	old = dma_resv_excl_fence(dst);
+		dma_fence_get(f);
+		if (dma_resv_iter_is_exclusive(&cursor))
+			excl = f;
+		else
+			RCU_INIT_POINTER(list->shared[list->shared_count++], f);
+	}
+	dma_resv_iter_end(&cursor);
 
 	write_seqcount_begin(&dst->seq);
-	/* write_seqcount_begin provides the necessary memory barrier */
-	RCU_INIT_POINTER(dst->fence_excl, new);
-	RCU_INIT_POINTER(dst->fence, dst_list);
+	excl = rcu_replace_pointer(dst->fence_excl, excl, dma_resv_held(dst));
+	list = rcu_replace_pointer(dst->fence, list, dma_resv_held(dst));
 	write_seqcount_end(&dst->seq);
 
-	dma_resv_list_free(src_list);
-	dma_fence_put(old);
+	dma_resv_list_free(list);
+	dma_fence_put(excl);
 
 	return 0;
 }