From 0adec22702d497385dbdc52abb165f379a00efba Mon Sep 17 00:00:00 2001 From: Thomas Zimmermann Date: Tue, 13 Jun 2023 16:51:33 +0200 Subject: drm: Remove struct drm_driver.gem_prime_mmap All drivers initialize this field with drm_gem_prime_mmap(). Call the function directly and remove the field. Simplifies the code and resolves a long-standing TODO item. Signed-off-by: Thomas Zimmermann Reviewed-by: Alex Deucher Link: https://patchwork.freedesktop.org/patch/msgid/20230613150441.17720-3-tzimmermann@suse.de --- include/drm/drm_drv.h | 14 -------------- include/drm/drm_gem_dma_helper.h | 6 ++---- include/drm/drm_gem_shmem_helper.h | 1 - include/drm/drm_gem_vram_helper.h | 1 - 4 files changed, 2 insertions(+), 20 deletions(-) (limited to 'include/drm') diff --git a/include/drm/drm_drv.h b/include/drm/drm_drv.h index 89e2706cac56..870278ecd8ba 100644 --- a/include/drm/drm_drv.h +++ b/include/drm/drm_drv.h @@ -343,20 +343,6 @@ struct drm_driver { struct drm_device *dev, struct dma_buf_attachment *attach, struct sg_table *sgt); - /** - * @gem_prime_mmap: - * - * mmap hook for GEM drivers, used to implement dma-buf mmap in the - * PRIME helpers. - * - * This hook only exists for historical reasons. Drivers must use - * drm_gem_prime_mmap() to implement it. - * - * FIXME: Convert all drivers to implement mmap in struct - * &drm_gem_object_funcs and inline drm_gem_prime_mmap() into - * its callers. This hook should be removed afterwards. - */ - int (*gem_prime_mmap)(struct drm_gem_object *obj, struct vm_area_struct *vma); /** * @dumb_create: diff --git a/include/drm/drm_gem_dma_helper.h b/include/drm/drm_gem_dma_helper.h index 8a043235dad8..61da596780b6 100644 --- a/include/drm/drm_gem_dma_helper.h +++ b/include/drm/drm_gem_dma_helper.h @@ -169,8 +169,7 @@ drm_gem_dma_prime_import_sg_table(struct drm_device *dev, .dumb_create = (dumb_create_func), \ .prime_handle_to_fd = drm_gem_prime_handle_to_fd, \ .prime_fd_to_handle = drm_gem_prime_fd_to_handle, \ - .gem_prime_import_sg_table = drm_gem_dma_prime_import_sg_table, \ - .gem_prime_mmap = drm_gem_prime_mmap + .gem_prime_import_sg_table = drm_gem_dma_prime_import_sg_table /** * DRM_GEM_DMA_DRIVER_OPS - DMA GEM driver operations @@ -207,8 +206,7 @@ drm_gem_dma_prime_import_sg_table(struct drm_device *dev, .dumb_create = dumb_create_func, \ .prime_handle_to_fd = drm_gem_prime_handle_to_fd, \ .prime_fd_to_handle = drm_gem_prime_fd_to_handle, \ - .gem_prime_import_sg_table = drm_gem_dma_prime_import_sg_table_vmap, \ - .gem_prime_mmap = drm_gem_prime_mmap + .gem_prime_import_sg_table = drm_gem_dma_prime_import_sg_table_vmap /** * DRM_GEM_DMA_DRIVER_OPS_VMAP - DMA GEM driver operations ensuring a virtual diff --git a/include/drm/drm_gem_shmem_helper.h b/include/drm/drm_gem_shmem_helper.h index 5994fed5e327..46eb46e69063 100644 --- a/include/drm/drm_gem_shmem_helper.h +++ b/include/drm/drm_gem_shmem_helper.h @@ -293,7 +293,6 @@ int drm_gem_shmem_dumb_create(struct drm_file *file, struct drm_device *dev, .prime_handle_to_fd = drm_gem_prime_handle_to_fd, \ .prime_fd_to_handle = drm_gem_prime_fd_to_handle, \ .gem_prime_import_sg_table = drm_gem_shmem_prime_import_sg_table, \ - .gem_prime_mmap = drm_gem_prime_mmap, \ .dumb_create = drm_gem_shmem_dumb_create #endif /* __DRM_GEM_SHMEM_HELPER_H__ */ diff --git a/include/drm/drm_gem_vram_helper.h b/include/drm/drm_gem_vram_helper.h index f4aab64411d8..6b265cb9f45a 100644 --- a/include/drm/drm_gem_vram_helper.h +++ b/include/drm/drm_gem_vram_helper.h @@ -160,7 +160,6 @@ void 
drm_gem_vram_simple_display_pipe_cleanup_fb( .debugfs_init = drm_vram_mm_debugfs_init, \ .dumb_create = drm_gem_vram_driver_dumb_create, \ .dumb_map_offset = drm_gem_ttm_dumb_map_offset, \ - .gem_prime_mmap = drm_gem_prime_mmap, \ .prime_handle_to_fd = drm_gem_prime_handle_to_fd, \ .prime_fd_to_handle = drm_gem_prime_fd_to_handle -- cgit v1.2.3 From 21aa27ddc58269349597c6d243212bcc4065d277 Mon Sep 17 00:00:00 2001 From: Dmitry Osipenko Date: Tue, 30 May 2023 01:39:35 +0300 Subject: drm/shmem-helper: Switch to reservation lock Replace all drm-shmem locks with a GEM reservation lock. This makes locks consistent with dma-buf locking convention where importers are responsible for holding reservation lock for all operations performed over dma-bufs, preventing deadlock between dma-buf importers and exporters. Suggested-by: Daniel Vetter Acked-by: Thomas Zimmermann Reviewed-by: Emil Velikov Signed-off-by: Dmitry Osipenko Link: https://patchwork.freedesktop.org/patch/msgid/20230529223935.2672495-7-dmitry.osipenko@collabora.com --- drivers/gpu/drm/drm_gem_shmem_helper.c | 210 ++++++++++------------- drivers/gpu/drm/lima/lima_gem.c | 8 +- drivers/gpu/drm/panfrost/panfrost_drv.c | 7 +- drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c | 6 +- drivers/gpu/drm/panfrost/panfrost_mmu.c | 19 +- include/drm/drm_gem_shmem_helper.h | 14 +- 6 files changed, 116 insertions(+), 148 deletions(-) (limited to 'include/drm') diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c index 4ea6507a77e5..a783d2245599 100644 --- a/drivers/gpu/drm/drm_gem_shmem_helper.c +++ b/drivers/gpu/drm/drm_gem_shmem_helper.c @@ -88,8 +88,6 @@ __drm_gem_shmem_create(struct drm_device *dev, size_t size, bool private) if (ret) goto err_release; - mutex_init(&shmem->pages_lock); - mutex_init(&shmem->vmap_lock); INIT_LIST_HEAD(&shmem->madv_list); if (!private) { @@ -141,11 +139,13 @@ void drm_gem_shmem_free(struct drm_gem_shmem_object *shmem) { struct drm_gem_object *obj = &shmem->base; - drm_WARN_ON(obj->dev, shmem->vmap_use_count); - if (obj->import_attach) { drm_prime_gem_destroy(obj, shmem->sgt); } else { + dma_resv_lock(shmem->base.resv, NULL); + + drm_WARN_ON(obj->dev, shmem->vmap_use_count); + if (shmem->sgt) { dma_unmap_sgtable(obj->dev->dev, shmem->sgt, DMA_BIDIRECTIONAL, 0); @@ -154,22 +154,24 @@ void drm_gem_shmem_free(struct drm_gem_shmem_object *shmem) } if (shmem->pages) drm_gem_shmem_put_pages(shmem); - } - drm_WARN_ON(obj->dev, shmem->pages_use_count); + drm_WARN_ON(obj->dev, shmem->pages_use_count); + + dma_resv_unlock(shmem->base.resv); + } drm_gem_object_release(obj); - mutex_destroy(&shmem->pages_lock); - mutex_destroy(&shmem->vmap_lock); kfree(shmem); } EXPORT_SYMBOL_GPL(drm_gem_shmem_free); -static int drm_gem_shmem_get_pages_locked(struct drm_gem_shmem_object *shmem) +static int drm_gem_shmem_get_pages(struct drm_gem_shmem_object *shmem) { struct drm_gem_object *obj = &shmem->base; struct page **pages; + dma_resv_assert_held(shmem->base.resv); + if (shmem->pages_use_count++ > 0) return 0; @@ -197,35 +199,16 @@ static int drm_gem_shmem_get_pages_locked(struct drm_gem_shmem_object *shmem) } /* - * drm_gem_shmem_get_pages - Allocate backing pages for a shmem GEM object + * drm_gem_shmem_put_pages - Decrease use count on the backing pages for a shmem GEM object * @shmem: shmem GEM object * - * This function makes sure that backing pages exists for the shmem GEM object - * and increases the use count. - * - * Returns: - * 0 on success or a negative error code on failure. 
+ * This function decreases the use count and puts the backing pages when use drops to zero. */ -int drm_gem_shmem_get_pages(struct drm_gem_shmem_object *shmem) +void drm_gem_shmem_put_pages(struct drm_gem_shmem_object *shmem) { struct drm_gem_object *obj = &shmem->base; - int ret; - drm_WARN_ON(obj->dev, obj->import_attach); - - ret = mutex_lock_interruptible(&shmem->pages_lock); - if (ret) - return ret; - ret = drm_gem_shmem_get_pages_locked(shmem); - mutex_unlock(&shmem->pages_lock); - - return ret; -} -EXPORT_SYMBOL(drm_gem_shmem_get_pages); - -static void drm_gem_shmem_put_pages_locked(struct drm_gem_shmem_object *shmem) -{ - struct drm_gem_object *obj = &shmem->base; + dma_resv_assert_held(shmem->base.resv); if (drm_WARN_ON_ONCE(obj->dev, !shmem->pages_use_count)) return; @@ -243,20 +226,25 @@ static void drm_gem_shmem_put_pages_locked(struct drm_gem_shmem_object *shmem) shmem->pages_mark_accessed_on_put); shmem->pages = NULL; } +EXPORT_SYMBOL(drm_gem_shmem_put_pages); -/* - * drm_gem_shmem_put_pages - Decrease use count on the backing pages for a shmem GEM object - * @shmem: shmem GEM object - * - * This function decreases the use count and puts the backing pages when use drops to zero. - */ -void drm_gem_shmem_put_pages(struct drm_gem_shmem_object *shmem) +static int drm_gem_shmem_pin_locked(struct drm_gem_shmem_object *shmem) { - mutex_lock(&shmem->pages_lock); - drm_gem_shmem_put_pages_locked(shmem); - mutex_unlock(&shmem->pages_lock); + int ret; + + dma_resv_assert_held(shmem->base.resv); + + ret = drm_gem_shmem_get_pages(shmem); + + return ret; +} + +static void drm_gem_shmem_unpin_locked(struct drm_gem_shmem_object *shmem) +{ + dma_resv_assert_held(shmem->base.resv); + + drm_gem_shmem_put_pages(shmem); } -EXPORT_SYMBOL(drm_gem_shmem_put_pages); /** * drm_gem_shmem_pin - Pin backing pages for a shmem GEM object @@ -271,10 +259,17 @@ EXPORT_SYMBOL(drm_gem_shmem_put_pages); int drm_gem_shmem_pin(struct drm_gem_shmem_object *shmem) { struct drm_gem_object *obj = &shmem->base; + int ret; drm_WARN_ON(obj->dev, obj->import_attach); - return drm_gem_shmem_get_pages(shmem); + ret = dma_resv_lock_interruptible(shmem->base.resv, NULL); + if (ret) + return ret; + ret = drm_gem_shmem_pin_locked(shmem); + dma_resv_unlock(shmem->base.resv); + + return ret; } EXPORT_SYMBOL(drm_gem_shmem_pin); @@ -291,12 +286,29 @@ void drm_gem_shmem_unpin(struct drm_gem_shmem_object *shmem) drm_WARN_ON(obj->dev, obj->import_attach); - drm_gem_shmem_put_pages(shmem); + dma_resv_lock(shmem->base.resv, NULL); + drm_gem_shmem_unpin_locked(shmem); + dma_resv_unlock(shmem->base.resv); } EXPORT_SYMBOL(drm_gem_shmem_unpin); -static int drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem, - struct iosys_map *map) +/* + * drm_gem_shmem_vmap - Create a virtual mapping for a shmem GEM object + * @shmem: shmem GEM object + * @map: Returns the kernel virtual address of the SHMEM GEM object's backing + * store. + * + * This function makes sure that a contiguous kernel virtual address mapping + * exists for the buffer backing the shmem GEM object. It hides the differences + * between dma-buf imported and natively allocated objects. + * + * Acquired mappings should be cleaned up by calling drm_gem_shmem_vunmap(). + * + * Returns: + * 0 on success or a negative error code on failure. 
+ */ +int drm_gem_shmem_vmap(struct drm_gem_shmem_object *shmem, + struct iosys_map *map) { struct drm_gem_object *obj = &shmem->base; int ret = 0; @@ -312,6 +324,8 @@ static int drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem, } else { pgprot_t prot = PAGE_KERNEL; + dma_resv_assert_held(shmem->base.resv); + if (shmem->vmap_use_count++ > 0) { iosys_map_set_vaddr(map, shmem->vaddr); return 0; @@ -346,45 +360,30 @@ err_zero_use: return ret; } +EXPORT_SYMBOL(drm_gem_shmem_vmap); /* - * drm_gem_shmem_vmap - Create a virtual mapping for a shmem GEM object + * drm_gem_shmem_vunmap - Unmap a virtual mapping for a shmem GEM object * @shmem: shmem GEM object - * @map: Returns the kernel virtual address of the SHMEM GEM object's backing - * store. - * - * This function makes sure that a contiguous kernel virtual address mapping - * exists for the buffer backing the shmem GEM object. It hides the differences - * between dma-buf imported and natively allocated objects. + * @map: Kernel virtual address where the SHMEM GEM object was mapped * - * Acquired mappings should be cleaned up by calling drm_gem_shmem_vunmap(). + * This function cleans up a kernel virtual address mapping acquired by + * drm_gem_shmem_vmap(). The mapping is only removed when the use count drops to + * zero. * - * Returns: - * 0 on success or a negative error code on failure. + * This function hides the differences between dma-buf imported and natively + * allocated objects. */ -int drm_gem_shmem_vmap(struct drm_gem_shmem_object *shmem, - struct iosys_map *map) -{ - int ret; - - ret = mutex_lock_interruptible(&shmem->vmap_lock); - if (ret) - return ret; - ret = drm_gem_shmem_vmap_locked(shmem, map); - mutex_unlock(&shmem->vmap_lock); - - return ret; -} -EXPORT_SYMBOL(drm_gem_shmem_vmap); - -static void drm_gem_shmem_vunmap_locked(struct drm_gem_shmem_object *shmem, - struct iosys_map *map) +void drm_gem_shmem_vunmap(struct drm_gem_shmem_object *shmem, + struct iosys_map *map) { struct drm_gem_object *obj = &shmem->base; if (obj->import_attach) { dma_buf_vunmap(obj->import_attach->dmabuf, map); } else { + dma_resv_assert_held(shmem->base.resv); + if (drm_WARN_ON_ONCE(obj->dev, !shmem->vmap_use_count)) return; @@ -397,26 +396,6 @@ static void drm_gem_shmem_vunmap_locked(struct drm_gem_shmem_object *shmem, shmem->vaddr = NULL; } - -/* - * drm_gem_shmem_vunmap - Unmap a virtual mapping for a shmem GEM object - * @shmem: shmem GEM object - * @map: Kernel virtual address where the SHMEM GEM object was mapped - * - * This function cleans up a kernel virtual address mapping acquired by - * drm_gem_shmem_vmap(). The mapping is only removed when the use count drops to - * zero. - * - * This function hides the differences between dma-buf imported and natively - * allocated objects. 
- */ -void drm_gem_shmem_vunmap(struct drm_gem_shmem_object *shmem, - struct iosys_map *map) -{ - mutex_lock(&shmem->vmap_lock); - drm_gem_shmem_vunmap_locked(shmem, map); - mutex_unlock(&shmem->vmap_lock); -} EXPORT_SYMBOL(drm_gem_shmem_vunmap); static int @@ -447,24 +426,24 @@ drm_gem_shmem_create_with_handle(struct drm_file *file_priv, */ int drm_gem_shmem_madvise(struct drm_gem_shmem_object *shmem, int madv) { - mutex_lock(&shmem->pages_lock); + dma_resv_assert_held(shmem->base.resv); if (shmem->madv >= 0) shmem->madv = madv; madv = shmem->madv; - mutex_unlock(&shmem->pages_lock); - return (madv >= 0); } EXPORT_SYMBOL(drm_gem_shmem_madvise); -void drm_gem_shmem_purge_locked(struct drm_gem_shmem_object *shmem) +void drm_gem_shmem_purge(struct drm_gem_shmem_object *shmem) { struct drm_gem_object *obj = &shmem->base; struct drm_device *dev = obj->dev; + dma_resv_assert_held(shmem->base.resv); + drm_WARN_ON(obj->dev, !drm_gem_shmem_is_purgeable(shmem)); dma_unmap_sgtable(dev->dev, shmem->sgt, DMA_BIDIRECTIONAL, 0); @@ -472,7 +451,7 @@ void drm_gem_shmem_purge_locked(struct drm_gem_shmem_object *shmem) kfree(shmem->sgt); shmem->sgt = NULL; - drm_gem_shmem_put_pages_locked(shmem); + drm_gem_shmem_put_pages(shmem); shmem->madv = -1; @@ -488,17 +467,6 @@ void drm_gem_shmem_purge_locked(struct drm_gem_shmem_object *shmem) invalidate_mapping_pages(file_inode(obj->filp)->i_mapping, 0, (loff_t)-1); } -EXPORT_SYMBOL(drm_gem_shmem_purge_locked); - -bool drm_gem_shmem_purge(struct drm_gem_shmem_object *shmem) -{ - if (!mutex_trylock(&shmem->pages_lock)) - return false; - drm_gem_shmem_purge_locked(shmem); - mutex_unlock(&shmem->pages_lock); - - return true; -} EXPORT_SYMBOL(drm_gem_shmem_purge); /** @@ -551,7 +519,7 @@ static vm_fault_t drm_gem_shmem_fault(struct vm_fault *vmf) /* We don't use vmf->pgoff since that has the fake offset */ page_offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT; - mutex_lock(&shmem->pages_lock); + dma_resv_lock(shmem->base.resv, NULL); if (page_offset >= num_pages || drm_WARN_ON_ONCE(obj->dev, !shmem->pages) || @@ -563,7 +531,7 @@ static vm_fault_t drm_gem_shmem_fault(struct vm_fault *vmf) ret = vmf_insert_pfn(vma, vmf->address, page_to_pfn(page)); } - mutex_unlock(&shmem->pages_lock); + dma_resv_unlock(shmem->base.resv); return ret; } @@ -575,7 +543,7 @@ static void drm_gem_shmem_vm_open(struct vm_area_struct *vma) drm_WARN_ON(obj->dev, obj->import_attach); - mutex_lock(&shmem->pages_lock); + dma_resv_lock(shmem->base.resv, NULL); /* * We should have already pinned the pages when the buffer was first @@ -585,7 +553,7 @@ static void drm_gem_shmem_vm_open(struct vm_area_struct *vma) if (!drm_WARN_ON_ONCE(obj->dev, !shmem->pages_use_count)) shmem->pages_use_count++; - mutex_unlock(&shmem->pages_lock); + dma_resv_unlock(shmem->base.resv); drm_gem_vm_open(vma); } @@ -595,7 +563,10 @@ static void drm_gem_shmem_vm_close(struct vm_area_struct *vma) struct drm_gem_object *obj = vma->vm_private_data; struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj); + dma_resv_lock(shmem->base.resv, NULL); drm_gem_shmem_put_pages(shmem); + dma_resv_unlock(shmem->base.resv); + drm_gem_vm_close(vma); } @@ -633,7 +604,10 @@ int drm_gem_shmem_mmap(struct drm_gem_shmem_object *shmem, struct vm_area_struct return ret; } + dma_resv_lock(shmem->base.resv, NULL); ret = drm_gem_shmem_get_pages(shmem); + dma_resv_unlock(shmem->base.resv); + if (ret) return ret; @@ -699,7 +673,7 @@ static struct sg_table *drm_gem_shmem_get_pages_sgt_locked(struct drm_gem_shmem_ drm_WARN_ON(obj->dev, 
obj->import_attach); - ret = drm_gem_shmem_get_pages_locked(shmem); + ret = drm_gem_shmem_get_pages(shmem); if (ret) return ERR_PTR(ret); @@ -721,7 +695,7 @@ err_free_sgt: sg_free_table(sgt); kfree(sgt); err_put_pages: - drm_gem_shmem_put_pages_locked(shmem); + drm_gem_shmem_put_pages(shmem); return ERR_PTR(ret); } @@ -746,11 +720,11 @@ struct sg_table *drm_gem_shmem_get_pages_sgt(struct drm_gem_shmem_object *shmem) int ret; struct sg_table *sgt; - ret = mutex_lock_interruptible(&shmem->pages_lock); + ret = dma_resv_lock_interruptible(shmem->base.resv, NULL); if (ret) return ERR_PTR(ret); sgt = drm_gem_shmem_get_pages_sgt_locked(shmem); - mutex_unlock(&shmem->pages_lock); + dma_resv_unlock(shmem->base.resv); return sgt; } diff --git a/drivers/gpu/drm/lima/lima_gem.c b/drivers/gpu/drm/lima/lima_gem.c index 10252dc11a22..4f9736e5f929 100644 --- a/drivers/gpu/drm/lima/lima_gem.c +++ b/drivers/gpu/drm/lima/lima_gem.c @@ -34,7 +34,7 @@ int lima_heap_alloc(struct lima_bo *bo, struct lima_vm *vm) new_size = min(new_size, bo->base.base.size); - mutex_lock(&bo->base.pages_lock); + dma_resv_lock(bo->base.base.resv, NULL); if (bo->base.pages) { pages = bo->base.pages; @@ -42,7 +42,7 @@ int lima_heap_alloc(struct lima_bo *bo, struct lima_vm *vm) pages = kvmalloc_array(bo->base.base.size >> PAGE_SHIFT, sizeof(*pages), GFP_KERNEL | __GFP_ZERO); if (!pages) { - mutex_unlock(&bo->base.pages_lock); + dma_resv_unlock(bo->base.base.resv); return -ENOMEM; } @@ -56,13 +56,13 @@ int lima_heap_alloc(struct lima_bo *bo, struct lima_vm *vm) struct page *page = shmem_read_mapping_page(mapping, i); if (IS_ERR(page)) { - mutex_unlock(&bo->base.pages_lock); + dma_resv_unlock(bo->base.base.resv); return PTR_ERR(page); } pages[i] = page; } - mutex_unlock(&bo->base.pages_lock); + dma_resv_unlock(bo->base.base.resv); ret = sg_alloc_table_from_pages(&sgt, pages, i, 0, new_size, GFP_KERNEL); diff --git a/drivers/gpu/drm/panfrost/panfrost_drv.c b/drivers/gpu/drm/panfrost/panfrost_drv.c index d2916bf43547..598ad1dbe6e1 100644 --- a/drivers/gpu/drm/panfrost/panfrost_drv.c +++ b/drivers/gpu/drm/panfrost/panfrost_drv.c @@ -407,6 +407,10 @@ static int panfrost_ioctl_madvise(struct drm_device *dev, void *data, bo = to_panfrost_bo(gem_obj); + ret = dma_resv_lock_interruptible(bo->base.base.resv, NULL); + if (ret) + goto out_put_object; + mutex_lock(&pfdev->shrinker_lock); mutex_lock(&bo->mappings.lock); if (args->madv == PANFROST_MADV_DONTNEED) { @@ -444,7 +448,8 @@ static int panfrost_ioctl_madvise(struct drm_device *dev, void *data, out_unlock_mappings: mutex_unlock(&bo->mappings.lock); mutex_unlock(&pfdev->shrinker_lock); - + dma_resv_unlock(bo->base.base.resv); +out_put_object: drm_gem_object_put(gem_obj); return ret; } diff --git a/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c b/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c index bf0170782f25..6a71a2555f85 100644 --- a/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c +++ b/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c @@ -48,14 +48,14 @@ static bool panfrost_gem_purge(struct drm_gem_object *obj) if (!mutex_trylock(&bo->mappings.lock)) return false; - if (!mutex_trylock(&shmem->pages_lock)) + if (!dma_resv_trylock(shmem->base.resv)) goto unlock_mappings; panfrost_gem_teardown_mappings_locked(bo); - drm_gem_shmem_purge_locked(&bo->base); + drm_gem_shmem_purge(&bo->base); ret = true; - mutex_unlock(&shmem->pages_lock); + dma_resv_unlock(shmem->base.resv); unlock_mappings: mutex_unlock(&bo->mappings.lock); diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.c 
b/drivers/gpu/drm/panfrost/panfrost_mmu.c index e961fa27702c..c0123d09f699 100644 --- a/drivers/gpu/drm/panfrost/panfrost_mmu.c +++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c @@ -443,6 +443,7 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as, struct panfrost_gem_mapping *bomapping; struct panfrost_gem_object *bo; struct address_space *mapping; + struct drm_gem_object *obj; pgoff_t page_offset; struct sg_table *sgt; struct page **pages; @@ -465,15 +466,16 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as, page_offset = addr >> PAGE_SHIFT; page_offset -= bomapping->mmnode.start; - mutex_lock(&bo->base.pages_lock); + obj = &bo->base.base; + + dma_resv_lock(obj->resv, NULL); if (!bo->base.pages) { bo->sgts = kvmalloc_array(bo->base.base.size / SZ_2M, sizeof(struct sg_table), GFP_KERNEL | __GFP_ZERO); if (!bo->sgts) { - mutex_unlock(&bo->base.pages_lock); ret = -ENOMEM; - goto err_bo; + goto err_unlock; } pages = kvmalloc_array(bo->base.base.size >> PAGE_SHIFT, @@ -481,9 +483,8 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as, if (!pages) { kvfree(bo->sgts); bo->sgts = NULL; - mutex_unlock(&bo->base.pages_lock); ret = -ENOMEM; - goto err_bo; + goto err_unlock; } bo->base.pages = pages; bo->base.pages_use_count = 1; @@ -491,7 +492,6 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as, pages = bo->base.pages; if (pages[page_offset]) { /* Pages are already mapped, bail out. */ - mutex_unlock(&bo->base.pages_lock); goto out; } } @@ -502,15 +502,12 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as, for (i = page_offset; i < page_offset + NUM_FAULT_PAGES; i++) { pages[i] = shmem_read_mapping_page(mapping, i); if (IS_ERR(pages[i])) { - mutex_unlock(&bo->base.pages_lock); ret = PTR_ERR(pages[i]); pages[i] = NULL; goto err_pages; } } - mutex_unlock(&bo->base.pages_lock); - sgt = &bo->sgts[page_offset / (SZ_2M / PAGE_SIZE)]; ret = sg_alloc_table_from_pages(sgt, pages + page_offset, NUM_FAULT_PAGES, 0, SZ_2M, GFP_KERNEL); @@ -529,6 +526,8 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as, dev_dbg(pfdev->dev, "mapped page fault @ AS%d %llx", as, addr); out: + dma_resv_unlock(obj->resv); + panfrost_gem_mapping_put(bomapping); return 0; @@ -537,6 +536,8 @@ err_map: sg_free_table(sgt); err_pages: drm_gem_shmem_put_pages(&bo->base); +err_unlock: + dma_resv_unlock(obj->resv); err_bo: panfrost_gem_mapping_put(bomapping); return ret; diff --git a/include/drm/drm_gem_shmem_helper.h b/include/drm/drm_gem_shmem_helper.h index 46eb46e69063..2867d2aba88b 100644 --- a/include/drm/drm_gem_shmem_helper.h +++ b/include/drm/drm_gem_shmem_helper.h @@ -26,11 +26,6 @@ struct drm_gem_shmem_object { */ struct drm_gem_object base; - /** - * @pages_lock: Protects the page table and use count - */ - struct mutex pages_lock; - /** * @pages: Page table */ @@ -65,11 +60,6 @@ struct drm_gem_shmem_object { */ struct sg_table *sgt; - /** - * @vmap_lock: Protects the vmap address and use count - */ - struct mutex vmap_lock; - /** * @vaddr: Kernel virtual address of the backing memory */ @@ -109,7 +99,6 @@ struct drm_gem_shmem_object { struct drm_gem_shmem_object *drm_gem_shmem_create(struct drm_device *dev, size_t size); void drm_gem_shmem_free(struct drm_gem_shmem_object *shmem); -int drm_gem_shmem_get_pages(struct drm_gem_shmem_object *shmem); void drm_gem_shmem_put_pages(struct drm_gem_shmem_object *shmem); int drm_gem_shmem_pin(struct drm_gem_shmem_object 
*shmem); void drm_gem_shmem_unpin(struct drm_gem_shmem_object *shmem); @@ -128,8 +117,7 @@ static inline bool drm_gem_shmem_is_purgeable(struct drm_gem_shmem_object *shmem !shmem->base.dma_buf && !shmem->base.import_attach; } -void drm_gem_shmem_purge_locked(struct drm_gem_shmem_object *shmem); -bool drm_gem_shmem_purge(struct drm_gem_shmem_object *shmem); +void drm_gem_shmem_purge(struct drm_gem_shmem_object *shmem); struct sg_table *drm_gem_shmem_get_sg_table(struct drm_gem_shmem_object *shmem); struct sg_table *drm_gem_shmem_get_pages_sgt(struct drm_gem_shmem_object *shmem); -- cgit v1.2.3 From 0cf8d292ba5ed90c7873ea84270deaecc1988f05 Mon Sep 17 00:00:00 2001 From: Simon Ser Date: Tue, 20 Jun 2023 17:42:42 +0000 Subject: drm/sysfs: rename drm_sysfs_connector_status_event() Rename drm_sysfs_connector_status_event() to drm_sysfs_connector_property_event(). Indeed, "status" is a bit vague: it can easily be confused with the connected/disconnected status of the connector. This function has nothing to do with connected/disconnected: it merely sends a notification that a connector's property has changed (e.g. HDCP, privacy screen, etc). Signed-off-by: Simon Ser Cc: Manasi Navare Cc: Sam Ravnborg Cc: Harry Wentland Cc: Daniel Vetter Reviewed-by: Sam Ravnborg Link: https://patchwork.freedesktop.org/patch/msgid/20230620174231.260335-1-contact@emersion.fr --- drivers/gpu/drm/display/drm_hdcp_helper.c | 4 ++-- drivers/gpu/drm/drm_connector.c | 8 ++++---- drivers/gpu/drm/drm_sysfs.c | 23 +++++++++++++---------- include/drm/drm_sysfs.h | 4 ++-- 4 files changed, 21 insertions(+), 18 deletions(-) (limited to 'include/drm') diff --git a/drivers/gpu/drm/display/drm_hdcp_helper.c b/drivers/gpu/drm/display/drm_hdcp_helper.c index e78999c72bd7..a3f0e6d96105 100644 --- a/drivers/gpu/drm/display/drm_hdcp_helper.c +++ b/drivers/gpu/drm/display/drm_hdcp_helper.c @@ -415,7 +415,7 @@ void drm_hdcp_update_content_protection(struct drm_connector *connector, return; state->content_protection = val; - drm_sysfs_connector_status_event(connector, - dev->mode_config.content_protection_property); + drm_sysfs_connector_property_event(connector, + dev->mode_config.content_protection_property); } EXPORT_SYMBOL(drm_hdcp_update_content_protection); diff --git a/drivers/gpu/drm/drm_connector.c b/drivers/gpu/drm/drm_connector.c index 3ed4cfcb350c..bf8371dc2a61 100644 --- a/drivers/gpu/drm/drm_connector.c +++ b/drivers/gpu/drm/drm_connector.c @@ -2730,10 +2730,10 @@ static int drm_connector_privacy_screen_notifier( drm_connector_update_privacy_screen_properties(connector, true); drm_modeset_unlock(&dev->mode_config.connection_mutex); - drm_sysfs_connector_status_event(connector, - connector->privacy_screen_sw_state_property); - drm_sysfs_connector_status_event(connector, - connector->privacy_screen_hw_state_property); + drm_sysfs_connector_property_event(connector, + connector->privacy_screen_sw_state_property); + drm_sysfs_connector_property_event(connector, + connector->privacy_screen_hw_state_property); return NOTIFY_DONE; } diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c index f62767ff34b2..b169b3e44a92 100644 --- a/drivers/gpu/drm/drm_sysfs.c +++ b/drivers/gpu/drm/drm_sysfs.c @@ -487,17 +487,17 @@ void drm_sysfs_connector_hotplug_event(struct drm_connector *connector) EXPORT_SYMBOL(drm_sysfs_connector_hotplug_event); /** - * drm_sysfs_connector_status_event - generate a DRM uevent for connector - * property status change - * @connector: connector on which property status changed - * @property: 
connector property whose status changed. + * drm_sysfs_connector_property_event - generate a DRM uevent for connector + * property change + * @connector: connector on which property changed + * @property: connector property which has changed. * - * Send a uevent for the DRM device specified by @dev. Currently we + * Send a uevent for the specified DRM connector and property. Currently we * set HOTPLUG=1 and connector id along with the attached property id - * related to the status change. + * related to the change. */ -void drm_sysfs_connector_status_event(struct drm_connector *connector, - struct drm_property *property) +void drm_sysfs_connector_property_event(struct drm_connector *connector, + struct drm_property *property) { struct drm_device *dev = connector->dev; char hotplug_str[] = "HOTPLUG=1", conn_id[21], prop_id[21]; @@ -511,11 +511,14 @@ void drm_sysfs_connector_status_event(struct drm_connector *connector, snprintf(prop_id, ARRAY_SIZE(prop_id), "PROPERTY=%u", property->base.id); - DRM_DEBUG("generating connector status event\n"); + drm_dbg_kms(connector->dev, + "[CONNECTOR:%d:%s] generating connector property event for [PROP:%d:%s]\n", + connector->base.id, connector->name, + property->base.id, property->name); kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, envp); } -EXPORT_SYMBOL(drm_sysfs_connector_status_event); +EXPORT_SYMBOL(drm_sysfs_connector_property_event); struct device *drm_sysfs_minor_alloc(struct drm_minor *minor) { diff --git a/include/drm/drm_sysfs.h b/include/drm/drm_sysfs.h index 6273cac44e47..96a5d858404b 100644 --- a/include/drm/drm_sysfs.h +++ b/include/drm/drm_sysfs.h @@ -12,6 +12,6 @@ void drm_class_device_unregister(struct device *dev); void drm_sysfs_hotplug_event(struct drm_device *dev); void drm_sysfs_connector_hotplug_event(struct drm_connector *connector); -void drm_sysfs_connector_status_event(struct drm_connector *connector, - struct drm_property *property); +void drm_sysfs_connector_property_event(struct drm_connector *connector, + struct drm_property *property); #endif -- cgit v1.2.3 From 6b85aa68d9d5a27556b8b1015e7e515a371e77de Mon Sep 17 00:00:00 2001 From: Thomas Zimmermann Date: Tue, 20 Jun 2023 09:59:57 +0200 Subject: drm: Enable PRIME import/export for all drivers Call drm_gem_prime_handle_to_fd() and drm_gem_prime_fd_to_handle() by default if no PRIME import/export helpers have been set. Both functions are the default for almost all drivers. DRM drivers implement struct drm_driver.gem_prime_import_sg_table to import dma-buf objects from other drivers. Having the function drm_gem_prime_fd_to_handle() functions set by default allows each driver to import dma-buf objects to itself, even without support for other drivers. For drm_gem_prime_handle_to_fd() it is similar: using it by default allows each driver to export to itself, even without support for other drivers. This functionality enables userspace to share per-driver buffers across process boundaries via PRIME (e.g., wlroots requires this functionality). The patch generalizes a pattern that has previously been implemented by GEM VRAM helpers [1] to work with any driver. For example, gma500 can now run the wlroots-based sway compositor. 
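As an illustrative sketch only (not part of this patch): with these defaults in place, a GEM driver built on the shmem helpers can declare its struct drm_driver without naming any PRIME fd/handle helpers at all. The "my_*" identifiers and the "my-drm" name below are hypothetical placeholders, not taken from any driver in this series.

  #include <drm/drm_drv.h>
  #include <drm/drm_gem.h>
  #include <drm/drm_gem_shmem_helper.h>

  DEFINE_DRM_GEM_FOPS(my_fops);

  /*
   * .prime_handle_to_fd and .prime_fd_to_handle are intentionally left
   * unset: drm_gem_prime_handle_to_fd() and drm_gem_prime_fd_to_handle()
   * are used by default, and DRM_CAP_PRIME now reports import and export
   * support unconditionally.
   */
  static const struct drm_driver my_driver = {
          .driver_features           = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
          .fops                      = &my_fops,
          .dumb_create               = drm_gem_shmem_dumb_create,
          .gem_prime_import_sg_table = drm_gem_shmem_prime_import_sg_table,
          .name                      = "my-drm",
          .desc                      = "PRIME-defaults example",
          .date                      = "20230620",
          .major                     = 1,
  };
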
v2: * clean up docs and TODO comments (Simon, Zack) * clean up style in drm_getcap() Signed-off-by: Thomas Zimmermann Link: https://lore.kernel.org/dri-devel/20230302143502.500661-1-contact@emersion.fr/ # 1 Reviewed-by: Simon Ser Acked-by: Alex Deucher Reviewed-by: Jeffrey Hugo Link: https://patchwork.freedesktop.org/patch/msgid/20230620080252.16368-2-tzimmermann@suse.de --- drivers/gpu/drm/drm_ioctl.c | 3 +-- drivers/gpu/drm/drm_prime.c | 21 ++++++++++++--------- include/drm/drm_drv.h | 12 ++---------- 3 files changed, 15 insertions(+), 21 deletions(-) (limited to 'include/drm') diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c index 7c9d66ee917d..8e9afe7af19c 100644 --- a/drivers/gpu/drm/drm_ioctl.c +++ b/drivers/gpu/drm/drm_ioctl.c @@ -245,8 +245,7 @@ static int drm_getcap(struct drm_device *dev, void *data, struct drm_file *file_ req->value = 1; return 0; case DRM_CAP_PRIME: - req->value |= dev->driver->prime_fd_to_handle ? DRM_PRIME_CAP_IMPORT : 0; - req->value |= dev->driver->prime_handle_to_fd ? DRM_PRIME_CAP_EXPORT : 0; + req->value = DRM_PRIME_CAP_IMPORT | DRM_PRIME_CAP_EXPORT; return 0; case DRM_CAP_SYNCOBJ: req->value = drm_core_check_feature(dev, DRIVER_SYNCOBJ); diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c index f9d4e228b09e..7b48e0b0284a 100644 --- a/drivers/gpu/drm/drm_prime.c +++ b/drivers/gpu/drm/drm_prime.c @@ -372,11 +372,12 @@ int drm_prime_fd_to_handle_ioctl(struct drm_device *dev, void *data, { struct drm_prime_handle *args = data; - if (!dev->driver->prime_fd_to_handle) - return -ENOSYS; + if (dev->driver->prime_fd_to_handle) { + return dev->driver->prime_fd_to_handle(dev, file_priv, args->fd, + &args->handle); + } - return dev->driver->prime_fd_to_handle(dev, file_priv, - args->fd, &args->handle); + return drm_gem_prime_fd_to_handle(dev, file_priv, args->fd, &args->handle); } static struct dma_buf *export_and_register_object(struct drm_device *dev, @@ -518,15 +519,17 @@ int drm_prime_handle_to_fd_ioctl(struct drm_device *dev, void *data, { struct drm_prime_handle *args = data; - if (!dev->driver->prime_handle_to_fd) - return -ENOSYS; - /* check flags are valid */ if (args->flags & ~(DRM_CLOEXEC | DRM_RDWR)) return -EINVAL; - return dev->driver->prime_handle_to_fd(dev, file_priv, - args->handle, args->flags, &args->fd); + if (dev->driver->prime_handle_to_fd) { + return dev->driver->prime_handle_to_fd(dev, file_priv, + args->handle, args->flags, + &args->fd); + } + return drm_gem_prime_handle_to_fd(dev, file_priv, args->handle, + args->flags, &args->fd); } /** diff --git a/include/drm/drm_drv.h b/include/drm/drm_drv.h index 870278ecd8ba..b77f2c7275b7 100644 --- a/include/drm/drm_drv.h +++ b/include/drm/drm_drv.h @@ -304,22 +304,14 @@ struct drm_driver { /** * @prime_handle_to_fd: * - * Main PRIME export function. Should be implemented with - * drm_gem_prime_handle_to_fd() for GEM based drivers. - * - * For an in-depth discussion see :ref:`PRIME buffer sharing - * documentation `. + * PRIME export function. Only used by vmwgfx. */ int (*prime_handle_to_fd)(struct drm_device *dev, struct drm_file *file_priv, uint32_t handle, uint32_t flags, int *prime_fd); /** * @prime_fd_to_handle: * - * Main PRIME import function. Should be implemented with - * drm_gem_prime_fd_to_handle() for GEM based drivers. - * - * For an in-depth discussion see :ref:`PRIME buffer sharing - * documentation `. + * PRIME import function. Only used by vmwgfx. 
*/ int (*prime_fd_to_handle)(struct drm_device *dev, struct drm_file *file_priv, int prime_fd, uint32_t *handle); -- cgit v1.2.3 From 71e801b9b44f86ce8c816b06960c705f901c50e5 Mon Sep 17 00:00:00 2001 From: Thomas Zimmermann Date: Tue, 20 Jun 2023 09:59:58 +0200 Subject: drm: Clear fd/handle callbacks in struct drm_driver Clear all assignments of struct drm_driver's fd/handle callbacks to drm_gem_prime_fd_to_handle() and drm_gem_prime_handle_to_fd(). These functions are called by default. Add a TODO item to convert vmwgfx to the defaults as well. v2: * remove TODO item (Zack) * also update amdgpu's amdgpu_partition_driver Signed-off-by: Thomas Zimmermann Reviewed-by: Simon Ser Acked-by: Alex Deucher Acked-by: Jeffrey Hugo # qaic Link: https://patchwork.freedesktop.org/patch/msgid/20230620080252.16368-3-tzimmermann@suse.de --- drivers/accel/ivpu/ivpu_drv.c | 2 -- drivers/accel/qaic/qaic_drv.c | 1 - drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 4 ---- drivers/gpu/drm/armada/armada_drv.c | 2 -- drivers/gpu/drm/drm_prime.c | 13 ++++--------- drivers/gpu/drm/etnaviv/etnaviv_drv.c | 2 -- drivers/gpu/drm/exynos/exynos_drm_drv.c | 2 -- drivers/gpu/drm/i915/i915_driver.c | 2 -- drivers/gpu/drm/lima/lima_drv.c | 2 -- drivers/gpu/drm/mediatek/mtk_drm_drv.c | 2 -- drivers/gpu/drm/msm/msm_drv.c | 2 -- drivers/gpu/drm/nouveau/nouveau_drm.c | 2 -- drivers/gpu/drm/omapdrm/omap_drv.c | 2 -- drivers/gpu/drm/panfrost/panfrost_drv.c | 2 -- drivers/gpu/drm/pl111/pl111_drv.c | 2 -- drivers/gpu/drm/qxl/qxl_drv.c | 2 -- drivers/gpu/drm/radeon/radeon_drv.c | 2 -- drivers/gpu/drm/renesas/rcar-du/rcar_du_drv.c | 2 -- drivers/gpu/drm/rockchip/rockchip_drm_drv.c | 2 -- drivers/gpu/drm/tegra/drm.c | 2 -- drivers/gpu/drm/v3d/v3d_drv.c | 2 -- drivers/gpu/drm/virtio/virtgpu_drv.c | 2 -- drivers/gpu/drm/xen/xen_drm_front.c | 2 -- include/drm/drm_gem_dma_helper.h | 8 ++------ include/drm/drm_gem_shmem_helper.h | 4 +--- include/drm/drm_gem_vram_helper.h | 8 +++----- 26 files changed, 10 insertions(+), 68 deletions(-) (limited to 'include/drm') diff --git a/drivers/accel/ivpu/ivpu_drv.c b/drivers/accel/ivpu/ivpu_drv.c index 9f2b9fdcc549..5167a65cf7bb 100644 --- a/drivers/accel/ivpu/ivpu_drv.c +++ b/drivers/accel/ivpu/ivpu_drv.c @@ -373,8 +373,6 @@ static const struct drm_driver driver = { .open = ivpu_open, .postclose = ivpu_postclose, - .prime_handle_to_fd = drm_gem_prime_handle_to_fd, - .prime_fd_to_handle = drm_gem_prime_fd_to_handle, .gem_prime_import = ivpu_gem_prime_import, .ioctls = ivpu_drm_ioctls, diff --git a/drivers/accel/qaic/qaic_drv.c b/drivers/accel/qaic/qaic_drv.c index b5ba550a0c04..b5de82e6eb4d 100644 --- a/drivers/accel/qaic/qaic_drv.c +++ b/drivers/accel/qaic/qaic_drv.c @@ -165,7 +165,6 @@ static const struct drm_driver qaic_accel_driver = { .ioctls = qaic_drm_ioctls, .num_ioctls = ARRAY_SIZE(qaic_drm_ioctls), - .prime_fd_to_handle = drm_gem_prime_fd_to_handle, .gem_prime_import = qaic_gem_prime_import, }; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index 07e16ad465d0..56dc69bc8b89 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -2850,8 +2850,6 @@ static const struct drm_driver amdgpu_kms_driver = { .show_fdinfo = amdgpu_show_fdinfo, #endif - .prime_handle_to_fd = drm_gem_prime_handle_to_fd, - .prime_fd_to_handle = drm_gem_prime_fd_to_handle, .gem_prime_import = amdgpu_gem_prime_import, .name = DRIVER_NAME, @@ -2876,8 +2874,6 @@ const struct drm_driver amdgpu_partition_driver = { .fops = 
&amdgpu_driver_kms_fops, .release = &amdgpu_driver_release_kms, - .prime_handle_to_fd = drm_gem_prime_handle_to_fd, - .prime_fd_to_handle = drm_gem_prime_fd_to_handle, .gem_prime_import = amdgpu_gem_prime_import, .name = DRIVER_NAME, diff --git a/drivers/gpu/drm/armada/armada_drv.c b/drivers/gpu/drm/armada/armada_drv.c index e120144d4b47..e8d2fe955909 100644 --- a/drivers/gpu/drm/armada/armada_drv.c +++ b/drivers/gpu/drm/armada/armada_drv.c @@ -37,8 +37,6 @@ static const struct drm_ioctl_desc armada_ioctls[] = { DEFINE_DRM_GEM_FOPS(armada_drm_fops); static const struct drm_driver armada_drm_driver = { - .prime_handle_to_fd = drm_gem_prime_handle_to_fd, - .prime_fd_to_handle = drm_gem_prime_fd_to_handle, .gem_prime_import = armada_gem_prime_import, .dumb_create = armada_gem_dumb_create, .major = 1, diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c index 7b48e0b0284a..834a5e28abbe 100644 --- a/drivers/gpu/drm/drm_prime.c +++ b/drivers/gpu/drm/drm_prime.c @@ -51,15 +51,10 @@ MODULE_IMPORT_NS(DMA_BUF); * between applications, they can't be guessed like the globally unique GEM * names. * - * Drivers that support the PRIME API implement the - * &drm_driver.prime_handle_to_fd and &drm_driver.prime_fd_to_handle operations. - * GEM based drivers must use drm_gem_prime_handle_to_fd() and - * drm_gem_prime_fd_to_handle() to implement these. For GEM based drivers the - * actual driver interfaces is provided through the &drm_gem_object_funcs.export - * and &drm_driver.gem_prime_import hooks. - * - * &dma_buf_ops implementations for GEM drivers are all individually exported - * for drivers which need to overwrite or reimplement some of them. + * Drivers that support the PRIME API implement the drm_gem_object_funcs.export + * and &drm_driver.gem_prime_import hooks. &dma_buf_ops implementations for + * drivers are all individually exported for drivers which need to overwrite + * or reimplement some of them. 
* * Reference Counting for GEM Drivers * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.c b/drivers/gpu/drm/etnaviv/etnaviv_drv.c index 00223a874909..ea55f6b7b744 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_drv.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.c @@ -481,8 +481,6 @@ static const struct drm_driver etnaviv_drm_driver = { .driver_features = DRIVER_GEM | DRIVER_RENDER, .open = etnaviv_open, .postclose = etnaviv_postclose, - .prime_handle_to_fd = drm_gem_prime_handle_to_fd, - .prime_fd_to_handle = drm_gem_prime_fd_to_handle, .gem_prime_import_sg_table = etnaviv_gem_prime_import_sg_table, #ifdef CONFIG_DEBUG_FS .debugfs_init = etnaviv_debugfs_init, diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c index c9e3c88fb329..8399256cb5c9 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_drv.c +++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c @@ -109,8 +109,6 @@ static const struct drm_driver exynos_drm_driver = { .open = exynos_drm_open, .postclose = exynos_drm_postclose, .dumb_create = exynos_drm_gem_dumb_create, - .prime_handle_to_fd = drm_gem_prime_handle_to_fd, - .prime_fd_to_handle = drm_gem_prime_fd_to_handle, .gem_prime_import = exynos_drm_gem_prime_import, .gem_prime_import_sg_table = exynos_drm_gem_prime_import_sg_table, .ioctls = exynos_ioctls, diff --git a/drivers/gpu/drm/i915/i915_driver.c b/drivers/gpu/drm/i915/i915_driver.c index 75cbc43b326d..171f4db9e5e3 100644 --- a/drivers/gpu/drm/i915/i915_driver.c +++ b/drivers/gpu/drm/i915/i915_driver.c @@ -1818,8 +1818,6 @@ static const struct drm_driver i915_drm_driver = { .postclose = i915_driver_postclose, .show_fdinfo = i915_drm_client_fdinfo, - .prime_handle_to_fd = drm_gem_prime_handle_to_fd, - .prime_fd_to_handle = drm_gem_prime_fd_to_handle, .gem_prime_import = i915_gem_prime_import, .dumb_create = i915_gem_dumb_create, diff --git a/drivers/gpu/drm/lima/lima_drv.c b/drivers/gpu/drm/lima/lima_drv.c index 65c31dc38049..3dd078f443bb 100644 --- a/drivers/gpu/drm/lima/lima_drv.c +++ b/drivers/gpu/drm/lima/lima_drv.c @@ -276,9 +276,7 @@ static const struct drm_driver lima_drm_driver = { .patchlevel = 0, .gem_create_object = lima_gem_create_object, - .prime_fd_to_handle = drm_gem_prime_fd_to_handle, .gem_prime_import_sg_table = drm_gem_shmem_prime_import_sg_table, - .prime_handle_to_fd = drm_gem_prime_handle_to_fd, }; struct lima_block_reader { diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c index 5693bb8d29ce..7fb65eb95c55 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c @@ -556,8 +556,6 @@ static const struct drm_driver mtk_drm_driver = { .dumb_create = mtk_drm_gem_dumb_create, - .prime_handle_to_fd = drm_gem_prime_handle_to_fd, - .prime_fd_to_handle = drm_gem_prime_fd_to_handle, .gem_prime_import = mtk_drm_gem_prime_import, .gem_prime_import_sg_table = mtk_gem_prime_import_sg_table, .fops = &mtk_drm_fops, diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c index 47efa3c4492c..2a0e3529598b 100644 --- a/drivers/gpu/drm/msm/msm_drv.c +++ b/drivers/gpu/drm/msm/msm_drv.c @@ -1086,8 +1086,6 @@ static const struct drm_driver msm_driver = { .postclose = msm_postclose, .dumb_create = msm_gem_dumb_create, .dumb_map_offset = msm_gem_dumb_map_offset, - .prime_handle_to_fd = drm_gem_prime_handle_to_fd, - .prime_fd_to_handle = drm_gem_prime_fd_to_handle, .gem_prime_import_sg_table = msm_gem_prime_import_sg_table, #ifdef CONFIG_DEBUG_FS .debugfs_init = 
msm_debugfs_init, diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c index 51f1918b44d3..ca3bb8075357 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drm.c +++ b/drivers/gpu/drm/nouveau/nouveau_drm.c @@ -1240,8 +1240,6 @@ driver_stub = { .num_ioctls = ARRAY_SIZE(nouveau_ioctls), .fops = &nouveau_driver_fops, - .prime_handle_to_fd = drm_gem_prime_handle_to_fd, - .prime_fd_to_handle = drm_gem_prime_fd_to_handle, .gem_prime_import_sg_table = nouveau_gem_prime_import_sg_table, .dumb_create = nouveau_display_dumb_create, diff --git a/drivers/gpu/drm/omapdrm/omap_drv.c b/drivers/gpu/drm/omapdrm/omap_drv.c index 671d26b9d339..e2697fe80e62 100644 --- a/drivers/gpu/drm/omapdrm/omap_drv.c +++ b/drivers/gpu/drm/omapdrm/omap_drv.c @@ -655,8 +655,6 @@ static const struct drm_driver omap_drm_driver = { #ifdef CONFIG_DEBUG_FS .debugfs_init = omap_debugfs_init, #endif - .prime_handle_to_fd = drm_gem_prime_handle_to_fd, - .prime_fd_to_handle = drm_gem_prime_fd_to_handle, .gem_prime_import = omap_gem_prime_import, .dumb_create = omap_gem_dumb_create, .dumb_map_offset = omap_gem_dumb_map_offset, diff --git a/drivers/gpu/drm/panfrost/panfrost_drv.c b/drivers/gpu/drm/panfrost/panfrost_drv.c index 598ad1dbe6e1..49b51f0db9b4 100644 --- a/drivers/gpu/drm/panfrost/panfrost_drv.c +++ b/drivers/gpu/drm/panfrost/panfrost_drv.c @@ -544,8 +544,6 @@ static const struct drm_driver panfrost_drm_driver = { .minor = 2, .gem_create_object = panfrost_gem_create_object, - .prime_handle_to_fd = drm_gem_prime_handle_to_fd, - .prime_fd_to_handle = drm_gem_prime_fd_to_handle, .gem_prime_import_sg_table = panfrost_gem_prime_import_sg_table, }; diff --git a/drivers/gpu/drm/pl111/pl111_drv.c b/drivers/gpu/drm/pl111/pl111_drv.c index c4b8357ea999..ba3b5b5f0cdf 100644 --- a/drivers/gpu/drm/pl111/pl111_drv.c +++ b/drivers/gpu/drm/pl111/pl111_drv.c @@ -224,8 +224,6 @@ static const struct drm_driver pl111_drm_driver = { .minor = 0, .patchlevel = 0, .dumb_create = drm_gem_dma_dumb_create, - .prime_handle_to_fd = drm_gem_prime_handle_to_fd, - .prime_fd_to_handle = drm_gem_prime_fd_to_handle, .gem_prime_import_sg_table = pl111_gem_import_sg_table, #if defined(CONFIG_DEBUG_FS) diff --git a/drivers/gpu/drm/qxl/qxl_drv.c b/drivers/gpu/drm/qxl/qxl_drv.c index a3b83f89e061..b30ede1cf62d 100644 --- a/drivers/gpu/drm/qxl/qxl_drv.c +++ b/drivers/gpu/drm/qxl/qxl_drv.c @@ -290,8 +290,6 @@ static struct drm_driver qxl_driver = { #if defined(CONFIG_DEBUG_FS) .debugfs_init = qxl_debugfs_init, #endif - .prime_handle_to_fd = drm_gem_prime_handle_to_fd, - .prime_fd_to_handle = drm_gem_prime_fd_to_handle, .gem_prime_import_sg_table = qxl_gem_prime_import_sg_table, .fops = &qxl_fops, .ioctls = qxl_ioctls, diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c index cf1b960c4200..39cdede460b5 100644 --- a/drivers/gpu/drm/radeon/radeon_drv.c +++ b/drivers/gpu/drm/radeon/radeon_drv.c @@ -604,8 +604,6 @@ static const struct drm_driver kms_driver = { .dumb_map_offset = radeon_mode_dumb_mmap, .fops = &radeon_driver_kms_fops, - .prime_handle_to_fd = drm_gem_prime_handle_to_fd, - .prime_fd_to_handle = drm_gem_prime_fd_to_handle, .gem_prime_import_sg_table = radeon_gem_prime_import_sg_table, .name = DRIVER_NAME, diff --git a/drivers/gpu/drm/renesas/rcar-du/rcar_du_drv.c b/drivers/gpu/drm/renesas/rcar-du/rcar_du_drv.c index 4280ff5fa91f..a4f3615f3291 100644 --- a/drivers/gpu/drm/renesas/rcar-du/rcar_du_drv.c +++ b/drivers/gpu/drm/renesas/rcar-du/rcar_du_drv.c @@ -605,8 +605,6 @@ 
DEFINE_DRM_GEM_DMA_FOPS(rcar_du_fops); static const struct drm_driver rcar_du_driver = { .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC, .dumb_create = rcar_du_dumb_create, - .prime_handle_to_fd = drm_gem_prime_handle_to_fd, - .prime_fd_to_handle = drm_gem_prime_fd_to_handle, .gem_prime_import_sg_table = rcar_du_gem_prime_import_sg_table, .fops = &rcar_du_fops, .name = "rcar-du", diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c index b8cf89f0cc56..e35be6ea2849 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c +++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c @@ -224,8 +224,6 @@ DEFINE_DRM_GEM_FOPS(rockchip_drm_driver_fops); static const struct drm_driver rockchip_drm_driver = { .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC, .dumb_create = rockchip_gem_dumb_create, - .prime_handle_to_fd = drm_gem_prime_handle_to_fd, - .prime_fd_to_handle = drm_gem_prime_fd_to_handle, .gem_prime_import_sg_table = rockchip_gem_prime_import_sg_table, .fops = &rockchip_drm_driver_fops, .name = DRIVER_NAME, diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c index 35ff303c6674..ff36171c8fb7 100644 --- a/drivers/gpu/drm/tegra/drm.c +++ b/drivers/gpu/drm/tegra/drm.c @@ -887,8 +887,6 @@ static const struct drm_driver tegra_drm_driver = { .debugfs_init = tegra_debugfs_init, #endif - .prime_handle_to_fd = drm_gem_prime_handle_to_fd, - .prime_fd_to_handle = drm_gem_prime_fd_to_handle, .gem_prime_import = tegra_gem_prime_import, .dumb_create = tegra_bo_dumb_create, diff --git a/drivers/gpu/drm/v3d/v3d_drv.c b/drivers/gpu/drm/v3d/v3d_drv.c index 845a36e36450..ffbbe9d527d3 100644 --- a/drivers/gpu/drm/v3d/v3d_drv.c +++ b/drivers/gpu/drm/v3d/v3d_drv.c @@ -171,8 +171,6 @@ static const struct drm_driver v3d_drm_driver = { #endif .gem_create_object = v3d_create_object, - .prime_handle_to_fd = drm_gem_prime_handle_to_fd, - .prime_fd_to_handle = drm_gem_prime_fd_to_handle, .gem_prime_import_sg_table = v3d_prime_import_sg_table, .ioctls = v3d_drm_ioctls, diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.c b/drivers/gpu/drm/virtio/virtgpu_drv.c index 91ace7a44f2a..a7ec5a3770da 100644 --- a/drivers/gpu/drm/virtio/virtgpu_drv.c +++ b/drivers/gpu/drm/virtio/virtgpu_drv.c @@ -186,8 +186,6 @@ static const struct drm_driver driver = { #if defined(CONFIG_DEBUG_FS) .debugfs_init = virtio_gpu_debugfs_init, #endif - .prime_handle_to_fd = drm_gem_prime_handle_to_fd, - .prime_fd_to_handle = drm_gem_prime_fd_to_handle, .gem_prime_import = virtgpu_gem_prime_import, .gem_prime_import_sg_table = virtgpu_gem_prime_import_sg_table, diff --git a/drivers/gpu/drm/xen/xen_drm_front.c b/drivers/gpu/drm/xen/xen_drm_front.c index 62c3c13b3a17..7e9431c50c5a 100644 --- a/drivers/gpu/drm/xen/xen_drm_front.c +++ b/drivers/gpu/drm/xen/xen_drm_front.c @@ -474,8 +474,6 @@ DEFINE_DRM_GEM_FOPS(xen_drm_dev_fops); static const struct drm_driver xen_drm_driver = { .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC, .release = xen_drm_drv_release, - .prime_handle_to_fd = drm_gem_prime_handle_to_fd, - .prime_fd_to_handle = drm_gem_prime_fd_to_handle, .gem_prime_import_sg_table = xen_drm_front_gem_import_sg_table, .dumb_create = xen_drm_drv_dumb_create, .fops = &xen_drm_dev_fops, diff --git a/include/drm/drm_gem_dma_helper.h b/include/drm/drm_gem_dma_helper.h index 61da596780b6..a827bde494f6 100644 --- a/include/drm/drm_gem_dma_helper.h +++ b/include/drm/drm_gem_dma_helper.h @@ -166,9 +166,7 @@ drm_gem_dma_prime_import_sg_table(struct drm_device 
*dev, * DRM_GEM_DMA_DRIVER_OPS_VMAP_WITH_DUMB_CREATE() instead. */ #define DRM_GEM_DMA_DRIVER_OPS_WITH_DUMB_CREATE(dumb_create_func) \ - .dumb_create = (dumb_create_func), \ - .prime_handle_to_fd = drm_gem_prime_handle_to_fd, \ - .prime_fd_to_handle = drm_gem_prime_fd_to_handle, \ + .dumb_create = (dumb_create_func), \ .gem_prime_import_sg_table = drm_gem_dma_prime_import_sg_table /** @@ -203,9 +201,7 @@ drm_gem_dma_prime_import_sg_table(struct drm_device *dev, * DRM_GEM_DMA_DRIVER_OPS_WITH_DUMB_CREATE() instead. */ #define DRM_GEM_DMA_DRIVER_OPS_VMAP_WITH_DUMB_CREATE(dumb_create_func) \ - .dumb_create = dumb_create_func, \ - .prime_handle_to_fd = drm_gem_prime_handle_to_fd, \ - .prime_fd_to_handle = drm_gem_prime_fd_to_handle, \ + .dumb_create = (dumb_create_func), \ .gem_prime_import_sg_table = drm_gem_dma_prime_import_sg_table_vmap /** diff --git a/include/drm/drm_gem_shmem_helper.h b/include/drm/drm_gem_shmem_helper.h index 2867d2aba88b..bf0c31aa8fbe 100644 --- a/include/drm/drm_gem_shmem_helper.h +++ b/include/drm/drm_gem_shmem_helper.h @@ -278,9 +278,7 @@ int drm_gem_shmem_dumb_create(struct drm_file *file, struct drm_device *dev, * the &drm_driver structure. */ #define DRM_GEM_SHMEM_DRIVER_OPS \ - .prime_handle_to_fd = drm_gem_prime_handle_to_fd, \ - .prime_fd_to_handle = drm_gem_prime_fd_to_handle, \ .gem_prime_import_sg_table = drm_gem_shmem_prime_import_sg_table, \ - .dumb_create = drm_gem_shmem_dumb_create + .dumb_create = drm_gem_shmem_dumb_create #endif /* __DRM_GEM_SHMEM_HELPER_H__ */ diff --git a/include/drm/drm_gem_vram_helper.h b/include/drm/drm_gem_vram_helper.h index 6b265cb9f45a..e18429f09e53 100644 --- a/include/drm/drm_gem_vram_helper.h +++ b/include/drm/drm_gem_vram_helper.h @@ -157,11 +157,9 @@ void drm_gem_vram_simple_display_pipe_cleanup_fb( * &struct drm_driver with default functions. */ #define DRM_GEM_VRAM_DRIVER \ - .debugfs_init = drm_vram_mm_debugfs_init, \ - .dumb_create = drm_gem_vram_driver_dumb_create, \ - .dumb_map_offset = drm_gem_ttm_dumb_map_offset, \ - .prime_handle_to_fd = drm_gem_prime_handle_to_fd, \ - .prime_fd_to_handle = drm_gem_prime_fd_to_handle + .debugfs_init = drm_vram_mm_debugfs_init, \ + .dumb_create = drm_gem_vram_driver_dumb_create, \ + .dumb_map_offset = drm_gem_ttm_dumb_map_offset /* * VRAM memory manager -- cgit v1.2.3 From 71a7974ac7019afeec105a54447ae1dc7216cbb3 Mon Sep 17 00:00:00 2001 From: Thomas Zimmermann Date: Tue, 20 Jun 2023 09:59:59 +0200 Subject: drm/prime: Unexport helpers for fd/handle conversion Unexport drm_gem_prime_fd_to_handle() and drm_gem_prime_handle_to_fd(). Both are only used internally within the PRIME code. 
v2: * reword docs as functions are now unexported (Simon) Signed-off-by: Thomas Zimmermann Reviewed-by: Simon Ser Acked-by: Alex Deucher Reviewed-by: Jeffrey Hugo Link: https://patchwork.freedesktop.org/patch/msgid/20230620080252.16368-4-tzimmermann@suse.de --- drivers/gpu/drm/drm_prime.c | 33 +++++++++++++++------------------ include/drm/drm_prime.h | 7 ------- 2 files changed, 15 insertions(+), 25 deletions(-) (limited to 'include/drm') diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c index 834a5e28abbe..63b709a67471 100644 --- a/drivers/gpu/drm/drm_prime.c +++ b/drivers/gpu/drm/drm_prime.c @@ -278,7 +278,7 @@ void drm_gem_dmabuf_release(struct dma_buf *dma_buf) } EXPORT_SYMBOL(drm_gem_dmabuf_release); -/** +/* * drm_gem_prime_fd_to_handle - PRIME import function for GEM drivers * @dev: drm_device to import into * @file_priv: drm file-private structure @@ -292,9 +292,9 @@ EXPORT_SYMBOL(drm_gem_dmabuf_release); * * Returns 0 on success or a negative error code on failure. */ -int drm_gem_prime_fd_to_handle(struct drm_device *dev, - struct drm_file *file_priv, int prime_fd, - uint32_t *handle) +static int drm_gem_prime_fd_to_handle(struct drm_device *dev, + struct drm_file *file_priv, int prime_fd, + uint32_t *handle) { struct dma_buf *dma_buf; struct drm_gem_object *obj; @@ -360,7 +360,6 @@ out_put: dma_buf_put(dma_buf); return ret; } -EXPORT_SYMBOL(drm_gem_prime_fd_to_handle); int drm_prime_fd_to_handle_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) @@ -409,7 +408,7 @@ static struct dma_buf *export_and_register_object(struct drm_device *dev, return dmabuf; } -/** +/* * drm_gem_prime_handle_to_fd - PRIME export function for GEM drivers * @dev: dev to export the buffer from * @file_priv: drm file-private structure @@ -422,10 +421,10 @@ static struct dma_buf *export_and_register_object(struct drm_device *dev, * The actual exporting from GEM object to a dma-buf is done through the * &drm_gem_object_funcs.export callback. */ -int drm_gem_prime_handle_to_fd(struct drm_device *dev, - struct drm_file *file_priv, uint32_t handle, - uint32_t flags, - int *prime_fd) +static int drm_gem_prime_handle_to_fd(struct drm_device *dev, + struct drm_file *file_priv, uint32_t handle, + uint32_t flags, + int *prime_fd) { struct drm_gem_object *obj; int ret = 0; @@ -507,7 +506,6 @@ out_unlock: return ret; } -EXPORT_SYMBOL(drm_gem_prime_handle_to_fd); int drm_prime_handle_to_fd_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) @@ -866,9 +864,9 @@ EXPORT_SYMBOL(drm_prime_get_contiguous_size); * @obj: GEM object to export * @flags: flags like DRM_CLOEXEC and DRM_RDWR * - * This is the implementation of the &drm_gem_object_funcs.export functions for GEM drivers - * using the PRIME helpers. It is used as the default in - * drm_gem_prime_handle_to_fd(). + * This is the implementation of the &drm_gem_object_funcs.export functions + * for GEM drivers using the PRIME helpers. It is used as the default for + * drivers that do not set their own. */ struct dma_buf *drm_gem_prime_export(struct drm_gem_object *obj, int flags) @@ -964,10 +962,9 @@ EXPORT_SYMBOL(drm_gem_prime_import_dev); * @dev: drm_device to import into * @dma_buf: dma-buf object to import * - * This is the implementation of the gem_prime_import functions for GEM drivers - * using the PRIME helpers. Drivers can use this as their - * &drm_driver.gem_prime_import implementation. It is used as the default - * implementation in drm_gem_prime_fd_to_handle(). 
+ * This is the implementation of the gem_prime_import functions for GEM + * drivers using the PRIME helpers. It is the default for drivers that do + * not set their own &drm_driver.gem_prime_import. * * Drivers must arrange to call drm_prime_gem_destroy() from their * &drm_gem_object_funcs.free hook when using this function. diff --git a/include/drm/drm_prime.h b/include/drm/drm_prime.h index 2a1d01e5b56b..a7abf9f3e697 100644 --- a/include/drm/drm_prime.h +++ b/include/drm/drm_prime.h @@ -60,19 +60,12 @@ enum dma_data_direction; struct drm_device; struct drm_gem_object; -struct drm_file; /* core prime functions */ struct dma_buf *drm_gem_dmabuf_export(struct drm_device *dev, struct dma_buf_export_info *exp_info); void drm_gem_dmabuf_release(struct dma_buf *dma_buf); -int drm_gem_prime_fd_to_handle(struct drm_device *dev, - struct drm_file *file_priv, int prime_fd, uint32_t *handle); -int drm_gem_prime_handle_to_fd(struct drm_device *dev, - struct drm_file *file_priv, uint32_t handle, uint32_t flags, - int *prime_fd); - /* helper functions for exporting */ int drm_gem_map_attach(struct dma_buf *dma_buf, struct dma_buf_attachment *attach); -- cgit v1.2.3 From 7ed40ff1d134bf3a4aef706eed478b926f35b404 Mon Sep 17 00:00:00 2001 From: Adrián Larumbe Date: Sun, 25 Jun 2023 15:17:15 +0100 Subject: drm/bridge: dw-hdmi: change YUV420 selection logic at clock setup MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Right now clocking value selection code is prioritising RGB, YUV444 modes over YUV420 for HDMI2 sinks. However, because of the bus format selection procedure in dw-hdmi, for HDMI2 sinks YUV420 is the format that will always be picked during the drm bridge chain check stage. Later on dw_hdmi_setup will configure a colour space based on the bus format that doesn't match the pixel value we had calculated as described above. Fix it by bringing back dw-hdmi bus format check when picking the right pixel clock. Signed-off-by: Adrián Larumbe Acked-by: Neil Armstrong Signed-off-by: Neil Armstrong Link: https://patchwork.freedesktop.org/patch/msgid/6230bfae2cd97cf6527fc62ba5c850464919ccf8.1687702042.git.adrian.larumbe@collabora.com --- drivers/gpu/drm/bridge/synopsys/dw-hdmi.c | 6 ++++++ drivers/gpu/drm/meson/meson_dw_hdmi.c | 4 ++-- include/drm/bridge/dw_hdmi.h | 2 ++ 3 files changed, 10 insertions(+), 2 deletions(-) (limited to 'include/drm') diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c index 9d6dcaf317a1..8e1a9854ebc0 100644 --- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c +++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c @@ -3346,6 +3346,12 @@ static int dw_hdmi_parse_dt(struct dw_hdmi *hdmi) return 0; } +bool dw_hdmi_bus_fmt_is_420(struct dw_hdmi *hdmi) +{ + return hdmi_bus_fmt_is_yuv420(hdmi->hdmi_data.enc_out_bus_format); +} +EXPORT_SYMBOL_GPL(dw_hdmi_bus_fmt_is_420); + struct dw_hdmi *dw_hdmi_probe(struct platform_device *pdev, const struct dw_hdmi_plat_data *plat_data) { diff --git a/drivers/gpu/drm/meson/meson_dw_hdmi.c b/drivers/gpu/drm/meson/meson_dw_hdmi.c index 411219d53b14..18a0f4f4638c 100644 --- a/drivers/gpu/drm/meson/meson_dw_hdmi.c +++ b/drivers/gpu/drm/meson/meson_dw_hdmi.c @@ -379,8 +379,8 @@ static int dw_hdmi_phy_init(struct dw_hdmi *hdmi, void *data, mode->clock > 340000 ? 
40 : 10); if (drm_mode_is_420_only(display, mode) || - (!is_hdmi2_sink && - drm_mode_is_420_also(display, mode))) + (!is_hdmi2_sink && drm_mode_is_420_also(display, mode)) || + dw_hdmi_bus_fmt_is_420(hdmi)) mode_is_420 = true; /* Enable clocks */ diff --git a/include/drm/bridge/dw_hdmi.h b/include/drm/bridge/dw_hdmi.h index f668e75fbabe..6a46baa0737c 100644 --- a/include/drm/bridge/dw_hdmi.h +++ b/include/drm/bridge/dw_hdmi.h @@ -206,4 +206,6 @@ void dw_hdmi_phy_update_hpd(struct dw_hdmi *hdmi, void *data, bool force, bool disabled, bool rxsense); void dw_hdmi_phy_setup_hpd(struct dw_hdmi *hdmi, void *data); +bool dw_hdmi_bus_fmt_is_420(struct dw_hdmi *hdmi); + #endif /* __IMX_HDMI_H__ */ -- cgit v1.2.3 From 09593216bff15866f95c8ad406cb7fdcec1ee40a Mon Sep 17 00:00:00 2001 From: Christian König Date: Thu, 7 Apr 2022 08:17:51 +0200 Subject: drm: execution context for GEM buffers v7 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This adds the infrastructure for an execution context for GEM buffers which is similar to the existing TTMs execbuf util and intended to replace it in the long term. The basic functionality is that we abstracts the necessary loop to lock many different GEM buffers with automated deadlock and duplicate handling. v2: drop xarray and use dynamic resized array instead, the locking overhead is unnecessary and measurable. v3: drop duplicate tracking, radeon is really the only one needing that. v4: fixes issues pointed out by Danilo, some typos in comments and a helper for lock arrays of GEM objects. v5: some suggestions by Boris Brezillon, especially just use one retry macro, drop loop in prepare_array, use flags instead of bool v6: minor changes suggested by Thomas, Boris and Danilo v7: minor typos pointed out by checkpatch.pl fixed Signed-off-by: Christian König Reviewed-by: Boris Brezillon Reviewed-by: Danilo Krummrich Tested-by: Danilo Krummrich Acked-by: Alex Deucher Link: https://patchwork.freedesktop.org/patch/msgid/20230711133122.3710-2-christian.koenig@amd.com --- Documentation/gpu/drm-mm.rst | 12 ++ drivers/gpu/drm/Kconfig | 6 + drivers/gpu/drm/Makefile | 2 + drivers/gpu/drm/drm_exec.c | 333 +++++++++++++++++++++++++++++++++++++++++++ include/drm/drm_exec.h | 123 ++++++++++++++++ 5 files changed, 476 insertions(+) create mode 100644 drivers/gpu/drm/drm_exec.c create mode 100644 include/drm/drm_exec.h (limited to 'include/drm') diff --git a/Documentation/gpu/drm-mm.rst b/Documentation/gpu/drm-mm.rst index a79fd3549ff8..a52e6f4117d6 100644 --- a/Documentation/gpu/drm-mm.rst +++ b/Documentation/gpu/drm-mm.rst @@ -493,6 +493,18 @@ DRM Sync Objects .. kernel-doc:: drivers/gpu/drm/drm_syncobj.c :export: +DRM Execution context +===================== + +.. kernel-doc:: drivers/gpu/drm/drm_exec.c + :doc: Overview + +.. kernel-doc:: include/drm/drm_exec.h + :internal: + +.. kernel-doc:: drivers/gpu/drm/drm_exec.c + :export: + GPU Scheduler ============= diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig index cc9c9947fdef..c0b4063a3ee6 100644 --- a/drivers/gpu/drm/Kconfig +++ b/drivers/gpu/drm/Kconfig @@ -194,6 +194,12 @@ config DRM_TTM GPU memory types. Will be enabled automatically if a device driver uses it. 
+config DRM_EXEC + tristate + depends on DRM + help + Execution context for command submissions + config DRM_BUDDY tristate depends on DRM diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile index 1c0f5204e47b..021b3f0ac152 100644 --- a/drivers/gpu/drm/Makefile +++ b/drivers/gpu/drm/Makefile @@ -78,6 +78,8 @@ obj-$(CONFIG_DRM_PANEL_ORIENTATION_QUIRKS) += drm_panel_orientation_quirks.o # # Memory-management helpers # +# +obj-$(CONFIG_DRM_EXEC) += drm_exec.o obj-$(CONFIG_DRM_BUDDY) += drm_buddy.o diff --git a/drivers/gpu/drm/drm_exec.c b/drivers/gpu/drm/drm_exec.c new file mode 100644 index 000000000000..ff69cf0fb42a --- /dev/null +++ b/drivers/gpu/drm/drm_exec.c @@ -0,0 +1,333 @@ +// SPDX-License-Identifier: GPL-2.0 OR MIT + +#include <drm/drm_exec.h> +#include <drm/drm_gem.h> +#include <linux/dma-resv.h> + +/** + * DOC: Overview + * + * This component mainly abstracts the retry loop necessary for locking + * multiple GEM objects while preparing hardware operations (e.g. command + * submissions, page table updates etc.). + * + * If a contention is detected while locking a GEM object, the cleanup procedure + * unlocks all previously locked GEM objects and locks the contended one first + * before locking any further objects. + * + * After an object is locked, fence slots can optionally be reserved on the + * dma_resv object inside the GEM object. + * + * A typical usage pattern should look like this:: + * + * struct drm_gem_object *obj; + * struct drm_exec exec; + * unsigned long index; + * int ret; + * + * drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT); + * drm_exec_until_all_locked(&exec) { + * ret = drm_exec_prepare_obj(&exec, boA, 1); + * drm_exec_retry_on_contention(&exec); + * if (ret) + * goto error; + * + * ret = drm_exec_prepare_obj(&exec, boB, 1); + * drm_exec_retry_on_contention(&exec); + * if (ret) + * goto error; + * } + * + * drm_exec_for_each_locked_object(&exec, index, obj) { + * dma_resv_add_fence(obj->resv, fence, DMA_RESV_USAGE_READ); + * ... + * } + * drm_exec_fini(&exec); + * + * See struct drm_exec for more details. + */ + +/* Dummy value used to initially enter the retry loop */ +#define DRM_EXEC_DUMMY ((void *)~0) + +/* Unlock all objects and drop references */ +static void drm_exec_unlock_all(struct drm_exec *exec) +{ + struct drm_gem_object *obj; + unsigned long index; + + drm_exec_for_each_locked_object(exec, index, obj) { + dma_resv_unlock(obj->resv); + drm_gem_object_put(obj); + } + + drm_gem_object_put(exec->prelocked); + exec->prelocked = NULL; +} + +/** + * drm_exec_init - initialize a drm_exec object + * @exec: the drm_exec object to initialize + * @flags: controls locking behavior, see DRM_EXEC_* defines + * + * Initialize the object and make sure that we can track locked objects. + */ +void drm_exec_init(struct drm_exec *exec, uint32_t flags) +{ + exec->flags = flags; + exec->objects = kmalloc(PAGE_SIZE, GFP_KERNEL); + + /* If allocation here fails, just delay that till the first use */ + exec->max_objects = exec->objects ? PAGE_SIZE / sizeof(void *) : 0; + exec->num_objects = 0; + exec->contended = DRM_EXEC_DUMMY; + exec->prelocked = NULL; +} +EXPORT_SYMBOL(drm_exec_init); + +/** + * drm_exec_fini - finalize a drm_exec object + * @exec: the drm_exec object to finalize + * + * Unlock all locked objects, drop the references to objects and free all memory + * used for tracking the state.
+ */ +void drm_exec_fini(struct drm_exec *exec) +{ + drm_exec_unlock_all(exec); + kvfree(exec->objects); + if (exec->contended != DRM_EXEC_DUMMY) { + drm_gem_object_put(exec->contended); + ww_acquire_fini(&exec->ticket); + } +} +EXPORT_SYMBOL(drm_exec_fini); + +/** + * drm_exec_cleanup - cleanup when contention is detected + * @exec: the drm_exec object to cleanup + * + * Cleanup the current state and return true if we should stay inside the retry + * loop, false if there wasn't any contention detected and we can keep the + * objects locked. + */ +bool drm_exec_cleanup(struct drm_exec *exec) +{ + if (likely(!exec->contended)) { + ww_acquire_done(&exec->ticket); + return false; + } + + if (likely(exec->contended == DRM_EXEC_DUMMY)) { + exec->contended = NULL; + ww_acquire_init(&exec->ticket, &reservation_ww_class); + return true; + } + + drm_exec_unlock_all(exec); + exec->num_objects = 0; + return true; +} +EXPORT_SYMBOL(drm_exec_cleanup); + +/* Track the locked object in the array */ +static int drm_exec_obj_locked(struct drm_exec *exec, + struct drm_gem_object *obj) +{ + if (unlikely(exec->num_objects == exec->max_objects)) { + size_t size = exec->max_objects * sizeof(void *); + void *tmp; + + tmp = kvrealloc(exec->objects, size, size + PAGE_SIZE, + GFP_KERNEL); + if (!tmp) + return -ENOMEM; + + exec->objects = tmp; + exec->max_objects += PAGE_SIZE / sizeof(void *); + } + drm_gem_object_get(obj); + exec->objects[exec->num_objects++] = obj; + + return 0; +} + +/* Make sure the contended object is locked first */ +static int drm_exec_lock_contended(struct drm_exec *exec) +{ + struct drm_gem_object *obj = exec->contended; + int ret; + + if (likely(!obj)) + return 0; + + /* Always cleanup the contention so that error handling can kick in */ + exec->contended = NULL; + if (exec->flags & DRM_EXEC_INTERRUPTIBLE_WAIT) { + ret = dma_resv_lock_slow_interruptible(obj->resv, + &exec->ticket); + if (unlikely(ret)) + goto error_dropref; + } else { + dma_resv_lock_slow(obj->resv, &exec->ticket); + } + + ret = drm_exec_obj_locked(exec, obj); + if (unlikely(ret)) + goto error_unlock; + + exec->prelocked = obj; + return 0; + +error_unlock: + dma_resv_unlock(obj->resv); + +error_dropref: + drm_gem_object_put(obj); + return ret; +} + +/** + * drm_exec_lock_obj - lock a GEM object for use + * @exec: the drm_exec object with the state + * @obj: the GEM object to lock + * + * Lock a GEM object for use and grab a reference to it. + * + * Returns: -EDEADLK if a contention is detected, -EALREADY when object is + * already locked (can be suppressed by setting the DRM_EXEC_IGNORE_DUPLICATES + * flag), -ENOMEM when memory allocation failed and zero for success. 
+ */ +int drm_exec_lock_obj(struct drm_exec *exec, struct drm_gem_object *obj) +{ + int ret; + + ret = drm_exec_lock_contended(exec); + if (unlikely(ret)) + return ret; + + if (exec->prelocked == obj) { + drm_gem_object_put(exec->prelocked); + exec->prelocked = NULL; + return 0; + } + + if (exec->flags & DRM_EXEC_INTERRUPTIBLE_WAIT) + ret = dma_resv_lock_interruptible(obj->resv, &exec->ticket); + else + ret = dma_resv_lock(obj->resv, &exec->ticket); + + if (unlikely(ret == -EDEADLK)) { + drm_gem_object_get(obj); + exec->contended = obj; + return -EDEADLK; + } + + if (unlikely(ret == -EALREADY) && + exec->flags & DRM_EXEC_IGNORE_DUPLICATES) + return 0; + + if (unlikely(ret)) + return ret; + + ret = drm_exec_obj_locked(exec, obj); + if (ret) + goto error_unlock; + + return 0; + +error_unlock: + dma_resv_unlock(obj->resv); + return ret; +} +EXPORT_SYMBOL(drm_exec_lock_obj); + +/** + * drm_exec_unlock_obj - unlock a GEM object in this exec context + * @exec: the drm_exec object with the state + * @obj: the GEM object to unlock + * + * Unlock the GEM object and remove it from the collection of locked objects. + * Should only be used to unlock the most recently locked objects. It's not time + * efficient to unlock objects locked long ago. + */ +void drm_exec_unlock_obj(struct drm_exec *exec, struct drm_gem_object *obj) +{ + unsigned int i; + + for (i = exec->num_objects; i--;) { + if (exec->objects[i] == obj) { + dma_resv_unlock(obj->resv); + for (++i; i < exec->num_objects; ++i) + exec->objects[i - 1] = exec->objects[i]; + --exec->num_objects; + drm_gem_object_put(obj); + return; + } + + } +} +EXPORT_SYMBOL(drm_exec_unlock_obj); + +/** + * drm_exec_prepare_obj - prepare a GEM object for use + * @exec: the drm_exec object with the state + * @obj: the GEM object to prepare + * @num_fences: how many fences to reserve + * + * Prepare a GEM object for use by locking it and reserving fence slots. + * + * Returns: -EDEADLK if a contention is detected, -EALREADY when object is + * already locked, -ENOMEM when memory allocation failed and zero for success. + */ +int drm_exec_prepare_obj(struct drm_exec *exec, struct drm_gem_object *obj, + unsigned int num_fences) +{ + int ret; + + ret = drm_exec_lock_obj(exec, obj); + if (ret) + return ret; + + ret = dma_resv_reserve_fences(obj->resv, num_fences); + if (ret) { + drm_exec_unlock_obj(exec, obj); + return ret; + } + + return 0; +} +EXPORT_SYMBOL(drm_exec_prepare_obj); + +/** + * drm_exec_prepare_array - helper to prepare an array of objects + * @exec: the drm_exec object with the state + * @objects: array of GEM object to prepare + * @num_objects: number of GEM objects in the array + * @num_fences: number of fences to reserve on each GEM object + * + * Prepares all GEM objects in an array, aborts on first error. + * Reserves @num_fences on each GEM object after locking it. + * + * Returns: -EDEADLOCK on contention, -EALREADY when object is already locked, + * -ENOMEM when memory allocation failed and zero for success. 
+ */ +int drm_exec_prepare_array(struct drm_exec *exec, + struct drm_gem_object **objects, + unsigned int num_objects, + unsigned int num_fences) +{ + int ret; + + for (unsigned int i = 0; i < num_objects; ++i) { + ret = drm_exec_prepare_obj(exec, objects[i], num_fences); + if (unlikely(ret)) + return ret; + } + + return 0; +} +EXPORT_SYMBOL(drm_exec_prepare_array); + +MODULE_DESCRIPTION("DRM execution context"); +MODULE_LICENSE("Dual MIT/GPL"); diff --git a/include/drm/drm_exec.h b/include/drm/drm_exec.h new file mode 100644 index 000000000000..73205afec162 --- /dev/null +++ b/include/drm/drm_exec.h @@ -0,0 +1,123 @@ +/* SPDX-License-Identifier: GPL-2.0 OR MIT */ + +#ifndef __DRM_EXEC_H__ +#define __DRM_EXEC_H__ + +#include <linux/ww_mutex.h> + +#define DRM_EXEC_INTERRUPTIBLE_WAIT BIT(0) +#define DRM_EXEC_IGNORE_DUPLICATES BIT(1) + +struct drm_gem_object; + +/** + * struct drm_exec - Execution context + */ +struct drm_exec { + /** + * @flags: Flags to control locking behavior + */ + uint32_t flags; + + /** + * @ticket: WW ticket used for acquiring locks + */ + struct ww_acquire_ctx ticket; + + /** + * @num_objects: number of objects locked + */ + unsigned int num_objects; + + /** + * @max_objects: maximum objects in array + */ + unsigned int max_objects; + + /** + * @objects: array of the locked objects + */ + struct drm_gem_object **objects; + + /** + * @contended: contended GEM object we backed off for + */ + struct drm_gem_object *contended; + + /** + * @prelocked: already locked GEM object due to contention + */ + struct drm_gem_object *prelocked; +}; + +/** + * drm_exec_for_each_locked_object - iterate over all the locked objects + * @exec: drm_exec object + * @index: unsigned long index for the iteration + * @obj: the current GEM object + * + * Iterate over all the locked GEM objects inside the drm_exec object. + */ +#define drm_exec_for_each_locked_object(exec, index, obj) \ + for (index = 0, obj = (exec)->objects[0]; \ + index < (exec)->num_objects; \ + ++index, obj = (exec)->objects[index]) + +/** + * drm_exec_until_all_locked - loop until all GEM objects are locked + * @exec: drm_exec object + * + * Core functionality of the drm_exec object. Loops until all GEM objects are + * locked and no more contention exists. At the beginning of the loop it is + * guaranteed that no GEM object is locked. + * + * Since labels can't be defined local to the loop's body, we use a jump pointer + * to make sure that the retry is only used from within the loop's body. + */ +#define drm_exec_until_all_locked(exec) \ + for (void *__drm_exec_retry_ptr; ({ \ + __label__ __drm_exec_retry; \ +__drm_exec_retry: \ + __drm_exec_retry_ptr = &&__drm_exec_retry; \ + (void)__drm_exec_retry_ptr; \ + drm_exec_cleanup(exec); \ + });) + +/** + * drm_exec_retry_on_contention - restart the loop to grab all locks + * @exec: drm_exec object + * + * Control flow helper to continue when a contention was detected and we need to + * clean up and re-start the loop to prepare all GEM objects. + */ +#define drm_exec_retry_on_contention(exec) \ + do { \ + if (unlikely(drm_exec_is_contended(exec))) \ + goto *__drm_exec_retry_ptr; \ + } while (0) + +/** + * drm_exec_is_contended - check for contention + * @exec: drm_exec object + * + * Returns true if the drm_exec object has run into some contention while + * locking a GEM object and needs to clean up.
+ */ +static inline bool drm_exec_is_contended(struct drm_exec *exec) +{ + return !!exec->contended; +} + +void drm_exec_init(struct drm_exec *exec, uint32_t flags); +void drm_exec_fini(struct drm_exec *exec); +bool drm_exec_cleanup(struct drm_exec *exec); +int drm_exec_lock_obj(struct drm_exec *exec, struct drm_gem_object *obj); +void drm_exec_unlock_obj(struct drm_exec *exec, struct drm_gem_object *obj); +int drm_exec_prepare_obj(struct drm_exec *exec, struct drm_gem_object *obj, + unsigned int num_fences); +int drm_exec_prepare_array(struct drm_exec *exec, + struct drm_gem_object **objects, + unsigned int num_objects, + unsigned int num_fences); + +#endif -- cgit v1.2.3
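The drm_exec patch above only adds the infrastructure; no driver is converted in this series. As a minimal usage sketch of how a driver would consume the new API, the snippet below is built solely from the functions and macros introduced in drm_exec.c and drm_exec.h. The surrounding names (my_job, bos, num_bos, done_fence, my_job_lock_and_fence) are hypothetical stand-ins for a driver's own submit path and are not part of the patch.

    #include <drm/drm_exec.h>
    #include <drm/drm_gem.h>
    #include <linux/dma-resv.h>

    /* Hypothetical job object; only the fields used below are shown. */
    struct my_job {
            struct drm_gem_object **bos;    /* BOs referenced by the job */
            unsigned int num_bos;
            struct dma_fence *done_fence;   /* signals job completion */
    };

    static int my_job_lock_and_fence(struct my_job *job)
    {
            struct drm_gem_object *obj;
            struct drm_exec exec;
            unsigned long index;
            int ret = 0;

            drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT);

            /*
             * Retry loop: on contention drm_exec drops every lock taken so
             * far, slow-locks the contended object and runs the body again.
             */
            drm_exec_until_all_locked(&exec) {
                    ret = drm_exec_prepare_array(&exec, job->bos,
                                                 job->num_bos, 1);
                    drm_exec_retry_on_contention(&exec);
                    if (ret)
                            goto out_fini;
            }

            /* Everything is locked; publish the job's completion fence. */
            drm_exec_for_each_locked_object(&exec, index, obj)
                    dma_resv_add_fence(obj->resv, job->done_fence,
                                       DMA_RESV_USAGE_WRITE);

    out_fini:
            drm_exec_fini(&exec);
            return ret;
    }

Compared with open-coded ww_mutex handling, the backoff, the slow-path re-lock of the contended object and the reference counting all live inside drm_exec, so the submit path only has to mark the retry point with drm_exec_retry_on_contention() and reserve one fence slot per object for the dma_resv_add_fence() call that follows.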