author		Thomas Zimmermann <tzimmermann@suse.de>	2020-11-03 12:30:11 +0300
committer	Thomas Zimmermann <tzimmermann@suse.de>	2020-11-09 11:19:24 +0300
commit		49a3f51dfeeecb52c5aa28c5cb9592fe5e39bf95 (patch)
tree		60399216163b2213ccd45bdddc2dde62f6e0002d /drivers/gpu/drm/vgem
parent		43676605f890b218e551f396a55dbaea7799acb4 (diff)
download	linux-49a3f51dfeeecb52c5aa28c5cb9592fe5e39bf95.tar.xz
drm/gem: Use struct dma_buf_map in GEM vmap ops and convert GEM backends
This patch replaces the use of raw pointers in GEM object functions' vmap/vunmap callbacks with instances of struct dma_buf_map. GEM backends are converted as well. For most of them, this simply changes the returned type.

TTM-based drivers now return information about the location of the memory, either system or I/O memory. GEM VRAM helpers and qxl now use ttm_bo_vmap() et al. Amdgpu, nouveau and radeon use drm_gem_ttm_vmap() et al instead of implementing their own vmap callbacks.

v7:
 * init QXL cursor to mapped BO buffer (kernel test robot)
v5:
 * update vkms after switch to shmem
v4:
 * use ttm_bo_vmap(), drm_gem_ttm_vmap(), et al. (Daniel, Christian)
 * fix a trailing { in drm_gem_vmap()
 * remove several empty functions instead of converting them (Daniel)
 * comment uses of raw pointers with a TODO (Daniel)
 * TODO list: convert more helpers to use struct dma_buf_map

Signed-off-by: Thomas Zimmermann <tzimmermann@suse.de>
Acked-by: Christian König <christian.koenig@amd.com>
Tested-by: Sam Ravnborg <sam@ravnborg.org>
Link: https://patchwork.freedesktop.org/patch/msgid/20201103093015.1063-7-tzimmermann@suse.de
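For reference, here is a minimal sketch of how a consumer might read back a mapping handed out through struct dma_buf_map, assuming the <linux/dma-buf-map.h> helpers this series builds on; the function name, destination buffer and length are illustrative only and not part of the patch:

#include <linux/dma-buf-map.h>
#include <linux/io.h>
#include <linux/string.h>

/*
 * Illustrative sketch: callers branch on map->is_iomem instead of
 * assuming a plain kernel pointer. TTM-based drivers may hand back
 * I/O memory (vaddr_iomem), while system-memory backends such as
 * vgem fill a regular vaddr.
 */
static void example_read_mapping(struct dma_buf_map *map, void *dst, size_t len)
{
	if (map->is_iomem)
		memcpy_fromio(dst, map->vaddr_iomem, len);	/* e.g. a VRAM mapping */
	else
		memcpy(dst, map->vaddr, len);			/* e.g. vgem's vmap() result */
}

As the diff below shows, vgem's pages live in system memory, so its vmap callback only ever fills map->vaddr via dma_buf_map_set_vaddr().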
Diffstat (limited to 'drivers/gpu/drm/vgem')
-rw-r--r--	drivers/gpu/drm/vgem/vgem_drv.c | 16 +++++++++++-----
1 file changed, 11 insertions(+), 5 deletions(-)
diff --git a/drivers/gpu/drm/vgem/vgem_drv.c b/drivers/gpu/drm/vgem/vgem_drv.c
index 3e9b7a4c30ce..9a413091abb6 100644
--- a/drivers/gpu/drm/vgem/vgem_drv.c
+++ b/drivers/gpu/drm/vgem/vgem_drv.c
@@ -361,24 +361,30 @@ static struct drm_gem_object *vgem_prime_import_sg_table(struct drm_device *dev,
 	return &obj->base;
 }
 
-static void *vgem_prime_vmap(struct drm_gem_object *obj)
+static int vgem_prime_vmap(struct drm_gem_object *obj, struct dma_buf_map *map)
 {
 	struct drm_vgem_gem_object *bo = to_vgem_bo(obj);
 	long n_pages = obj->size >> PAGE_SHIFT;
 	struct page **pages;
+	void *vaddr;
 
 	pages = vgem_pin_pages(bo);
 	if (IS_ERR(pages))
-		return NULL;
+		return PTR_ERR(pages);
+
+	vaddr = vmap(pages, n_pages, 0, pgprot_writecombine(PAGE_KERNEL));
+	if (!vaddr)
+		return -ENOMEM;
+	dma_buf_map_set_vaddr(map, vaddr);
 
-	return vmap(pages, n_pages, 0, pgprot_writecombine(PAGE_KERNEL));
+	return 0;
 }
 
-static void vgem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
+static void vgem_prime_vunmap(struct drm_gem_object *obj, struct dma_buf_map *map)
 {
 	struct drm_vgem_gem_object *bo = to_vgem_bo(obj);
 
-	vunmap(vaddr);
+	vunmap(map->vaddr);
 	vgem_unpin_pages(bo);
 }