Diffstat (limited to 'drivers/gpu/drm')
-rw-r--r--  drivers/gpu/drm/drm_gem_cma_helper.c                 | 13
-rw-r--r--  drivers/gpu/drm/drm_gem_shmem_helper.c               | 14
-rw-r--r--  drivers/gpu/drm/drm_prime.c                          |  9
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c          |  8
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c           | 11
-rw-r--r--  drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c | 12
-rw-r--r--  drivers/gpu/drm/i915/gem/selftests/mock_dmabuf.c     | 10
-rw-r--r--  drivers/gpu/drm/tegra/gem.c                          | 18
8 files changed, 67 insertions(+), 28 deletions(-)
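All eight files follow the same conversion: dma_buf_vmap() no longer returns a raw kernel pointer (NULL on failure) but fills a caller-provided struct dma_buf_map and returns 0 or a negative errno. A minimal caller-side sketch of the new convention, assuming the <linux/dma-buf-map.h> helpers this series builds on; the consume_vaddr() callee is a hypothetical placeholder, not part of the patch:

#include <linux/dma-buf.h>
#include <linux/dma-buf-map.h>

static int example_vmap_user(struct dma_buf *dmabuf)
{
	struct dma_buf_map map;
	int ret;

	/* New convention: errno-style return, address delivered via @map. */
	ret = dma_buf_vmap(dmabuf, &map);
	if (ret)
		return ret;

	consume_vaddr(map.vaddr); /* hypothetical consumer of the mapping */

	/* dma_buf_vunmap() still takes the raw pointer at this point
	 * in the series. */
	dma_buf_vunmap(dmabuf, map.vaddr);
	return 0;
}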
diff --git a/drivers/gpu/drm/drm_gem_cma_helper.c b/drivers/gpu/drm/drm_gem_cma_helper.c
index 1e5b389c435c..eb412e45f896 100644
--- a/drivers/gpu/drm/drm_gem_cma_helper.c
+++ b/drivers/gpu/drm/drm_gem_cma_helper.c
@@ -634,22 +634,23 @@ drm_gem_cma_prime_import_sg_table_vmap(struct drm_device *dev,
{
struct drm_gem_cma_object *cma_obj;
struct drm_gem_object *obj;
- void *vaddr;
+ struct dma_buf_map map;
+ int ret;
- vaddr = dma_buf_vmap(attach->dmabuf);
- if (!vaddr) {
+ ret = dma_buf_vmap(attach->dmabuf, &map);
+ if (ret) {
DRM_ERROR("Failed to vmap PRIME buffer\n");
- return ERR_PTR(-ENOMEM);
+ return ERR_PTR(ret);
}
obj = drm_gem_cma_prime_import_sg_table(dev, attach, sgt);
if (IS_ERR(obj)) {
- dma_buf_vunmap(attach->dmabuf, vaddr);
+ dma_buf_vunmap(attach->dmabuf, map.vaddr);
return obj;
}
cma_obj = to_drm_gem_cma_obj(obj);
- cma_obj->vaddr = vaddr;
+ cma_obj->vaddr = map.vaddr;
return obj;
}
diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c
index 0a952f27c184..ad10a57cfece 100644
--- a/drivers/gpu/drm/drm_gem_shmem_helper.c
+++ b/drivers/gpu/drm/drm_gem_shmem_helper.c
@@ -261,13 +261,16 @@ EXPORT_SYMBOL(drm_gem_shmem_unpin);
static void *drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem)
{
struct drm_gem_object *obj = &shmem->base;
- int ret;
+ struct dma_buf_map map;
+ int ret = 0;
if (shmem->vmap_use_count++ > 0)
return shmem->vaddr;
if (obj->import_attach) {
- shmem->vaddr = dma_buf_vmap(obj->import_attach->dmabuf);
+ ret = dma_buf_vmap(obj->import_attach->dmabuf, &map);
+ if (!ret)
+ shmem->vaddr = map.vaddr;
} else {
pgprot_t prot = PAGE_KERNEL;
@@ -279,11 +282,12 @@ static void *drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem)
prot = pgprot_writecombine(prot);
shmem->vaddr = vmap(shmem->pages, obj->size >> PAGE_SHIFT,
VM_MAP, prot);
+ if (!shmem->vaddr)
+ ret = -ENOMEM;
}
- if (!shmem->vaddr) {
- DRM_DEBUG_KMS("Failed to vmap pages\n");
- ret = -ENOMEM;
+ if (ret) {
+ DRM_DEBUG_KMS("Failed to vmap pages, error %d\n", ret);
goto err_put_pages;
}
diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c
index 194784a5b344..2d75e58d21fe 100644
--- a/drivers/gpu/drm/drm_prime.c
+++ b/drivers/gpu/drm/drm_prime.c
@@ -662,22 +662,25 @@ EXPORT_SYMBOL(drm_gem_unmap_dma_buf);
/**
* drm_gem_dmabuf_vmap - dma_buf vmap implementation for GEM
* @dma_buf: buffer to be mapped
+ * @map: the &dma_buf_map to store the kernel virtual address in
*
* Sets up a kernel virtual mapping. This can be used as the &dma_buf_ops.vmap
* callback. Calls into &drm_gem_object_funcs.vmap for device specific handling.
*
- * Returns the kernel virtual address or NULL on failure.
+ * Returns 0 on success or a negative errno code otherwise.
*/
-void *drm_gem_dmabuf_vmap(struct dma_buf *dma_buf)
+int drm_gem_dmabuf_vmap(struct dma_buf *dma_buf, struct dma_buf_map *map)
{
struct drm_gem_object *obj = dma_buf->priv;
void *vaddr;
vaddr = drm_gem_vmap(obj);
if (IS_ERR(vaddr))
- vaddr = NULL;
+ return PTR_ERR(vaddr);
- return vaddr;
+ dma_buf_map_set_vaddr(map, vaddr);
+
+ return 0;
}
EXPORT_SYMBOL(drm_gem_dmabuf_vmap);
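As the kerneldoc above says, this helper is meant to be plugged into &dma_buf_ops.vmap. A sketch of an ops table wired to the exported GEM PRIME helpers under the new signature; the table name and field selection are illustrative, not the exact table in drm_prime.c:

#include <linux/dma-buf.h>
#include <drm/drm_prime.h>

static const struct dma_buf_ops example_gem_dmabuf_ops = {
	.attach = drm_gem_map_attach,
	.detach = drm_gem_map_detach,
	.map_dma_buf = drm_gem_map_dma_buf,
	.unmap_dma_buf = drm_gem_unmap_dma_buf,
	.release = drm_gem_dmabuf_release,
	.mmap = drm_gem_dmabuf_mmap,
	/* .vmap now takes (struct dma_buf *, struct dma_buf_map *) and
	 * returns an errno; .vunmap keeps its void-pointer signature. */
	.vmap = drm_gem_dmabuf_vmap,
	.vunmap = drm_gem_dmabuf_vunmap,
};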
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c
index 4aa3426a9ba4..80a9fc143bbb 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c
@@ -85,9 +85,15 @@ static void etnaviv_gem_prime_release(struct etnaviv_gem_object *etnaviv_obj)
static void *etnaviv_gem_prime_vmap_impl(struct etnaviv_gem_object *etnaviv_obj)
{
+ struct dma_buf_map map;
+ int ret;
+
lockdep_assert_held(&etnaviv_obj->lock);
- return dma_buf_vmap(etnaviv_obj->base.import_attach->dmabuf);
+ ret = dma_buf_vmap(etnaviv_obj->base.import_attach->dmabuf, &map);
+ if (ret)
+ return NULL;
+ return map.vaddr;
}
static int etnaviv_gem_prime_mmap_obj(struct etnaviv_gem_object *etnaviv_obj,
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
index 27fddc22a7c6..77b363d3000b 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
@@ -82,11 +82,18 @@ static void i915_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
i915_gem_object_unpin_pages(obj);
}
-static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf)
+static int i915_gem_dmabuf_vmap(struct dma_buf *dma_buf, struct dma_buf_map *map)
{
struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
+ void *vaddr;
- return i915_gem_object_pin_map(obj, I915_MAP_WB);
+ vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
+ if (IS_ERR(vaddr))
+ return PTR_ERR(vaddr);
+
+ dma_buf_map_set_vaddr(map, vaddr);
+
+ return 0;
}
static void i915_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c
index 0845ce1ae37c..3aad7643b0b7 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c
@@ -82,6 +82,7 @@ static int igt_dmabuf_import(void *arg)
struct drm_i915_gem_object *obj;
struct dma_buf *dmabuf;
void *obj_map, *dma_map;
+ struct dma_buf_map map;
u32 pattern[] = { 0, 0xaa, 0xcc, 0x55, 0xff };
int err, i;
@@ -110,7 +111,8 @@ static int igt_dmabuf_import(void *arg)
goto out_obj;
}
- dma_map = dma_buf_vmap(dmabuf);
+ err = dma_buf_vmap(dmabuf, &map);
+ dma_map = err ? NULL : map.vaddr;
if (!dma_map) {
pr_err("dma_buf_vmap failed\n");
err = -ENOMEM;
@@ -163,6 +165,7 @@ static int igt_dmabuf_import_ownership(void *arg)
struct drm_i915_private *i915 = arg;
struct drm_i915_gem_object *obj;
struct dma_buf *dmabuf;
+ struct dma_buf_map map;
void *ptr;
int err;
@@ -170,7 +173,8 @@ static int igt_dmabuf_import_ownership(void *arg)
if (IS_ERR(dmabuf))
return PTR_ERR(dmabuf);
- ptr = dma_buf_vmap(dmabuf);
+ err = dma_buf_vmap(dmabuf, &map);
+ ptr = err ? NULL : map.vaddr;
if (!ptr) {
pr_err("dma_buf_vmap failed\n");
err = -ENOMEM;
@@ -212,6 +216,7 @@ static int igt_dmabuf_export_vmap(void *arg)
struct drm_i915_private *i915 = arg;
struct drm_i915_gem_object *obj;
struct dma_buf *dmabuf;
+ struct dma_buf_map map;
void *ptr;
int err;
@@ -228,7 +233,8 @@ static int igt_dmabuf_export_vmap(void *arg)
}
i915_gem_object_put(obj);
- ptr = dma_buf_vmap(dmabuf);
+ err = dma_buf_vmap(dmabuf, &map);
+ ptr = err ? NULL : map.vaddr;
if (!ptr) {
pr_err("dma_buf_vmap failed\n");
err = -ENOMEM;
diff --git a/drivers/gpu/drm/i915/gem/selftests/mock_dmabuf.c b/drivers/gpu/drm/i915/gem/selftests/mock_dmabuf.c
index debaf7b18ab5..dcae46441f7d 100644
--- a/drivers/gpu/drm/i915/gem/selftests/mock_dmabuf.c
+++ b/drivers/gpu/drm/i915/gem/selftests/mock_dmabuf.c
@@ -62,11 +62,17 @@ static void mock_dmabuf_release(struct dma_buf *dma_buf)
kfree(mock);
}
-static void *mock_dmabuf_vmap(struct dma_buf *dma_buf)
+static int mock_dmabuf_vmap(struct dma_buf *dma_buf, struct dma_buf_map *map)
{
struct mock_dmabuf *mock = to_mock(dma_buf);
+ void *vaddr;
- return vm_map_ram(mock->pages, mock->npages, 0);
+ vaddr = vm_map_ram(mock->pages, mock->npages, 0);
+ if (!vaddr)
+ return -ENOMEM;
+ dma_buf_map_set_vaddr(map, vaddr);
+
+ return 0;
}
static void mock_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
diff --git a/drivers/gpu/drm/tegra/gem.c b/drivers/gpu/drm/tegra/gem.c
index d481dea4738d..2b6e06c7a2c5 100644
--- a/drivers/gpu/drm/tegra/gem.c
+++ b/drivers/gpu/drm/tegra/gem.c
@@ -132,14 +132,18 @@ static void tegra_bo_unpin(struct device *dev, struct sg_table *sgt)
static void *tegra_bo_mmap(struct host1x_bo *bo)
{
struct tegra_bo *obj = host1x_to_tegra_bo(bo);
+ struct dma_buf_map map;
+ int ret;
- if (obj->vaddr)
+ if (obj->vaddr) {
return obj->vaddr;
- else if (obj->gem.import_attach)
- return dma_buf_vmap(obj->gem.import_attach->dmabuf);
- else
+ } else if (obj->gem.import_attach) {
+ ret = dma_buf_vmap(obj->gem.import_attach->dmabuf, &map);
+ return ret ? NULL : map.vaddr;
+ } else {
return vmap(obj->pages, obj->num_pages, VM_MAP,
pgprot_writecombine(PAGE_KERNEL));
+ }
}
static void tegra_bo_munmap(struct host1x_bo *bo, void *addr)
@@ -649,12 +653,14 @@ static int tegra_gem_prime_mmap(struct dma_buf *buf, struct vm_area_struct *vma)
return __tegra_gem_mmap(gem, vma);
}
-static void *tegra_gem_prime_vmap(struct dma_buf *buf)
+static int tegra_gem_prime_vmap(struct dma_buf *buf, struct dma_buf_map *map)
{
struct drm_gem_object *gem = buf->priv;
struct tegra_bo *bo = to_tegra_bo(gem);
- return bo->vaddr;
+ dma_buf_map_set_vaddr(map, bo->vaddr);
+
+ return 0;
}
static void tegra_gem_prime_vunmap(struct dma_buf *buf, void *vaddr)