author     Linus Torvalds <torvalds@linux-foundation.org>  2020-12-14 22:07:56 +0300
committer  Linus Torvalds <torvalds@linux-foundation.org>  2020-12-14 22:07:56 +0300
commit     1d36dffa5d887715dacca0f717f4519b7be5e498 (patch)
tree       a68f7c00dbb3036a67806ed6c6b8cc61c3cff60d /drivers/gpu/drm/i915/gem
parent     2c85ebc57b3e1817b6ce1a6b703928e113a90442 (diff)
parent     b10733527bfd864605c33ab2e9a886eec317ec39 (diff)
Merge tag 'drm-next-2020-12-11' of git://anongit.freedesktop.org/drm/drm
Pull drm updates from Dave Airlie:
 "Not a huge amount of big things here, AMD has support for a few new
  HW variants (vangogh, green sardine, dimgrey cavefish), Intel has
  some more DG1 enablement. We have a few big reworks of the TTM layers
  and interfaces, GEM and atomic internal API reworks cross tree. fbdev
  is marked orphaned in here as well to reflect the current reality.

  core:
   - documentation updates
   - deprecate DRM_FORMAT_MOD_NONE
   - atomic crtc enable/disable rework
   - GEM convert drivers to gem object functions
   - remove SCATTER_LIST_MAX_SEGMENT

  sched:
   - avoid infinite waits

  ttm:
   - remove AGP support
   - don't modify caching for swapout
   - ttm pinning rework
   - major TTM reworks
   - new backend allocator
   - multihop support

  vram-helper:
   - top down BO placement fix
   - TTM changes
   - GEM object support

  displayport:
   - DP 2.0 DPCD prep work
   - DP MST extended DPCD caps

  fbdev:
   - mark as orphaned

  amdgpu:
   - Initial Vangogh support
   - Green Sardine support
   - Dimgrey Cavefish support
   - SG display support for renoir
   - SMU7 improvements
   - gfx9+ modifier support
   - CI BACO fixes

  radeon:
   - expose voltage via hwmon on SUMO

  amdkfd:
   - fix unique id handling

  i915:
   - more DG1 enablement
   - bigjoiner support
   - integer scaling filter support
   - async flip support
   - ICL+ DSI command mode
   - Improve display shutdown
   - Display refactoring
   - eLLC machine fbdev loading fix
   - dma scatterlist fixes
   - TGL hang fixes
   - eLLC display buffer caching on SKL+
   - MOCS PTE setting for gen9+

  msm:
   - Shutdown hook
   - GPU cooling device support
   - DSI 7nm and 10nm phy/pll updates
   - sm8150/sm8250 DPU support
   - GEM locking re-work
   - LLCC system cache support

  aspeed:
   - sysfs output config support

  ast:
   - LUT fix
   - new display mode

  gma500:
   - remove 2d framebuffer accel

  panfrost:
   - move gpu reset to a worker

  exynos:
   - new HDMI mode support

  mediatek:
   - MT8167 support
   - yaml bindings
   - MIPI DSI phy code moved

  etnaviv:
   - new perf counter
   - more lockdep annotation

  hibmc:
   - i2c DDC support

  ingenic:
   - pixel clock reset fix
   - reserved memory support
   - allow both DMA channels at once
   - different pixel format support
   - 30/24/8-bit palette modes

  tilcdc:
   - don't keep vblank irq enabled

  vc4:
   - new maintainer added
   - DSI registration fix

  virtio:
   - blob resource support
   - host visible and cross-device support
   - uuid api support"

* tag 'drm-next-2020-12-11' of git://anongit.freedesktop.org/drm/drm: (1754 commits)
  drm/amdgpu: Initialise drm_gem_object_funcs for imported BOs
  drm/amdgpu: fix size calculation with stolen vga memory
  drm/amdgpu: remove amdgpu_ttm_late_init and amdgpu_bo_late_init
  drm/amdgpu: free the pre-OS console framebuffer after the first modeset
  drm/amdgpu: enable runtime pm using BACO on CI dGPUs
  drm/amdgpu/cik: enable BACO reset on Bonaire
  drm/amd/pm: update smu10.h WORKLOAD_PPLIB setting for raven
  drm/amd/pm: remove one unsupported smu function for vangogh
  drm/amd/display: setup system context for APUs
  drm/amd/display: add S/G support for Vangogh
  drm/amdkfd: Fix leak in dmabuf import
  drm/amdgpu: use AMDGPU_NUM_VMID when possible
  drm/amdgpu: fix sdma instance fw version and feature version init
  drm/amd/pm: update driver if version for dimgrey_cavefish
  drm/amd/display: 3.2.115
  drm/amd/display: [FW Promotion] Release 0.0.45
  drm/amd/display: Revert DCN2.1 dram_clock_change_latency update
  drm/amd/display: Enable gpu_vm_support for dcn3.01
  drm/amd/display: Fixed the audio noise during mode switching with HDCP mode on
  drm/amd/display: Add wm table for Renoir
  ...
Diffstat (limited to 'drivers/gpu/drm/i915/gem')
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_client_blt.c          18
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c              13
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_object.c              23
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_object.h              23
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_object_types.h        17
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_pages.c               21
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_stolen.c              48
-rw-r--r--  drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c    18
-rw-r--r--  drivers/gpu/drm/i915/gem/selftests/mock_dmabuf.c        14
9 files changed, 151 insertions, 44 deletions
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_client_blt.c b/drivers/gpu/drm/i915/gem/i915_gem_client_blt.c
index 272cf3ea68d5..44821d94544f 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_client_blt.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_client_blt.c
@@ -202,12 +202,6 @@ retry:
if (unlikely(err))
goto out_request;
- if (w->ce->engine->emit_init_breadcrumb) {
- err = w->ce->engine->emit_init_breadcrumb(rq);
- if (unlikely(err))
- goto out_request;
- }
-
/*
* w->dma is already exported via (vma|obj)->resv we need only
* keep track of the GPU activity within this vma/request, and
@@ -217,9 +211,15 @@ retry:
if (err)
goto out_request;
- err = w->ce->engine->emit_bb_start(rq,
- batch->node.start, batch->node.size,
- 0);
+ if (rq->engine->emit_init_breadcrumb) {
+ err = rq->engine->emit_init_breadcrumb(rq);
+ if (unlikely(err))
+ goto out_request;
+ }
+
+ err = rq->engine->emit_bb_start(rq,
+ batch->node.start, batch->node.size,
+ 0);
out_request:
if (unlikely(err)) {
i915_request_set_error_once(rq, err);
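Note: the hunk above moves the init-breadcrumb emission so that it happens after the vma/dma-fence tracking but still ahead of the batchbuffer start, and switches from w->ce->engine to rq->engine, i.e. the engine the request was actually built on. A minimal sketch of the resulting ordering; the submit_copy() wrapper and the tracking step are illustrative, only the emit_init_breadcrumb()/emit_bb_start() calls come from the patch:

/* Sketch only: canonical ordering when building an i915 request that
 * runs a pinned batch. Error handling mirrors the patch above. */
static int submit_copy(struct i915_request *rq, struct i915_vma *batch)
{
	int err;

	/* 1. Dependency/activity tracking (i915_vma_move_to_active() etc.)
	 *    happens first, while the request can still be unwound cheaply. */

	/* 2. The init breadcrumb marks the switch from the await/semaphore
	 *    section of the ring to the user payload; not every engine
	 *    implements it, hence the NULL check. */
	if (rq->engine->emit_init_breadcrumb) {
		err = rq->engine->emit_init_breadcrumb(rq);
		if (unlikely(err))
			return err;
	}

	/* 3. Only then point the ring at the batchbuffer. */
	return rq->engine->emit_bb_start(rq,
					 batch->node.start, batch->node.size,
					 0);
}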
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
index 8dd295dbe241..0dd477e56573 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
@@ -77,14 +77,21 @@ static void i915_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
i915_gem_object_unpin_pages(obj);
}
-static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf)
+static int i915_gem_dmabuf_vmap(struct dma_buf *dma_buf, struct dma_buf_map *map)
{
struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
+ void *vaddr;
- return i915_gem_object_pin_map(obj, I915_MAP_WB);
+ vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
+ if (IS_ERR(vaddr))
+ return PTR_ERR(vaddr);
+
+ dma_buf_map_set_vaddr(map, vaddr);
+
+ return 0;
}
-static void i915_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
+static void i915_gem_dmabuf_vunmap(struct dma_buf *dma_buf, struct dma_buf_map *map)
{
struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
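Note: this hunk is part of the tree-wide switch of the dma-buf vmap/vunmap callbacks from returning a bare void * to filling in a struct dma_buf_map, which records whether the mapping points at system or I/O memory. A minimal exporter-side sketch of the new contract follows; my_exporter_kmap()/my_exporter_kunmap() and the priv pointer are hypothetical stand-ins for a driver's own mapping helpers:

#include <linux/dma-buf.h>
#include <linux/dma-buf-map.h>
#include <linux/err.h>

static int my_exporter_vmap(struct dma_buf *dma_buf, struct dma_buf_map *map)
{
	void *vaddr = my_exporter_kmap(dma_buf->priv);	/* hypothetical */

	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);

	/* System-memory mapping; an exporter backed by I/O memory would
	 * use dma_buf_map_set_vaddr_iomem() instead. */
	dma_buf_map_set_vaddr(map, vaddr);
	return 0;
}

static void my_exporter_vunmap(struct dma_buf *dma_buf, struct dma_buf_map *map)
{
	my_exporter_kunmap(dma_buf->priv, map->vaddr);	/* hypothetical */
	dma_buf_map_clear(map);
}

static const struct dma_buf_ops my_exporter_dmabuf_ops = {
	/* .attach/.detach/.map_dma_buf/.unmap_dma_buf/.release/.mmap ... */
	.vmap	= my_exporter_vmap,
	.vunmap	= my_exporter_vunmap,
};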
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c
index c8421fd9d2dc..00d24000b5e8 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c
@@ -39,9 +39,18 @@ static struct i915_global_object {
struct kmem_cache *slab_objects;
} global;
+static const struct drm_gem_object_funcs i915_gem_object_funcs;
+
struct drm_i915_gem_object *i915_gem_object_alloc(void)
{
- return kmem_cache_zalloc(global.slab_objects, GFP_KERNEL);
+ struct drm_i915_gem_object *obj;
+
+ obj = kmem_cache_zalloc(global.slab_objects, GFP_KERNEL);
+ if (!obj)
+ return NULL;
+ obj->base.funcs = &i915_gem_object_funcs;
+
+ return obj;
}
void i915_gem_object_free(struct drm_i915_gem_object *obj)
@@ -73,6 +82,8 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
obj->mm.madv = I915_MADV_WILLNEED;
INIT_RADIX_TREE(&obj->mm.get_page.radix, GFP_KERNEL | __GFP_NOWARN);
mutex_init(&obj->mm.get_page.lock);
+ INIT_RADIX_TREE(&obj->mm.get_dma_page.radix, GFP_KERNEL | __GFP_NOWARN);
+ mutex_init(&obj->mm.get_dma_page.lock);
if (IS_ENABLED(CONFIG_LOCKDEP) && i915_gem_object_is_shrinkable(obj))
i915_gem_shrinker_taints_mutex(to_i915(obj->base.dev),
@@ -101,7 +112,7 @@ void i915_gem_object_set_cache_coherency(struct drm_i915_gem_object *obj,
!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE);
}
-void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file)
+static void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file)
{
struct drm_i915_gem_object *obj = to_intel_bo(gem);
struct drm_i915_file_private *fpriv = file->driver_priv;
@@ -264,7 +275,7 @@ static void __i915_gem_free_work(struct work_struct *work)
i915_gem_flush_free_objects(i915);
}
-void i915_gem_free_object(struct drm_gem_object *gem_obj)
+static void i915_gem_free_object(struct drm_gem_object *gem_obj)
{
struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
struct drm_i915_private *i915 = to_i915(obj->base.dev);
@@ -403,6 +414,12 @@ int __init i915_global_objects_init(void)
return 0;
}
+static const struct drm_gem_object_funcs i915_gem_object_funcs = {
+ .free = i915_gem_free_object,
+ .close = i915_gem_close_object,
+ .export = i915_gem_prime_export,
+};
+
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/huge_gem_object.c"
#include "selftests/huge_pages.c"
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h b/drivers/gpu/drm/i915/gem/i915_gem_object.h
index d46db8d8f38e..be14486f63a7 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h
@@ -38,9 +38,6 @@ void __i915_gem_object_release_shmem(struct drm_i915_gem_object *obj,
int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align);
-void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file);
-void i915_gem_free_object(struct drm_gem_object *obj);
-
void i915_gem_flush_free_objects(struct drm_i915_private *i915);
struct sg_table *
@@ -275,8 +272,26 @@ int i915_gem_object_set_tiling(struct drm_i915_gem_object *obj,
unsigned int tiling, unsigned int stride);
struct scatterlist *
+__i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
+ struct i915_gem_object_page_iter *iter,
+ unsigned int n,
+ unsigned int *offset);
+
+static inline struct scatterlist *
i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
- unsigned int n, unsigned int *offset);
+ unsigned int n,
+ unsigned int *offset)
+{
+ return __i915_gem_object_get_sg(obj, &obj->mm.get_page, n, offset);
+}
+
+static inline struct scatterlist *
+i915_gem_object_get_sg_dma(struct drm_i915_gem_object *obj,
+ unsigned int n,
+ unsigned int *offset)
+{
+ return __i915_gem_object_get_sg(obj, &obj->mm.get_dma_page, n, offset);
+}
struct page *
i915_gem_object_get_page(struct drm_i915_gem_object *obj,
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
index d6711caa7f39..e2d9b7e1e152 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
@@ -82,6 +82,14 @@ struct i915_mmap_offset {
struct rb_node offset;
};
+struct i915_gem_object_page_iter {
+ struct scatterlist *sg_pos;
+ unsigned int sg_idx; /* in pages, but 32bit eek! */
+
+ struct radix_tree_root radix;
+ struct mutex lock; /* protects this cache */
+};
+
struct drm_i915_gem_object {
struct drm_gem_object base;
@@ -248,13 +256,8 @@ struct drm_i915_gem_object {
I915_SELFTEST_DECLARE(unsigned int page_mask);
- struct i915_gem_object_page_iter {
- struct scatterlist *sg_pos;
- unsigned int sg_idx; /* in pages, but 32bit eek! */
-
- struct radix_tree_root radix;
- struct mutex lock; /* protects this cache */
- } get_page;
+ struct i915_gem_object_page_iter get_page;
+ struct i915_gem_object_page_iter get_dma_page;
/**
* Element within i915->mm.unbound_list or i915->mm.bound_list,
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pages.c b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
index f60ca6dc911f..e2c7b2a7895f 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_pages.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
@@ -33,6 +33,8 @@ void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
obj->mm.get_page.sg_pos = pages->sgl;
obj->mm.get_page.sg_idx = 0;
+ obj->mm.get_dma_page.sg_pos = pages->sgl;
+ obj->mm.get_dma_page.sg_idx = 0;
obj->mm.pages = pages;
@@ -155,6 +157,8 @@ static void __i915_gem_object_reset_page_iter(struct drm_i915_gem_object *obj)
rcu_read_lock();
radix_tree_for_each_slot(slot, &obj->mm.get_page.radix, &iter, 0)
radix_tree_delete(&obj->mm.get_page.radix, iter.index);
+ radix_tree_for_each_slot(slot, &obj->mm.get_dma_page.radix, &iter, 0)
+ radix_tree_delete(&obj->mm.get_dma_page.radix, iter.index);
rcu_read_unlock();
}
@@ -438,11 +442,12 @@ void __i915_gem_object_release_map(struct drm_i915_gem_object *obj)
}
struct scatterlist *
-i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
- unsigned int n,
- unsigned int *offset)
+__i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
+ struct i915_gem_object_page_iter *iter,
+ unsigned int n,
+ unsigned int *offset)
{
- struct i915_gem_object_page_iter *iter = &obj->mm.get_page;
+ const bool dma = iter == &obj->mm.get_dma_page;
struct scatterlist *sg;
unsigned int idx, count;
@@ -471,7 +476,7 @@ i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
sg = iter->sg_pos;
idx = iter->sg_idx;
- count = __sg_page_count(sg);
+ count = dma ? __sg_dma_page_count(sg) : __sg_page_count(sg);
while (idx + count <= n) {
void *entry;
@@ -499,7 +504,7 @@ i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
idx += count;
sg = ____sg_next(sg);
- count = __sg_page_count(sg);
+ count = dma ? __sg_dma_page_count(sg) : __sg_page_count(sg);
}
scan:
@@ -517,7 +522,7 @@ scan:
while (idx + count <= n) {
idx += count;
sg = ____sg_next(sg);
- count = __sg_page_count(sg);
+ count = dma ? __sg_dma_page_count(sg) : __sg_page_count(sg);
}
*offset = n - idx;
@@ -584,7 +589,7 @@ i915_gem_object_get_dma_address_len(struct drm_i915_gem_object *obj,
struct scatterlist *sg;
unsigned int offset;
- sg = i915_gem_object_get_sg(obj, n, &offset);
+ sg = i915_gem_object_get_sg_dma(obj, n, &offset);
if (len)
*len = sg_dma_len(sg) - (offset << PAGE_SHIFT);
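Note: the page iterator is split because dma_map_sg() may coalesce scatterlist entries, so the object's DMA layout no longer lines up one-to-one with its struct-page layout; cached lookups by page index and by DMA-page index therefore need separate radix trees (get_page vs. get_dma_page). A small illustrative sketch of the two views, assuming a hypothetical debug helper:

/* Illustrative only: contrast the CPU (struct page) and device (DMA)
 * views of the same object's backing store. */
static void show_page_vs_dma(struct drm_i915_gem_object *obj, unsigned int n)
{
	unsigned int offset;
	struct scatterlist *sg;
	dma_addr_t addr;

	/* CPU view: the n-th struct page backing the object. */
	sg = i915_gem_object_get_sg(obj, n, &offset);
	pr_info("page %u -> pfn %#lx\n", n,
		page_to_pfn(nth_page(sg_page(sg), offset)));

	/* Device view: the n-th page of the dma-mapped layout; this is
	 * what i915_gem_object_get_dma_address_len() uses above. */
	sg = i915_gem_object_get_sg_dma(obj, n, &offset);
	addr = sg_dma_address(sg) + ((dma_addr_t)offset << PAGE_SHIFT);
	pr_info("page %u -> dma %pad\n", n, &addr);
}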
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
index 84b2707d8b17..29bffc6afcc1 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
@@ -497,6 +497,43 @@ static int i915_gem_init_stolen(struct drm_i915_private *i915)
return 0;
}
+static void dbg_poison(struct i915_ggtt *ggtt,
+ dma_addr_t addr, resource_size_t size,
+ u8 x)
+{
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
+ if (!drm_mm_node_allocated(&ggtt->error_capture))
+ return;
+
+ if (ggtt->vm.bind_async_flags & I915_VMA_GLOBAL_BIND)
+ return; /* beware stop_machine() inversion */
+
+ GEM_BUG_ON(!IS_ALIGNED(size, PAGE_SIZE));
+
+ mutex_lock(&ggtt->error_mutex);
+ while (size) {
+ void __iomem *s;
+
+ ggtt->vm.insert_page(&ggtt->vm, addr,
+ ggtt->error_capture.start,
+ I915_CACHE_NONE, 0);
+ mb();
+
+ s = io_mapping_map_wc(&ggtt->iomap,
+ ggtt->error_capture.start,
+ PAGE_SIZE);
+ memset_io(s, x, PAGE_SIZE);
+ io_mapping_unmap(s);
+
+ addr += PAGE_SIZE;
+ size -= PAGE_SIZE;
+ }
+ mb();
+ ggtt->vm.clear_range(&ggtt->vm, ggtt->error_capture.start, PAGE_SIZE);
+ mutex_unlock(&ggtt->error_mutex);
+#endif
+}
+
static struct sg_table *
i915_pages_create_for_stolen(struct drm_device *dev,
resource_size_t offset, resource_size_t size)
@@ -540,6 +577,11 @@ static int i915_gem_object_get_pages_stolen(struct drm_i915_gem_object *obj)
if (IS_ERR(pages))
return PTR_ERR(pages);
+ dbg_poison(&to_i915(obj->base.dev)->ggtt,
+ sg_dma_address(pages->sgl),
+ sg_dma_len(pages->sgl),
+ POISON_INUSE);
+
__i915_gem_object_set_pages(obj, pages, obj->stolen->size);
return 0;
@@ -549,6 +591,12 @@ static void i915_gem_object_put_pages_stolen(struct drm_i915_gem_object *obj,
struct sg_table *pages)
{
/* Should only be called from i915_gem_object_release_stolen() */
+
+ dbg_poison(&to_i915(obj->base.dev)->ggtt,
+ sg_dma_address(pages->sgl),
+ sg_dma_len(pages->sgl),
+ POISON_FREE);
+
sg_free_table(pages);
kfree(pages);
}
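Note on dbg_poison(): stolen memory is not reachable through the CPU's normal kernel mappings, so the helper goes through the GGTT instead, reusing the preallocated error_capture PTE slot to map one page at a time write-combined and memset_io() the poison byte into it. POISON_INUSE is written when the pages are acquired and POISON_FREE when they are released, making stale use of stolen backing store easy to spot; the early return when binding needs async/stop_machine() avoids the lock inversion called out in the comment, and the body compiles to a no-op unless CONFIG_DRM_I915_DEBUG_GEM is enabled.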
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c
index 0845ce1ae37c..b6d43880b0c1 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c
@@ -82,6 +82,7 @@ static int igt_dmabuf_import(void *arg)
struct drm_i915_gem_object *obj;
struct dma_buf *dmabuf;
void *obj_map, *dma_map;
+ struct dma_buf_map map;
u32 pattern[] = { 0, 0xaa, 0xcc, 0x55, 0xff };
int err, i;
@@ -110,7 +111,8 @@ static int igt_dmabuf_import(void *arg)
goto out_obj;
}
- dma_map = dma_buf_vmap(dmabuf);
+ err = dma_buf_vmap(dmabuf, &map);
+ dma_map = err ? NULL : map.vaddr;
if (!dma_map) {
pr_err("dma_buf_vmap failed\n");
err = -ENOMEM;
@@ -150,7 +152,7 @@ static int igt_dmabuf_import(void *arg)
err = 0;
out_dma_map:
- dma_buf_vunmap(dmabuf, dma_map);
+ dma_buf_vunmap(dmabuf, &map);
out_obj:
i915_gem_object_put(obj);
out_dmabuf:
@@ -163,6 +165,7 @@ static int igt_dmabuf_import_ownership(void *arg)
struct drm_i915_private *i915 = arg;
struct drm_i915_gem_object *obj;
struct dma_buf *dmabuf;
+ struct dma_buf_map map;
void *ptr;
int err;
@@ -170,7 +173,8 @@ static int igt_dmabuf_import_ownership(void *arg)
if (IS_ERR(dmabuf))
return PTR_ERR(dmabuf);
- ptr = dma_buf_vmap(dmabuf);
+ err = dma_buf_vmap(dmabuf, &map);
+ ptr = err ? NULL : map.vaddr;
if (!ptr) {
pr_err("dma_buf_vmap failed\n");
err = -ENOMEM;
@@ -178,7 +182,7 @@ static int igt_dmabuf_import_ownership(void *arg)
}
memset(ptr, 0xc5, PAGE_SIZE);
- dma_buf_vunmap(dmabuf, ptr);
+ dma_buf_vunmap(dmabuf, &map);
obj = to_intel_bo(i915_gem_prime_import(&i915->drm, dmabuf));
if (IS_ERR(obj)) {
@@ -212,6 +216,7 @@ static int igt_dmabuf_export_vmap(void *arg)
struct drm_i915_private *i915 = arg;
struct drm_i915_gem_object *obj;
struct dma_buf *dmabuf;
+ struct dma_buf_map map;
void *ptr;
int err;
@@ -228,7 +233,8 @@ static int igt_dmabuf_export_vmap(void *arg)
}
i915_gem_object_put(obj);
- ptr = dma_buf_vmap(dmabuf);
+ err = dma_buf_vmap(dmabuf, &map);
+ ptr = err ? NULL : map.vaddr;
if (!ptr) {
pr_err("dma_buf_vmap failed\n");
err = -ENOMEM;
@@ -244,7 +250,7 @@ static int igt_dmabuf_export_vmap(void *arg)
memset(ptr, 0xc5, dmabuf->size);
err = 0;
- dma_buf_vunmap(dmabuf, ptr);
+ dma_buf_vunmap(dmabuf, &map);
out:
dma_buf_put(dmabuf);
return err;
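Note: these selftest changes show the importer side of the same dma_buf_map conversion; callers now check the int return from dma_buf_vmap() and read the pointer out of map.vaddr. A minimal importer-side sketch that also handles the I/O-memory case the i915 selftests do not need (fill_dmabuf() is an illustrative helper, not part of the patch):

#include <linux/dma-buf.h>
#include <linux/dma-buf-map.h>
#include <linux/io.h>
#include <linux/string.h>

static int fill_dmabuf(struct dma_buf *dmabuf, u8 value)
{
	struct dma_buf_map map;
	int err;

	err = dma_buf_vmap(dmabuf, &map);
	if (err)
		return err;

	/* The mapping may be I/O memory; pick the accessor accordingly. */
	if (map.is_iomem)
		memset_io(map.vaddr_iomem, value, dmabuf->size);
	else
		memset(map.vaddr, value, dmabuf->size);

	dma_buf_vunmap(dmabuf, &map);
	return 0;
}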
diff --git a/drivers/gpu/drm/i915/gem/selftests/mock_dmabuf.c b/drivers/gpu/drm/i915/gem/selftests/mock_dmabuf.c
index be30b27e2926..2855d11c7a51 100644
--- a/drivers/gpu/drm/i915/gem/selftests/mock_dmabuf.c
+++ b/drivers/gpu/drm/i915/gem/selftests/mock_dmabuf.c
@@ -61,18 +61,24 @@ static void mock_dmabuf_release(struct dma_buf *dma_buf)
kfree(mock);
}
-static void *mock_dmabuf_vmap(struct dma_buf *dma_buf)
+static int mock_dmabuf_vmap(struct dma_buf *dma_buf, struct dma_buf_map *map)
{
struct mock_dmabuf *mock = to_mock(dma_buf);
+ void *vaddr;
- return vm_map_ram(mock->pages, mock->npages, 0);
+ vaddr = vm_map_ram(mock->pages, mock->npages, 0);
+ if (!vaddr)
+ return -ENOMEM;
+ dma_buf_map_set_vaddr(map, vaddr);
+
+ return 0;
}
-static void mock_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
+static void mock_dmabuf_vunmap(struct dma_buf *dma_buf, struct dma_buf_map *map)
{
struct mock_dmabuf *mock = to_mock(dma_buf);
- vm_unmap_ram(vaddr, mock->npages);
+ vm_unmap_ram(map->vaddr, mock->npages);
}
static int mock_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)