From e70a27d7612596d6e8bade18a1094a3213cd012e Mon Sep 17 00:00:00 2001
From: Maarten Lankhorst
Date: Tue, 23 Mar 2021 16:50:47 +0100
Subject: drm/i915/selftests: Prepare memory region tests for obj->mm.lock removal
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Use the unlocked variants for pin_map and pin_pages, and add lock around
unpinning/putting pages.

Signed-off-by: Maarten Lankhorst
Reviewed-by: Thomas Hellström
Signed-off-by: Daniel Vetter
Link: https://patchwork.freedesktop.org/patch/msgid/20210323155059.628690-59-maarten.lankhorst@linux.intel.com
---
 drivers/gpu/drm/i915/selftests/intel_memory_region.c | 18 +++++++++++-------
 1 file changed, 11 insertions(+), 7 deletions(-)

(limited to 'drivers/gpu/drm/i915/selftests/intel_memory_region.c')

diff --git a/drivers/gpu/drm/i915/selftests/intel_memory_region.c b/drivers/gpu/drm/i915/selftests/intel_memory_region.c
index ce7adfa3bca0..15ccd28f938e 100644
--- a/drivers/gpu/drm/i915/selftests/intel_memory_region.c
+++ b/drivers/gpu/drm/i915/selftests/intel_memory_region.c
@@ -31,10 +31,12 @@ static void close_objects(struct intel_memory_region *mem,
 	struct drm_i915_gem_object *obj, *on;
 
 	list_for_each_entry_safe(obj, on, objects, st_link) {
+		i915_gem_object_lock(obj, NULL);
 		if (i915_gem_object_has_pinned_pages(obj))
 			i915_gem_object_unpin_pages(obj);
 		/* No polluting the memory region between tests */
 		__i915_gem_object_put_pages(obj);
+		i915_gem_object_unlock(obj);
 		list_del(&obj->st_link);
 		i915_gem_object_put(obj);
 	}
@@ -69,7 +71,7 @@ static int igt_mock_fill(void *arg)
 			break;
 		}
 
-		err = i915_gem_object_pin_pages(obj);
+		err = i915_gem_object_pin_pages_unlocked(obj);
 		if (err) {
 			i915_gem_object_put(obj);
 			break;
@@ -109,7 +111,7 @@ igt_object_create(struct intel_memory_region *mem,
 	if (IS_ERR(obj))
 		return obj;
 
-	err = i915_gem_object_pin_pages(obj);
+	err = i915_gem_object_pin_pages_unlocked(obj);
 	if (err)
 		goto put;
 
@@ -123,8 +125,10 @@ put:
 
 static void igt_object_release(struct drm_i915_gem_object *obj)
 {
+	i915_gem_object_lock(obj, NULL);
 	i915_gem_object_unpin_pages(obj);
 	__i915_gem_object_put_pages(obj);
+	i915_gem_object_unlock(obj);
 	list_del(&obj->st_link);
 	i915_gem_object_put(obj);
 }
@@ -433,7 +437,7 @@ static int igt_cpu_check(struct drm_i915_gem_object *obj, u32 dword, u32 val)
 	if (err)
 		return err;
 
-	ptr = i915_gem_object_pin_map(obj, I915_MAP_WC);
+	ptr = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WC);
 	if (IS_ERR(ptr))
 		return PTR_ERR(ptr);
 
@@ -538,7 +542,7 @@ static int igt_lmem_create(void *arg)
 	if (IS_ERR(obj))
 		return PTR_ERR(obj);
 
-	err = i915_gem_object_pin_pages(obj);
+	err = i915_gem_object_pin_pages_unlocked(obj);
 	if (err)
 		goto out_put;
 
@@ -577,7 +581,7 @@ static int igt_lmem_write_gpu(void *arg)
 		goto out_file;
 	}
 
-	err = i915_gem_object_pin_pages(obj);
+	err = i915_gem_object_pin_pages_unlocked(obj);
 	if (err)
 		goto out_put;
 
@@ -649,7 +653,7 @@ static int igt_lmem_write_cpu(void *arg)
 	if (IS_ERR(obj))
 		return PTR_ERR(obj);
 
-	vaddr = i915_gem_object_pin_map(obj, I915_MAP_WC);
+	vaddr = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WC);
 	if (IS_ERR(vaddr)) {
 		err = PTR_ERR(vaddr);
 		goto out_put;
@@ -753,7 +757,7 @@ create_region_for_mapping(struct intel_memory_region *mr, u64 size, u32 type,
 		return obj;
 	}
 
-	addr = i915_gem_object_pin_map(obj, type);
+	addr = i915_gem_object_pin_map_unlocked(obj, type);
 	if (IS_ERR(addr)) {
 		i915_gem_object_put(obj);
 		if (PTR_ERR(addr) == -ENXIO)
--
cgit v1.2.3
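A minimal sketch (not part of the patch above) of the object lifecycle the selftests follow after this conversion: pinning goes through the _unlocked helpers, which take and drop the object lock internally, while explicit unpin/put-pages sequences are wrapped in i915_gem_object_lock()/i915_gem_object_unlock(), mirroring close_objects() and igt_object_release(). The helper name exercise_object() is hypothetical.

/* Illustrative sketch only; not part of the patch. */
static int exercise_object(struct drm_i915_gem_object *obj)
{
	int err;

	/* The _unlocked variant acquires the object lock internally. */
	err = i915_gem_object_pin_pages_unlocked(obj);
	if (err)
		return err;

	/* ... poke at the backing store here ... */

	/* Unpinning and putting pages need the object lock held explicitly. */
	i915_gem_object_lock(obj, NULL);
	i915_gem_object_unpin_pages(obj);
	__i915_gem_object_put_pages(obj);
	i915_gem_object_unlock(obj);

	return 0;
}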
From adeca641bcb64f9e4fd477c0d1fe482f18934e90 Mon Sep 17 00:00:00 2001
From: Abdiel Janulgue
Date: Wed, 27 Jan 2021 13:14:13 +0000
Subject: drm/i915: introduce mem->reserved

In the following patch we need to reserve regions inaccessible to the
driver during initialization, so add mem->reserved for collecting such
regions.

v2: turn into an actual intel_memory_region_reserve api

Cc: Imre Deak
Signed-off-by: Abdiel Janulgue
Signed-off-by: Matthew Auld
Reviewed-by: Chris Wilson
Signed-off-by: Chris Wilson
Link: https://patchwork.freedesktop.org/patch/msgid/20210127131417.393872-4-matthew.auld@intel.com
Signed-off-by: Daniel Vetter
---
 drivers/gpu/drm/i915/intel_memory_region.c         | 14 ++++
 drivers/gpu/drm/i915/intel_memory_region.h         |  5 ++
 .../gpu/drm/i915/selftests/intel_memory_region.c   | 77 ++++++++++++++++++++++
 3 files changed, 96 insertions(+)

(limited to 'drivers/gpu/drm/i915/selftests/intel_memory_region.c')

diff --git a/drivers/gpu/drm/i915/intel_memory_region.c b/drivers/gpu/drm/i915/intel_memory_region.c
index b1b610bfff09..49d306b5532f 100644
--- a/drivers/gpu/drm/i915/intel_memory_region.c
+++ b/drivers/gpu/drm/i915/intel_memory_region.c
@@ -156,9 +156,22 @@ int intel_memory_region_init_buddy(struct intel_memory_region *mem)
 
 void intel_memory_region_release_buddy(struct intel_memory_region *mem)
 {
+	i915_buddy_free_list(&mem->mm, &mem->reserved);
 	i915_buddy_fini(&mem->mm);
 }
 
+int intel_memory_region_reserve(struct intel_memory_region *mem,
+				u64 offset, u64 size)
+{
+	int ret;
+
+	mutex_lock(&mem->mm_lock);
+	ret = i915_buddy_alloc_range(&mem->mm, &mem->reserved, offset, size);
+	mutex_unlock(&mem->mm_lock);
+
+	return ret;
+}
+
 struct intel_memory_region *
 intel_memory_region_create(struct drm_i915_private *i915,
 			   resource_size_t start,
@@ -185,6 +198,7 @@ intel_memory_region_create(struct drm_i915_private *i915,
 	mutex_init(&mem->objects.lock);
 	INIT_LIST_HEAD(&mem->objects.list);
 	INIT_LIST_HEAD(&mem->objects.purgeable);
+	INIT_LIST_HEAD(&mem->reserved);
 
 	mutex_init(&mem->mm_lock);
 
diff --git a/drivers/gpu/drm/i915/intel_memory_region.h b/drivers/gpu/drm/i915/intel_memory_region.h
index 6ffc0673f005..d17e4fe3123c 100644
--- a/drivers/gpu/drm/i915/intel_memory_region.h
+++ b/drivers/gpu/drm/i915/intel_memory_region.h
@@ -89,6 +89,8 @@ struct intel_memory_region {
 	unsigned int id;
 	char name[8];
 
+	struct list_head reserved;
+
 	dma_addr_t remap_addr;
 
 	struct {
@@ -113,6 +115,9 @@ void __intel_memory_region_put_pages_buddy(struct intel_memory_region *mem,
 					   struct list_head *blocks);
 void __intel_memory_region_put_block_buddy(struct i915_buddy_block *block);
 
+int intel_memory_region_reserve(struct intel_memory_region *mem,
+				u64 offset, u64 size);
+
 struct intel_memory_region *
 intel_memory_region_create(struct drm_i915_private *i915,
 			   resource_size_t start,
diff --git a/drivers/gpu/drm/i915/selftests/intel_memory_region.c b/drivers/gpu/drm/i915/selftests/intel_memory_region.c
index ce7adfa3bca0..64348528e1d5 100644
--- a/drivers/gpu/drm/i915/selftests/intel_memory_region.c
+++ b/drivers/gpu/drm/i915/selftests/intel_memory_region.c
@@ -144,6 +144,82 @@ static bool is_contiguous(struct drm_i915_gem_object *obj)
 	return true;
 }
 
+static int igt_mock_reserve(void *arg)
+{
+	struct intel_memory_region *mem = arg;
+	resource_size_t avail = resource_size(&mem->region);
+	struct drm_i915_gem_object *obj;
+	const u32 chunk_size = SZ_32M;
+	u32 i, offset, count, *order;
+	u64 allocated, cur_avail;
+	I915_RND_STATE(prng);
+	LIST_HEAD(objects);
+	int err = 0;
+
+	if (!list_empty(&mem->reserved)) {
+		pr_err("%s region reserved list is not empty\n", __func__);
+		return -EINVAL;
+	}
+
+	count = avail / chunk_size;
+	order = i915_random_order(count, &prng);
+	if (!order)
+		return 0;
+
+	/* Reserve a bunch of ranges within the region */
+	for (i = 0; i < count; ++i) {
+		u64 start = order[i] * chunk_size;
+		u64 size = i915_prandom_u32_max_state(chunk_size, &prng);
+
+		/* Allow for some really big holes */
+		if (!size)
+			continue;
+
+		size = round_up(size, PAGE_SIZE);
+		offset = igt_random_offset(&prng, 0, chunk_size, size,
+					   PAGE_SIZE);
+
+		err = intel_memory_region_reserve(mem, start + offset, size);
+		if (err) {
+			pr_err("%s failed to reserve range", __func__);
+			goto out_close;
+		}
+
+		/* XXX: maybe sanity check the block range here? */
+		avail -= size;
+	}
+
+	/* Try to see if we can allocate from the remaining space */
+	allocated = 0;
+	cur_avail = avail;
+	do {
+		u32 size = i915_prandom_u32_max_state(cur_avail, &prng);
+
+		size = max_t(u32, round_up(size, PAGE_SIZE), PAGE_SIZE);
+		obj = igt_object_create(mem, &objects, size, 0);
+		if (IS_ERR(obj)) {
+			if (PTR_ERR(obj) == -ENXIO)
+				break;
+
+			err = PTR_ERR(obj);
+			goto out_close;
+		}
+		cur_avail -= size;
+		allocated += size;
+	} while (1);
+
+	if (allocated != avail) {
+		pr_err("%s mismatch between allocation and free space", __func__);
+		err = -EINVAL;
+	}
+
+out_close:
+	kfree(order);
+	close_objects(mem, &objects);
+	i915_buddy_free_list(&mem->mm, &mem->reserved);
+	return err;
+}
+
 static int igt_mock_contiguous(void *arg)
 {
 	struct intel_memory_region *mem = arg;
@@ -930,6 +1006,7 @@ static int perf_memcpy(void *arg)
 int intel_memory_region_mock_selftests(void)
 {
 	static const struct i915_subtest tests[] = {
+		SUBTEST(igt_mock_reserve),
 		SUBTEST(igt_mock_fill),
 		SUBTEST(igt_mock_contiguous),
 		SUBTEST(igt_mock_splintered_region),
--
cgit v1.2.3
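A rough usage sketch of the new API (an assumption, not taken from either patch: the caller name reserve_inaccessible_range() and the SZ_1M/SZ_8M window are made up; only intel_memory_region_reserve(), mem->reserved and intel_memory_region_release_buddy() come from the patch above). Region setup code could carve out a range the driver must never allocate from like this:

/* Hypothetical caller; only intel_memory_region_reserve() is from the patch. */
static int reserve_inaccessible_range(struct intel_memory_region *mem)
{
	const u64 offset = SZ_1M;	/* assumed start of the unusable window */
	const u64 size = SZ_8M;		/* assumed size of the unusable window */
	int err;

	/*
	 * Pulls the range out of the buddy allocator under mem->mm_lock and
	 * parks the blocks on mem->reserved; they are freed again in
	 * intel_memory_region_release_buddy().
	 */
	err = intel_memory_region_reserve(mem, offset, size);
	if (err)
		return err;

	return 0;
}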