author:    Linus Torvalds <torvalds@linux-foundation.org>  2017-11-16 07:42:10 +0300
committer: Linus Torvalds <torvalds@linux-foundation.org>  2017-11-16 07:42:10 +0300
commit:    e60e1ee60630cafef5e430c2ae364877e061d980 (patch)
tree:      816aeef8fe8d4a2c6a1ebbc7a350839bac8dd4c2 /drivers/gpu/drm/i915/selftests
parent:    5d352e69c60e54b5f04d6e337a1d2bf0dbf3d94a (diff)
parent:    f150891fd9878ef0d9197c4e8451ce67c3bdd014 (diff)
Merge tag 'drm-for-v4.15' of git://people.freedesktop.org/~airlied/linux
Pull drm updates from Dave Airlie:
 "This is the main drm pull request for v4.15.

  Core:
   - Atomic object lifetime fixes
   - Atomic iterator improvements
   - Sparse/smatch fixes
   - Legacy kms ioctls to be interruptible
   - EDID override improvements
   - fb/gem helper cleanups
   - Simple outreachy patches
   - Documentation improvements
   - Fix dma-buf rcu races
   - DRM mode object leasing for improving VR use cases.
   - vgaarb improvements for non-x86 platforms.

  New driver:
   - tve200: Faraday Technology TVE200 block.

     This "TV Encoder" encodes a ITU-T BT.656 stream and can be found in
     the StorLink SL3516 (later Cortina Systems CS3516) as well as the
     Grain Media GM8180.

  New bridges:
   - SiI9234 support

  New panels:
   - S6E63J0X03, OTM8009A, Seiko 43WVF1G, 7" rpi touch panel, Toshiba
     LT089AC19000, Innolux AT043TN24

  i915:
   - Remove Coffeelake from alpha support
   - Cannonlake workarounds
   - Infoframe refactoring for DisplayPort
   - VBT updates
   - DisplayPort vswing/emph/buffer translation refactoring
   - CCS fixes
   - Restore GPU clock boost on missed vblanks
   - Scatter list updates for userptr allocations
   - Gen9+ transition watermarks
   - Display IPC (Isochronous Priority Control)
   - Private PAT management
   - GVT: improved error handling and pci config sanitizing
   - Execlist refactoring
   - Transparent Huge Page support
   - User defined priorities support
   - HuC/GuC firmware refactoring
   - DP MST fixes
   - eDP power sequencing fixes
   - Use RCU instead of stop_machine
   - PSR state tracking support
   - Eviction fixes
   - BDW DP aux channel timeout fixes
   - LSPCON fixes
   - Cannonlake PLL fixes

  amdgpu:
   - Per VM BO support
   - Powerplay cleanups
   - CI powerplay support
   - PASID mgr for kfd
   - SR-IOV fixes
   - initial GPU reset for vega10
   - Prime mmap support
   - TTM updates
   - Clock query interface for Raven
   - Fence to handle ioctl
   - UVD encode ring support on Polaris
   - Transparent huge page DMA support
   - Compute LRU pipe tweaks
   - BO flag to allow buffers to opt out of implicit sync
   - CTX priority setting API
   - VRAM lost infrastructure plumbing

  qxl:
   - fix flicker since atomic rework

  amdkfd:
   - Further improvements from internal AMD tree
   - Usermode events
   - Drop radeon support

  nouveau:
   - Pascal temperature sensor support
   - Improved BAR2 handling
   - MMU rework to support Pascal MMU

  exynos:
   - Improved HDMI/mixer support
   - HDMI audio interface support

  tegra:
   - Prep work for tegra186
   - Cleanup/fixes

  msm:
   - Preemption support for a5xx
   - Display fixes for 8x96 (snapdragon 820)
   - Async cursor plane fixes
   - FW loading rework
   - GPU debugging improvements

  vc4:
   - Prep for DSI panels
   - fix T-format tiling scanout
   - New madvise ioctl

  Rockchip:
   - LVDS support

  omapdrm:
   - omap4 HDMI CEC support

  etnaviv:
   - GPU performance counters groundwork

  sun4i:
   - refactor driver load + TCON backend
   - HDMI improvements
   - A31 support
   - Misc fixes

  udl:
   - Probe/EDID read fixes.

  tilcdc:
   - Misc fixes.

  pl111:
   - Support more variants

  adv7511:
   - Improve EDID handling.
   - HDMI CEC support

  sii8620:
   - Add remote control support"

* tag 'drm-for-v4.15' of git://people.freedesktop.org/~airlied/linux: (1480 commits)
  drm/rockchip: analogix_dp: Use mutex rather than spinlock
  drm/mode_object: fix documentation for object lookups.
  drm/i915: Reorder context-close to avoid calling i915_vma_close() under RCU
  drm/i915: Move init_clock_gating() back to where it was
  drm/i915: Prune the reservation shared fence array
  drm/i915: Idle the GPU before shinking everything
  drm/i915: Lock llist_del_first() vs llist_del_all()
  drm/i915: Calculate ironlake intermediate watermarks correctly, v2.
  drm/i915: Disable lazy PPGTT page table optimization for vGPU
  drm/i915/execlists: Remove the priority "optimisation"
  drm/i915: Filter out spurious execlists context-switch interrupts
  drm/amdgpu: use irq-safe lock for kiq->ring_lock
  drm/amdgpu: bypass lru touch for KIQ ring submission
  drm/amdgpu: Potential uninitialized variable in amdgpu_vm_update_directories()
  drm/amdgpu: potential uninitialized variable in amdgpu_vce_ring_parse_cs()
  drm/amd/powerplay: initialize a variable before using it
  drm/amd/powerplay: suppress KASAN out of bounds warning in vega10_populate_all_memory_levels
  drm/amd/amdgpu: fix evicted VRAM bo adjudgement condition
  drm/vblank: Tune drm_crtc_accurate_vblank_count() WARN down to a debug
  drm/rockchip: add CONFIG_OF dependency for lvds
  ...
Diffstat (limited to 'drivers/gpu/drm/i915/selftests')
-rw-r--r--  drivers/gpu/drm/i915/selftests/huge_gem_object.c        14
-rw-r--r--  drivers/gpu/drm/i915/selftests/huge_pages.c           1734
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_gem_context.c        2
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_gem_evict.c        162
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_gem_gtt.c           15
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_gem_object.c         8
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_gem_request.c       12
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_gem_timeline.c       2
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_live_selftests.h     2
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_mock_selftests.h     1
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_random.c             5
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_random.h             5
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_sw_fence.c          42
-rw-r--r--  drivers/gpu/drm/i915/selftests/intel_hangcheck.c        50
-rw-r--r--  drivers/gpu/drm/i915/selftests/lib_sw_fence.c           78
-rw-r--r--  drivers/gpu/drm/i915/selftests/lib_sw_fence.h           42
-rw-r--r--  drivers/gpu/drm/i915/selftests/mock_context.c            6
-rw-r--r--  drivers/gpu/drm/i915/selftests/mock_engine.c             8
-rw-r--r--  drivers/gpu/drm/i915/selftests/mock_gem_device.c        20
-rw-r--r--  drivers/gpu/drm/i915/selftests/mock_gtt.c               11
-rw-r--r--  drivers/gpu/drm/i915/selftests/scatterlist.c            15
21 files changed, 2175 insertions, 59 deletions
diff --git a/drivers/gpu/drm/i915/selftests/huge_gem_object.c b/drivers/gpu/drm/i915/selftests/huge_gem_object.c
index c5c7e8efbdd3..a2632df39173 100644
--- a/drivers/gpu/drm/i915/selftests/huge_gem_object.c
+++ b/drivers/gpu/drm/i915/selftests/huge_gem_object.c
@@ -37,8 +37,7 @@ static void huge_free_pages(struct drm_i915_gem_object *obj,
kfree(pages);
}
-static struct sg_table *
-huge_get_pages(struct drm_i915_gem_object *obj)
+static int huge_get_pages(struct drm_i915_gem_object *obj)
{
#define GFP (GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY)
const unsigned long nreal = obj->scratch / PAGE_SIZE;
@@ -49,11 +48,11 @@ huge_get_pages(struct drm_i915_gem_object *obj)
pages = kmalloc(sizeof(*pages), GFP);
if (!pages)
- return ERR_PTR(-ENOMEM);
+ return -ENOMEM;
if (sg_alloc_table(pages, npages, GFP)) {
kfree(pages);
- return ERR_PTR(-ENOMEM);
+ return -ENOMEM;
}
sg = pages->sgl;
@@ -81,11 +80,14 @@ huge_get_pages(struct drm_i915_gem_object *obj)
if (i915_gem_gtt_prepare_pages(obj, pages))
goto err;
- return pages;
+ __i915_gem_object_set_pages(obj, pages, PAGE_SIZE);
+
+ return 0;
err:
huge_free_pages(obj, pages);
- return ERR_PTR(-ENOMEM);
+
+ return -ENOMEM;
#undef GFP
}
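
The hunk above shows the recurring pattern of this series: the ->get_pages() hook no longer hands an sg_table back to the caller, it installs the pages itself via __i915_gem_object_set_pages() (recording the backing page sizes) and returns 0 or a negative errno. A minimal sketch of the new contract -- illustration only, where build_huge_sg() is a hypothetical stand-in for the allocation shown above:

    /* Old: static struct sg_table *get_pages(obj) -- the caller installed the table.
     * New: the hook installs the table itself and reports only success/failure.
     */
    static int get_pages(struct drm_i915_gem_object *obj)
    {
    	struct sg_table *pages;

    	pages = build_huge_sg(obj);	/* hypothetical helper */
    	if (IS_ERR(pages))
    		return PTR_ERR(pages);	/* plain errno, no ERR_PTR dance */

    	__i915_gem_object_set_pages(obj, pages, PAGE_SIZE);
    	return 0;
    }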
diff --git a/drivers/gpu/drm/i915/selftests/huge_pages.c b/drivers/gpu/drm/i915/selftests/huge_pages.c
new file mode 100644
index 000000000000..5cc8101bb2b1
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/huge_pages.c
@@ -0,0 +1,1734 @@
+/*
+ * Copyright © 2017 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#include "../i915_selftest.h"
+
+#include <linux/prime_numbers.h>
+
+#include "mock_drm.h"
+
+static const unsigned int page_sizes[] = {
+ I915_GTT_PAGE_SIZE_2M,
+ I915_GTT_PAGE_SIZE_64K,
+ I915_GTT_PAGE_SIZE_4K,
+};
+
+static unsigned int get_largest_page_size(struct drm_i915_private *i915,
+ u64 rem)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(page_sizes); ++i) {
+ unsigned int page_size = page_sizes[i];
+
+ if (HAS_PAGE_SIZES(i915, page_size) && rem >= page_size)
+ return page_size;
+ }
+
+ return 0;
+}
+
+static void huge_pages_free_pages(struct sg_table *st)
+{
+ struct scatterlist *sg;
+
+ for (sg = st->sgl; sg; sg = __sg_next(sg)) {
+ if (sg_page(sg))
+ __free_pages(sg_page(sg), get_order(sg->length));
+ }
+
+ sg_free_table(st);
+ kfree(st);
+}
+
+static int get_huge_pages(struct drm_i915_gem_object *obj)
+{
+#define GFP (GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY)
+ unsigned int page_mask = obj->mm.page_mask;
+ struct sg_table *st;
+ struct scatterlist *sg;
+ unsigned int sg_page_sizes;
+ u64 rem;
+
+ st = kmalloc(sizeof(*st), GFP);
+ if (!st)
+ return -ENOMEM;
+
+ if (sg_alloc_table(st, obj->base.size >> PAGE_SHIFT, GFP)) {
+ kfree(st);
+ return -ENOMEM;
+ }
+
+ rem = obj->base.size;
+ sg = st->sgl;
+ st->nents = 0;
+ sg_page_sizes = 0;
+
+ /*
+ * Our goal here is simple, we want to greedily fill the object from
+ * largest to smallest page-size, while ensuring that we use *every*
+ * page-size as per the given page-mask.
+ */
+ do {
+ unsigned int bit = ilog2(page_mask);
+ unsigned int page_size = BIT(bit);
+ int order = get_order(page_size);
+
+ do {
+ struct page *page;
+
+ GEM_BUG_ON(order >= MAX_ORDER);
+ page = alloc_pages(GFP | __GFP_ZERO, order);
+ if (!page)
+ goto err;
+
+ sg_set_page(sg, page, page_size, 0);
+ sg_page_sizes |= page_size;
+ st->nents++;
+
+ rem -= page_size;
+ if (!rem) {
+ sg_mark_end(sg);
+ break;
+ }
+
+ sg = __sg_next(sg);
+ } while ((rem - ((page_size-1) & page_mask)) >= page_size);
+
+ page_mask &= (page_size-1);
+ } while (page_mask);
+
+ if (i915_gem_gtt_prepare_pages(obj, st))
+ goto err;
+
+ obj->mm.madv = I915_MADV_DONTNEED;
+
+ GEM_BUG_ON(sg_page_sizes != obj->mm.page_mask);
+ __i915_gem_object_set_pages(obj, st, sg_page_sizes);
+
+ return 0;
+
+err:
+ sg_set_page(sg, NULL, 0, 0);
+ sg_mark_end(sg);
+ huge_pages_free_pages(st);
+
+ return -ENOMEM;
+}
+
+static void put_huge_pages(struct drm_i915_gem_object *obj,
+ struct sg_table *pages)
+{
+ i915_gem_gtt_finish_pages(obj, pages);
+ huge_pages_free_pages(pages);
+
+ obj->mm.dirty = false;
+ obj->mm.madv = I915_MADV_WILLNEED;
+}
+
+static const struct drm_i915_gem_object_ops huge_page_ops = {
+ .flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE |
+ I915_GEM_OBJECT_IS_SHRINKABLE,
+ .get_pages = get_huge_pages,
+ .put_pages = put_huge_pages,
+};
+
+static struct drm_i915_gem_object *
+huge_pages_object(struct drm_i915_private *i915,
+ u64 size,
+ unsigned int page_mask)
+{
+ struct drm_i915_gem_object *obj;
+
+ GEM_BUG_ON(!size);
+ GEM_BUG_ON(!IS_ALIGNED(size, BIT(__ffs(page_mask))));
+
+ if (size >> PAGE_SHIFT > INT_MAX)
+ return ERR_PTR(-E2BIG);
+
+ if (overflows_type(size, obj->base.size))
+ return ERR_PTR(-E2BIG);
+
+ obj = i915_gem_object_alloc(i915);
+ if (!obj)
+ return ERR_PTR(-ENOMEM);
+
+ drm_gem_private_object_init(&i915->drm, &obj->base, size);
+ i915_gem_object_init(obj, &huge_page_ops);
+
+ obj->base.write_domain = I915_GEM_DOMAIN_CPU;
+ obj->base.read_domains = I915_GEM_DOMAIN_CPU;
+ obj->cache_level = I915_CACHE_NONE;
+
+ obj->mm.page_mask = page_mask;
+
+ return obj;
+}
+
+static int fake_get_huge_pages(struct drm_i915_gem_object *obj)
+{
+ struct drm_i915_private *i915 = to_i915(obj->base.dev);
+ const u64 max_len = rounddown_pow_of_two(UINT_MAX);
+ struct sg_table *st;
+ struct scatterlist *sg;
+ unsigned int sg_page_sizes;
+ u64 rem;
+
+ st = kmalloc(sizeof(*st), GFP);
+ if (!st)
+ return -ENOMEM;
+
+ if (sg_alloc_table(st, obj->base.size >> PAGE_SHIFT, GFP)) {
+ kfree(st);
+ return -ENOMEM;
+ }
+
+ /* Use optimal page sized chunks to fill in the sg table */
+ rem = obj->base.size;
+ sg = st->sgl;
+ st->nents = 0;
+ sg_page_sizes = 0;
+ do {
+ unsigned int page_size = get_largest_page_size(i915, rem);
+ unsigned int len = min(page_size * div_u64(rem, page_size),
+ max_len);
+
+ GEM_BUG_ON(!page_size);
+
+ sg->offset = 0;
+ sg->length = len;
+ sg_dma_len(sg) = len;
+ sg_dma_address(sg) = page_size;
+
+ sg_page_sizes |= len;
+
+ st->nents++;
+
+ rem -= len;
+ if (!rem) {
+ sg_mark_end(sg);
+ break;
+ }
+
+ sg = sg_next(sg);
+ } while (1);
+
+ obj->mm.madv = I915_MADV_DONTNEED;
+
+ __i915_gem_object_set_pages(obj, st, sg_page_sizes);
+
+ return 0;
+}
+
+static int fake_get_huge_pages_single(struct drm_i915_gem_object *obj)
+{
+ struct drm_i915_private *i915 = to_i915(obj->base.dev);
+ struct sg_table *st;
+ struct scatterlist *sg;
+ unsigned int page_size;
+
+ st = kmalloc(sizeof(*st), GFP);
+ if (!st)
+ return -ENOMEM;
+
+ if (sg_alloc_table(st, 1, GFP)) {
+ kfree(st);
+ return -ENOMEM;
+ }
+
+ sg = st->sgl;
+ st->nents = 1;
+
+ page_size = get_largest_page_size(i915, obj->base.size);
+ GEM_BUG_ON(!page_size);
+
+ sg->offset = 0;
+ sg->length = obj->base.size;
+ sg_dma_len(sg) = obj->base.size;
+ sg_dma_address(sg) = page_size;
+
+ obj->mm.madv = I915_MADV_DONTNEED;
+
+ __i915_gem_object_set_pages(obj, st, sg->length);
+
+ return 0;
+#undef GFP
+}
+
+static void fake_free_huge_pages(struct drm_i915_gem_object *obj,
+ struct sg_table *pages)
+{
+ sg_free_table(pages);
+ kfree(pages);
+}
+
+static void fake_put_huge_pages(struct drm_i915_gem_object *obj,
+ struct sg_table *pages)
+{
+ fake_free_huge_pages(obj, pages);
+ obj->mm.dirty = false;
+ obj->mm.madv = I915_MADV_WILLNEED;
+}
+
+static const struct drm_i915_gem_object_ops fake_ops = {
+ .flags = I915_GEM_OBJECT_IS_SHRINKABLE,
+ .get_pages = fake_get_huge_pages,
+ .put_pages = fake_put_huge_pages,
+};
+
+static const struct drm_i915_gem_object_ops fake_ops_single = {
+ .flags = I915_GEM_OBJECT_IS_SHRINKABLE,
+ .get_pages = fake_get_huge_pages_single,
+ .put_pages = fake_put_huge_pages,
+};
+
+static struct drm_i915_gem_object *
+fake_huge_pages_object(struct drm_i915_private *i915, u64 size, bool single)
+{
+ struct drm_i915_gem_object *obj;
+
+ GEM_BUG_ON(!size);
+ GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
+
+ if (size >> PAGE_SHIFT > UINT_MAX)
+ return ERR_PTR(-E2BIG);
+
+ if (overflows_type(size, obj->base.size))
+ return ERR_PTR(-E2BIG);
+
+ obj = i915_gem_object_alloc(i915);
+ if (!obj)
+ return ERR_PTR(-ENOMEM);
+
+ drm_gem_private_object_init(&i915->drm, &obj->base, size);
+
+ if (single)
+ i915_gem_object_init(obj, &fake_ops_single);
+ else
+ i915_gem_object_init(obj, &fake_ops);
+
+ obj->base.write_domain = I915_GEM_DOMAIN_CPU;
+ obj->base.read_domains = I915_GEM_DOMAIN_CPU;
+ obj->cache_level = I915_CACHE_NONE;
+
+ return obj;
+}
+
+static int igt_check_page_sizes(struct i915_vma *vma)
+{
+ struct drm_i915_private *i915 = to_i915(vma->obj->base.dev);
+ unsigned int supported = INTEL_INFO(i915)->page_sizes;
+ struct drm_i915_gem_object *obj = vma->obj;
+ int err = 0;
+
+ if (!HAS_PAGE_SIZES(i915, vma->page_sizes.sg)) {
+ pr_err("unsupported page_sizes.sg=%u, supported=%u\n",
+ vma->page_sizes.sg & ~supported, supported);
+ err = -EINVAL;
+ }
+
+ if (!HAS_PAGE_SIZES(i915, vma->page_sizes.gtt)) {
+ pr_err("unsupported page_sizes.gtt=%u, supported=%u\n",
+ vma->page_sizes.gtt & ~supported, supported);
+ err = -EINVAL;
+ }
+
+ if (vma->page_sizes.phys != obj->mm.page_sizes.phys) {
+ pr_err("vma->page_sizes.phys(%u) != obj->mm.page_sizes.phys(%u)\n",
+ vma->page_sizes.phys, obj->mm.page_sizes.phys);
+ err = -EINVAL;
+ }
+
+ if (vma->page_sizes.sg != obj->mm.page_sizes.sg) {
+ pr_err("vma->page_sizes.sg(%u) != obj->mm.page_sizes.sg(%u)\n",
+ vma->page_sizes.sg, obj->mm.page_sizes.sg);
+ err = -EINVAL;
+ }
+
+ if (obj->mm.page_sizes.gtt) {
+ pr_err("obj->page_sizes.gtt(%u) should never be set\n",
+ obj->mm.page_sizes.gtt);
+ err = -EINVAL;
+ }
+
+ return err;
+}
+
+static int igt_mock_exhaust_device_supported_pages(void *arg)
+{
+ struct i915_hw_ppgtt *ppgtt = arg;
+ struct drm_i915_private *i915 = ppgtt->base.i915;
+ unsigned int saved_mask = INTEL_INFO(i915)->page_sizes;
+ struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
+ int i, j, single;
+ int err;
+
+ /*
+ * Sanity check creating objects with every valid page support
+ * combination for our mock device.
+ */
+
+ for (i = 1; i < BIT(ARRAY_SIZE(page_sizes)); i++) {
+ unsigned int combination = 0;
+
+ for (j = 0; j < ARRAY_SIZE(page_sizes); j++) {
+ if (i & BIT(j))
+ combination |= page_sizes[j];
+ }
+
+ mkwrite_device_info(i915)->page_sizes = combination;
+
+ for (single = 0; single <= 1; ++single) {
+ obj = fake_huge_pages_object(i915, combination, !!single);
+ if (IS_ERR(obj)) {
+ err = PTR_ERR(obj);
+ goto out_device;
+ }
+
+ if (obj->base.size != combination) {
+ pr_err("obj->base.size=%zu, expected=%u\n",
+ obj->base.size, combination);
+ err = -EINVAL;
+ goto out_put;
+ }
+
+ vma = i915_vma_instance(obj, &ppgtt->base, NULL);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto out_put;
+ }
+
+ err = i915_vma_pin(vma, 0, 0, PIN_USER);
+ if (err)
+ goto out_close;
+
+ err = igt_check_page_sizes(vma);
+
+ if (vma->page_sizes.sg != combination) {
+ pr_err("page_sizes.sg=%u, expected=%u\n",
+ vma->page_sizes.sg, combination);
+ err = -EINVAL;
+ }
+
+ i915_vma_unpin(vma);
+ i915_vma_close(vma);
+
+ i915_gem_object_put(obj);
+
+ if (err)
+ goto out_device;
+ }
+ }
+
+ goto out_device;
+
+out_close:
+ i915_vma_close(vma);
+out_put:
+ i915_gem_object_put(obj);
+out_device:
+ mkwrite_device_info(i915)->page_sizes = saved_mask;
+
+ return err;
+}
+
+static int igt_mock_ppgtt_misaligned_dma(void *arg)
+{
+ struct i915_hw_ppgtt *ppgtt = arg;
+ struct drm_i915_private *i915 = ppgtt->base.i915;
+ unsigned long supported = INTEL_INFO(i915)->page_sizes;
+ struct drm_i915_gem_object *obj;
+ int bit;
+ int err;
+
+ /*
+ * Sanity check dma misalignment for huge pages -- the dma addresses we
+ * insert into the paging structures need to always respect the page
+ * size alignment.
+ */
+
+ bit = ilog2(I915_GTT_PAGE_SIZE_64K);
+
+ for_each_set_bit_from(bit, &supported,
+ ilog2(I915_GTT_MAX_PAGE_SIZE) + 1) {
+ IGT_TIMEOUT(end_time);
+ unsigned int page_size = BIT(bit);
+ unsigned int flags = PIN_USER | PIN_OFFSET_FIXED;
+ unsigned int offset;
+ unsigned int size =
+ round_up(page_size, I915_GTT_PAGE_SIZE_2M) << 1;
+ struct i915_vma *vma;
+
+ obj = fake_huge_pages_object(i915, size, true);
+ if (IS_ERR(obj))
+ return PTR_ERR(obj);
+
+ if (obj->base.size != size) {
+ pr_err("obj->base.size=%zu, expected=%u\n",
+ obj->base.size, size);
+ err = -EINVAL;
+ goto out_put;
+ }
+
+ err = i915_gem_object_pin_pages(obj);
+ if (err)
+ goto out_put;
+
+ /* Force the page size for this object */
+ obj->mm.page_sizes.sg = page_size;
+
+ vma = i915_vma_instance(obj, &ppgtt->base, NULL);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto out_unpin;
+ }
+
+ err = i915_vma_pin(vma, 0, 0, flags);
+ if (err) {
+ i915_vma_close(vma);
+ goto out_unpin;
+ }
+
+ err = igt_check_page_sizes(vma);
+
+ if (vma->page_sizes.gtt != page_size) {
+ pr_err("page_sizes.gtt=%u, expected %u\n",
+ vma->page_sizes.gtt, page_size);
+ err = -EINVAL;
+ }
+
+ i915_vma_unpin(vma);
+
+ if (err) {
+ i915_vma_close(vma);
+ goto out_unpin;
+ }
+
+ /*
+ * Try all the other valid offsets until the next
+ * boundary -- should always fall back to using 4K
+ * pages.
+ */
+ for (offset = 4096; offset < page_size; offset += 4096) {
+ err = i915_vma_unbind(vma);
+ if (err) {
+ i915_vma_close(vma);
+ goto out_unpin;
+ }
+
+ err = i915_vma_pin(vma, 0, 0, flags | offset);
+ if (err) {
+ i915_vma_close(vma);
+ goto out_unpin;
+ }
+
+ err = igt_check_page_sizes(vma);
+
+ if (vma->page_sizes.gtt != I915_GTT_PAGE_SIZE_4K) {
+ pr_err("page_sizes.gtt=%u, expected %lu\n",
+ vma->page_sizes.gtt, I915_GTT_PAGE_SIZE_4K);
+ err = -EINVAL;
+ }
+
+ i915_vma_unpin(vma);
+
+ if (err) {
+ i915_vma_close(vma);
+ goto out_unpin;
+ }
+
+ if (igt_timeout(end_time,
+ "%s timed out at offset %x with page-size %x\n",
+ __func__, offset, page_size))
+ break;
+ }
+
+ i915_vma_close(vma);
+
+ i915_gem_object_unpin_pages(obj);
+ i915_gem_object_put(obj);
+ }
+
+ return 0;
+
+out_unpin:
+ i915_gem_object_unpin_pages(obj);
+out_put:
+ i915_gem_object_put(obj);
+
+ return err;
+}
+
+static void close_object_list(struct list_head *objects,
+ struct i915_hw_ppgtt *ppgtt)
+{
+ struct drm_i915_gem_object *obj, *on;
+
+ list_for_each_entry_safe(obj, on, objects, st_link) {
+ struct i915_vma *vma;
+
+ vma = i915_vma_instance(obj, &ppgtt->base, NULL);
+ if (!IS_ERR(vma))
+ i915_vma_close(vma);
+
+ list_del(&obj->st_link);
+ i915_gem_object_unpin_pages(obj);
+ i915_gem_object_put(obj);
+ }
+}
+
+static int igt_mock_ppgtt_huge_fill(void *arg)
+{
+ struct i915_hw_ppgtt *ppgtt = arg;
+ struct drm_i915_private *i915 = ppgtt->base.i915;
+ unsigned long max_pages = ppgtt->base.total >> PAGE_SHIFT;
+ unsigned long page_num;
+ bool single = false;
+ LIST_HEAD(objects);
+ IGT_TIMEOUT(end_time);
+ int err = -ENODEV;
+
+ for_each_prime_number_from(page_num, 1, max_pages) {
+ struct drm_i915_gem_object *obj;
+ u64 size = page_num << PAGE_SHIFT;
+ struct i915_vma *vma;
+ unsigned int expected_gtt = 0;
+ int i;
+
+ obj = fake_huge_pages_object(i915, size, single);
+ if (IS_ERR(obj)) {
+ err = PTR_ERR(obj);
+ break;
+ }
+
+ if (obj->base.size != size) {
+ pr_err("obj->base.size=%zd, expected=%llu\n",
+ obj->base.size, size);
+ i915_gem_object_put(obj);
+ err = -EINVAL;
+ break;
+ }
+
+ err = i915_gem_object_pin_pages(obj);
+ if (err) {
+ i915_gem_object_put(obj);
+ break;
+ }
+
+ list_add(&obj->st_link, &objects);
+
+ vma = i915_vma_instance(obj, &ppgtt->base, NULL);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ break;
+ }
+
+ err = i915_vma_pin(vma, 0, 0, PIN_USER);
+ if (err)
+ break;
+
+ err = igt_check_page_sizes(vma);
+ if (err) {
+ i915_vma_unpin(vma);
+ break;
+ }
+
+ /*
+ * Figure out the expected gtt page size knowing that we go from
+ * largest to smallest page size sg chunks, and that we align to
+ * the largest page size.
+ */
+ for (i = 0; i < ARRAY_SIZE(page_sizes); ++i) {
+ unsigned int page_size = page_sizes[i];
+
+ if (HAS_PAGE_SIZES(i915, page_size) &&
+ size >= page_size) {
+ expected_gtt |= page_size;
+ size &= page_size-1;
+ }
+ }
+
+ GEM_BUG_ON(!expected_gtt);
+ GEM_BUG_ON(size);
+
+ if (expected_gtt & I915_GTT_PAGE_SIZE_4K)
+ expected_gtt &= ~I915_GTT_PAGE_SIZE_64K;
+
+ i915_vma_unpin(vma);
+
+ if (vma->page_sizes.sg & I915_GTT_PAGE_SIZE_64K) {
+ if (!IS_ALIGNED(vma->node.start,
+ I915_GTT_PAGE_SIZE_2M)) {
+ pr_err("node.start(%llx) not aligned to 2M\n",
+ vma->node.start);
+ err = -EINVAL;
+ break;
+ }
+
+ if (!IS_ALIGNED(vma->node.size,
+ I915_GTT_PAGE_SIZE_2M)) {
+ pr_err("node.size(%llx) not aligned to 2M\n",
+ vma->node.size);
+ err = -EINVAL;
+ break;
+ }
+ }
+
+ if (vma->page_sizes.gtt != expected_gtt) {
+ pr_err("gtt=%u, expected=%u, size=%zd, single=%s\n",
+ vma->page_sizes.gtt, expected_gtt,
+ obj->base.size, yesno(!!single));
+ err = -EINVAL;
+ break;
+ }
+
+ if (igt_timeout(end_time,
+ "%s timed out at size %zd\n",
+ __func__, obj->base.size))
+ break;
+
+ single = !single;
+ }
+
+ close_object_list(&objects, ppgtt);
+
+ if (err == -ENOMEM || err == -ENOSPC)
+ err = 0;
+
+ return err;
+}
+
+static int igt_mock_ppgtt_64K(void *arg)
+{
+ struct i915_hw_ppgtt *ppgtt = arg;
+ struct drm_i915_private *i915 = ppgtt->base.i915;
+ struct drm_i915_gem_object *obj;
+ const struct object_info {
+ unsigned int size;
+ unsigned int gtt;
+ unsigned int offset;
+ } objects[] = {
+ /* Cases with forced padding/alignment */
+ {
+ .size = SZ_64K,
+ .gtt = I915_GTT_PAGE_SIZE_64K,
+ .offset = 0,
+ },
+ {
+ .size = SZ_64K + SZ_4K,
+ .gtt = I915_GTT_PAGE_SIZE_4K,
+ .offset = 0,
+ },
+ {
+ .size = SZ_64K - SZ_4K,
+ .gtt = I915_GTT_PAGE_SIZE_4K,
+ .offset = 0,
+ },
+ {
+ .size = SZ_2M,
+ .gtt = I915_GTT_PAGE_SIZE_64K,
+ .offset = 0,
+ },
+ {
+ .size = SZ_2M - SZ_4K,
+ .gtt = I915_GTT_PAGE_SIZE_4K,
+ .offset = 0,
+ },
+ {
+ .size = SZ_2M + SZ_4K,
+ .gtt = I915_GTT_PAGE_SIZE_64K | I915_GTT_PAGE_SIZE_4K,
+ .offset = 0,
+ },
+ {
+ .size = SZ_2M + SZ_64K,
+ .gtt = I915_GTT_PAGE_SIZE_64K,
+ .offset = 0,
+ },
+ {
+ .size = SZ_2M - SZ_64K,
+ .gtt = I915_GTT_PAGE_SIZE_64K,
+ .offset = 0,
+ },
+ /* Try without any forced padding/alignment */
+ {
+ .size = SZ_64K,
+ .offset = SZ_2M,
+ .gtt = I915_GTT_PAGE_SIZE_4K,
+ },
+ {
+ .size = SZ_128K,
+ .offset = SZ_2M - SZ_64K,
+ .gtt = I915_GTT_PAGE_SIZE_4K,
+ },
+ };
+ struct i915_vma *vma;
+ int i, single;
+ int err;
+
+ /*
+ * Sanity check some of the trickiness with 64K pages -- either we can
+ * safely mark the whole page-table (2M block) as 64K, or we have to
+ * always fall back to 4K.
+ */
+
+ if (!HAS_PAGE_SIZES(i915, I915_GTT_PAGE_SIZE_64K))
+ return 0;
+
+ for (i = 0; i < ARRAY_SIZE(objects); ++i) {
+ unsigned int size = objects[i].size;
+ unsigned int expected_gtt = objects[i].gtt;
+ unsigned int offset = objects[i].offset;
+ unsigned int flags = PIN_USER;
+
+ for (single = 0; single <= 1; single++) {
+ obj = fake_huge_pages_object(i915, size, !!single);
+ if (IS_ERR(obj))
+ return PTR_ERR(obj);
+
+ err = i915_gem_object_pin_pages(obj);
+ if (err)
+ goto out_object_put;
+
+ /*
+ * Disable 2M pages -- We only want to use 64K/4K pages
+ * for this test.
+ */
+ obj->mm.page_sizes.sg &= ~I915_GTT_PAGE_SIZE_2M;
+
+ vma = i915_vma_instance(obj, &ppgtt->base, NULL);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto out_object_unpin;
+ }
+
+ if (offset)
+ flags |= PIN_OFFSET_FIXED | offset;
+
+ err = i915_vma_pin(vma, 0, 0, flags);
+ if (err)
+ goto out_vma_close;
+
+ err = igt_check_page_sizes(vma);
+ if (err)
+ goto out_vma_unpin;
+
+ if (!offset && vma->page_sizes.sg & I915_GTT_PAGE_SIZE_64K) {
+ if (!IS_ALIGNED(vma->node.start,
+ I915_GTT_PAGE_SIZE_2M)) {
+ pr_err("node.start(%llx) not aligned to 2M\n",
+ vma->node.start);
+ err = -EINVAL;
+ goto out_vma_unpin;
+ }
+
+ if (!IS_ALIGNED(vma->node.size,
+ I915_GTT_PAGE_SIZE_2M)) {
+ pr_err("node.size(%llx) not aligned to 2M\n",
+ vma->node.size);
+ err = -EINVAL;
+ goto out_vma_unpin;
+ }
+ }
+
+ if (vma->page_sizes.gtt != expected_gtt) {
+ pr_err("gtt=%u, expected=%u, i=%d, single=%s\n",
+ vma->page_sizes.gtt, expected_gtt, i,
+ yesno(!!single));
+ err = -EINVAL;
+ goto out_vma_unpin;
+ }
+
+ i915_vma_unpin(vma);
+ i915_vma_close(vma);
+
+ i915_gem_object_unpin_pages(obj);
+ i915_gem_object_put(obj);
+ }
+ }
+
+ return 0;
+
+out_vma_unpin:
+ i915_vma_unpin(vma);
+out_vma_close:
+ i915_vma_close(vma);
+out_object_unpin:
+ i915_gem_object_unpin_pages(obj);
+out_object_put:
+ i915_gem_object_put(obj);
+
+ return err;
+}
+
+static struct i915_vma *
+gpu_write_dw(struct i915_vma *vma, u64 offset, u32 val)
+{
+ struct drm_i915_private *i915 = to_i915(vma->obj->base.dev);
+ const int gen = INTEL_GEN(vma->vm->i915);
+ unsigned int count = vma->size >> PAGE_SHIFT;
+ struct drm_i915_gem_object *obj;
+ struct i915_vma *batch;
+ unsigned int size;
+ u32 *cmd;
+ int n;
+ int err;
+
+ size = (1 + 4 * count) * sizeof(u32);
+ size = round_up(size, PAGE_SIZE);
+ obj = i915_gem_object_create_internal(i915, size);
+ if (IS_ERR(obj))
+ return ERR_CAST(obj);
+
+ cmd = i915_gem_object_pin_map(obj, I915_MAP_WB);
+ if (IS_ERR(cmd)) {
+ err = PTR_ERR(cmd);
+ goto err;
+ }
+
+ offset += vma->node.start;
+
+ for (n = 0; n < count; n++) {
+ if (gen >= 8) {
+ *cmd++ = MI_STORE_DWORD_IMM_GEN4;
+ *cmd++ = lower_32_bits(offset);
+ *cmd++ = upper_32_bits(offset);
+ *cmd++ = val;
+ } else if (gen >= 4) {
+ *cmd++ = MI_STORE_DWORD_IMM_GEN4 |
+ (gen < 6 ? 1 << 22 : 0);
+ *cmd++ = 0;
+ *cmd++ = offset;
+ *cmd++ = val;
+ } else {
+ *cmd++ = MI_STORE_DWORD_IMM | 1 << 22;
+ *cmd++ = offset;
+ *cmd++ = val;
+ }
+
+ offset += PAGE_SIZE;
+ }
+
+ *cmd = MI_BATCH_BUFFER_END;
+
+ i915_gem_object_unpin_map(obj);
+
+ err = i915_gem_object_set_to_gtt_domain(obj, false);
+ if (err)
+ goto err;
+
+ batch = i915_vma_instance(obj, vma->vm, NULL);
+ if (IS_ERR(batch)) {
+ err = PTR_ERR(batch);
+ goto err;
+ }
+
+ err = i915_vma_pin(batch, 0, 0, PIN_USER);
+ if (err)
+ goto err;
+
+ return batch;
+
+err:
+ i915_gem_object_put(obj);
+
+ return ERR_PTR(err);
+}
+
+static int gpu_write(struct i915_vma *vma,
+ struct i915_gem_context *ctx,
+ struct intel_engine_cs *engine,
+ u32 dword,
+ u32 value)
+{
+ struct drm_i915_gem_request *rq;
+ struct i915_vma *batch;
+ int flags = 0;
+ int err;
+
+ GEM_BUG_ON(!intel_engine_can_store_dword(engine));
+
+ err = i915_gem_object_set_to_gtt_domain(vma->obj, true);
+ if (err)
+ return err;
+
+ rq = i915_gem_request_alloc(engine, ctx);
+ if (IS_ERR(rq))
+ return PTR_ERR(rq);
+
+ batch = gpu_write_dw(vma, dword * sizeof(u32), value);
+ if (IS_ERR(batch)) {
+ err = PTR_ERR(batch);
+ goto err_request;
+ }
+
+ i915_vma_move_to_active(batch, rq, 0);
+ i915_gem_object_set_active_reference(batch->obj);
+ i915_vma_unpin(batch);
+ i915_vma_close(batch);
+
+ err = rq->engine->emit_flush(rq, EMIT_INVALIDATE);
+ if (err)
+ goto err_request;
+
+ err = i915_switch_context(rq);
+ if (err)
+ goto err_request;
+
+ err = rq->engine->emit_bb_start(rq,
+ batch->node.start, batch->node.size,
+ flags);
+ if (err)
+ goto err_request;
+
+ i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
+
+ reservation_object_lock(vma->resv, NULL);
+ reservation_object_add_excl_fence(vma->resv, &rq->fence);
+ reservation_object_unlock(vma->resv);
+
+err_request:
+ __i915_add_request(rq, err == 0);
+
+ return err;
+}
+
+static int cpu_check(struct drm_i915_gem_object *obj, u32 dword, u32 val)
+{
+ unsigned int needs_flush;
+ unsigned long n;
+ int err;
+
+ err = i915_gem_obj_prepare_shmem_read(obj, &needs_flush);
+ if (err)
+ return err;
+
+ for (n = 0; n < obj->base.size >> PAGE_SHIFT; ++n) {
+ u32 *ptr = kmap_atomic(i915_gem_object_get_page(obj, n));
+
+ if (needs_flush & CLFLUSH_BEFORE)
+ drm_clflush_virt_range(ptr, PAGE_SIZE);
+
+ if (ptr[dword] != val) {
+ pr_err("n=%lu ptr[%u]=%u, val=%u\n",
+ n, dword, ptr[dword], val);
+ kunmap_atomic(ptr);
+ err = -EINVAL;
+ break;
+ }
+
+ kunmap_atomic(ptr);
+ }
+
+ i915_gem_obj_finish_shmem_access(obj);
+
+ return err;
+}
+
+static int igt_write_huge(struct i915_gem_context *ctx,
+ struct drm_i915_gem_object *obj)
+{
+ struct drm_i915_private *i915 = to_i915(obj->base.dev);
+ struct i915_address_space *vm = ctx->ppgtt ? &ctx->ppgtt->base : &i915->ggtt.base;
+ struct intel_engine_cs *engine;
+ struct i915_vma *vma;
+ unsigned int flags = PIN_USER | PIN_OFFSET_FIXED;
+ unsigned int max_page_size;
+ unsigned int id;
+ u64 max;
+ u64 num;
+ u64 size;
+ int err = 0;
+
+ GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
+
+ size = obj->base.size;
+ if (obj->mm.page_sizes.sg & I915_GTT_PAGE_SIZE_64K)
+ size = round_up(size, I915_GTT_PAGE_SIZE_2M);
+
+ max_page_size = rounddown_pow_of_two(obj->mm.page_sizes.sg);
+ max = div_u64((vm->total - size), max_page_size);
+
+ vma = i915_vma_instance(obj, vm, NULL);
+ if (IS_ERR(vma))
+ return PTR_ERR(vma);
+
+ for_each_engine(engine, i915, id) {
+ IGT_TIMEOUT(end_time);
+
+ if (!intel_engine_can_store_dword(engine)) {
+ pr_info("store-dword-imm not supported on engine=%u\n",
+ id);
+ continue;
+ }
+
+ /*
+ * Try various offsets until we timeout -- we want to avoid
+ * issues hidden by effectively always using offset = 0.
+ */
+ for_each_prime_number_from(num, 0, max) {
+ u64 offset = num * max_page_size;
+ u32 dword;
+
+ err = i915_vma_unbind(vma);
+ if (err)
+ goto out_vma_close;
+
+ err = i915_vma_pin(vma, size, max_page_size, flags | offset);
+ if (err) {
+ /*
+ * The ggtt may have some pages reserved so
+ * refrain from erroring out.
+ */
+ if (err == -ENOSPC && i915_is_ggtt(vm)) {
+ err = 0;
+ continue;
+ }
+
+ goto out_vma_close;
+ }
+
+ err = igt_check_page_sizes(vma);
+ if (err)
+ goto out_vma_unpin;
+
+ dword = offset_in_page(num) / 4;
+
+ err = gpu_write(vma, ctx, engine, dword, num + 1);
+ if (err) {
+ pr_err("gpu-write failed at offset=%llx", offset);
+ goto out_vma_unpin;
+ }
+
+ err = cpu_check(obj, dword, num + 1);
+ if (err) {
+ pr_err("cpu-check failed at offset=%llx", offset);
+ goto out_vma_unpin;
+ }
+
+ i915_vma_unpin(vma);
+
+ if (num > 0 &&
+ igt_timeout(end_time,
+ "%s timed out on engine=%u at offset=%llx, max_page_size=%x\n",
+ __func__, id, offset, max_page_size))
+ break;
+ }
+ }
+
+out_vma_unpin:
+ if (i915_vma_is_pinned(vma))
+ i915_vma_unpin(vma);
+out_vma_close:
+ i915_vma_close(vma);
+
+ return err;
+}
+
+static int igt_ppgtt_exhaust_huge(void *arg)
+{
+ struct i915_gem_context *ctx = arg;
+ struct drm_i915_private *i915 = ctx->i915;
+ unsigned long supported = INTEL_INFO(i915)->page_sizes;
+ static unsigned int pages[ARRAY_SIZE(page_sizes)];
+ struct drm_i915_gem_object *obj;
+ unsigned int size_mask;
+ unsigned int page_mask;
+ int n, i;
+ int err = -ENODEV;
+
+ /*
+ * Sanity check creating objects with a varying mix of page sizes --
+ * ensuring that our writes land in the right place.
+ */
+
+ n = 0;
+ for_each_set_bit(i, &supported, ilog2(I915_GTT_MAX_PAGE_SIZE) + 1)
+ pages[n++] = BIT(i);
+
+ for (size_mask = 2; size_mask < BIT(n); size_mask++) {
+ unsigned int size = 0;
+
+ for (i = 0; i < n; i++) {
+ if (size_mask & BIT(i))
+ size |= pages[i];
+ }
+
+ /*
+ * For our page mask we want to enumerate all the page-size
+ * combinations which will fit into our chosen object size.
+ */
+ for (page_mask = 2; page_mask <= size_mask; page_mask++) {
+ unsigned int page_sizes = 0;
+
+ for (i = 0; i < n; i++) {
+ if (page_mask & BIT(i))
+ page_sizes |= pages[i];
+ }
+
+ /*
+ * Ensure that we can actually fill the given object
+ * with our chosen page mask.
+ */
+ if (!IS_ALIGNED(size, BIT(__ffs(page_sizes))))
+ continue;
+
+ obj = huge_pages_object(i915, size, page_sizes);
+ if (IS_ERR(obj)) {
+ err = PTR_ERR(obj);
+ goto out_device;
+ }
+
+ err = i915_gem_object_pin_pages(obj);
+ if (err) {
+ i915_gem_object_put(obj);
+
+ if (err == -ENOMEM) {
+ pr_info("unable to get pages, size=%u, pages=%u\n",
+ size, page_sizes);
+ err = 0;
+ break;
+ }
+
+ pr_err("pin_pages failed, size=%u, pages=%u\n",
+ size_mask, page_mask);
+
+ goto out_device;
+ }
+
+ /* Force the page-size for the gtt insertion */
+ obj->mm.page_sizes.sg = page_sizes;
+
+ err = igt_write_huge(ctx, obj);
+ if (err) {
+ pr_err("exhaust write-huge failed with size=%u\n",
+ size);
+ goto out_unpin;
+ }
+
+ i915_gem_object_unpin_pages(obj);
+ i915_gem_object_put(obj);
+ }
+ }
+
+ goto out_device;
+
+out_unpin:
+ i915_gem_object_unpin_pages(obj);
+ i915_gem_object_put(obj);
+out_device:
+ mkwrite_device_info(i915)->page_sizes = supported;
+
+ return err;
+}
+
+static int igt_ppgtt_internal_huge(void *arg)
+{
+ struct i915_gem_context *ctx = arg;
+ struct drm_i915_private *i915 = ctx->i915;
+ struct drm_i915_gem_object *obj;
+ static const unsigned int sizes[] = {
+ SZ_64K,
+ SZ_128K,
+ SZ_256K,
+ SZ_512K,
+ SZ_1M,
+ SZ_2M,
+ };
+ int i;
+ int err;
+
+ /*
+ * Sanity check that the HW uses huge pages correctly through internal
+ * -- ensure that our writes land in the right place.
+ */
+
+ for (i = 0; i < ARRAY_SIZE(sizes); ++i) {
+ unsigned int size = sizes[i];
+
+ obj = i915_gem_object_create_internal(i915, size);
+ if (IS_ERR(obj))
+ return PTR_ERR(obj);
+
+ err = i915_gem_object_pin_pages(obj);
+ if (err)
+ goto out_put;
+
+ if (obj->mm.page_sizes.phys < I915_GTT_PAGE_SIZE_64K) {
+ pr_info("internal unable to allocate huge-page(s) with size=%u\n",
+ size);
+ goto out_unpin;
+ }
+
+ err = igt_write_huge(ctx, obj);
+ if (err) {
+ pr_err("internal write-huge failed with size=%u\n",
+ size);
+ goto out_unpin;
+ }
+
+ i915_gem_object_unpin_pages(obj);
+ i915_gem_object_put(obj);
+ }
+
+ return 0;
+
+out_unpin:
+ i915_gem_object_unpin_pages(obj);
+out_put:
+ i915_gem_object_put(obj);
+
+ return err;
+}
+
+static inline bool igt_can_allocate_thp(struct drm_i915_private *i915)
+{
+ return i915->mm.gemfs && has_transparent_hugepage();
+}
+
+static int igt_ppgtt_gemfs_huge(void *arg)
+{
+ struct i915_gem_context *ctx = arg;
+ struct drm_i915_private *i915 = ctx->i915;
+ struct drm_i915_gem_object *obj;
+ static const unsigned int sizes[] = {
+ SZ_2M,
+ SZ_4M,
+ SZ_8M,
+ SZ_16M,
+ SZ_32M,
+ };
+ int i;
+ int err;
+
+ /*
+ * Sanity check that the HW uses huge pages correctly through gemfs --
+ * ensure that our writes land in the right place.
+ */
+
+ if (!igt_can_allocate_thp(i915)) {
+ pr_info("missing THP support, skipping\n");
+ return 0;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(sizes); ++i) {
+ unsigned int size = sizes[i];
+
+ obj = i915_gem_object_create(i915, size);
+ if (IS_ERR(obj))
+ return PTR_ERR(obj);
+
+ err = i915_gem_object_pin_pages(obj);
+ if (err)
+ goto out_put;
+
+ if (obj->mm.page_sizes.phys < I915_GTT_PAGE_SIZE_2M) {
+ pr_info("finishing test early, gemfs unable to allocate huge-page(s) with size=%u\n",
+ size);
+ goto out_unpin;
+ }
+
+ err = igt_write_huge(ctx, obj);
+ if (err) {
+ pr_err("gemfs write-huge failed with size=%u\n",
+ size);
+ goto out_unpin;
+ }
+
+ i915_gem_object_unpin_pages(obj);
+ i915_gem_object_put(obj);
+ }
+
+ return 0;
+
+out_unpin:
+ i915_gem_object_unpin_pages(obj);
+out_put:
+ i915_gem_object_put(obj);
+
+ return err;
+}
+
+static int igt_ppgtt_pin_update(void *arg)
+{
+ struct i915_gem_context *ctx = arg;
+ struct drm_i915_private *dev_priv = ctx->i915;
+ unsigned long supported = INTEL_INFO(dev_priv)->page_sizes;
+ struct i915_hw_ppgtt *ppgtt = ctx->ppgtt;
+ struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
+ unsigned int flags = PIN_USER | PIN_OFFSET_FIXED;
+ int first, last;
+ int err;
+
+ /*
+ * Make sure there's no funny business when doing a PIN_UPDATE -- in the
+ * past we had a subtle issue with being able to incorrectly do multiple
+ * alloc va ranges on the same object when doing a PIN_UPDATE, which
+ * resulted in some pretty nasty bugs, though only when using
+ * huge-gtt-pages.
+ */
+
+ if (!USES_FULL_48BIT_PPGTT(dev_priv)) {
+ pr_info("48b PPGTT not supported, skipping\n");
+ return 0;
+ }
+
+ first = ilog2(I915_GTT_PAGE_SIZE_64K);
+ last = ilog2(I915_GTT_PAGE_SIZE_2M);
+
+ for_each_set_bit_from(first, &supported, last + 1) {
+ unsigned int page_size = BIT(first);
+
+ obj = i915_gem_object_create_internal(dev_priv, page_size);
+ if (IS_ERR(obj))
+ return PTR_ERR(obj);
+
+ vma = i915_vma_instance(obj, &ppgtt->base, NULL);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto out_put;
+ }
+
+ err = i915_vma_pin(vma, SZ_2M, 0, flags);
+ if (err)
+ goto out_close;
+
+ if (vma->page_sizes.sg < page_size) {
+ pr_info("Unable to allocate page-size %x, finishing test early\n",
+ page_size);
+ goto out_unpin;
+ }
+
+ err = igt_check_page_sizes(vma);
+ if (err)
+ goto out_unpin;
+
+ if (vma->page_sizes.gtt != page_size) {
+ dma_addr_t addr = i915_gem_object_get_dma_address(obj, 0);
+
+ /*
+ * The only valid reason for this to ever fail would be
+ * if the dma-mapper screwed us over when we did the
+ * dma_map_sg(), since it has the final say over the dma
+ * address.
+ */
+ if (IS_ALIGNED(addr, page_size)) {
+ pr_err("page_sizes.gtt=%u, expected=%u\n",
+ vma->page_sizes.gtt, page_size);
+ err = -EINVAL;
+ } else {
+ pr_info("dma address misaligned, finishing test early\n");
+ }
+
+ goto out_unpin;
+ }
+
+ err = i915_vma_bind(vma, I915_CACHE_NONE, PIN_UPDATE);
+ if (err)
+ goto out_unpin;
+
+ i915_vma_unpin(vma);
+ i915_vma_close(vma);
+
+ i915_gem_object_put(obj);
+ }
+
+ obj = i915_gem_object_create_internal(dev_priv, PAGE_SIZE);
+ if (IS_ERR(obj))
+ return PTR_ERR(obj);
+
+ vma = i915_vma_instance(obj, &ppgtt->base, NULL);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto out_put;
+ }
+
+ err = i915_vma_pin(vma, 0, 0, flags);
+ if (err)
+ goto out_close;
+
+ /*
+ * Make sure we don't end up with something like where the pde is still
+ * pointing to the 2M page, and the pt we just filled-in is dangling --
+ * we can check this by writing to the first page where it would then
+ * land in the now stale 2M page.
+ */
+
+ err = gpu_write(vma, ctx, dev_priv->engine[RCS], 0, 0xdeadbeaf);
+ if (err)
+ goto out_unpin;
+
+ err = cpu_check(obj, 0, 0xdeadbeaf);
+
+out_unpin:
+ i915_vma_unpin(vma);
+out_close:
+ i915_vma_close(vma);
+out_put:
+ i915_gem_object_put(obj);
+
+ return err;
+}
+
+static int igt_tmpfs_fallback(void *arg)
+{
+ struct i915_gem_context *ctx = arg;
+ struct drm_i915_private *i915 = ctx->i915;
+ struct vfsmount *gemfs = i915->mm.gemfs;
+ struct i915_address_space *vm = ctx->ppgtt ? &ctx->ppgtt->base : &i915->ggtt.base;
+ struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
+ u32 *vaddr;
+ int err = 0;
+
+ /*
+ * Make sure that we don't burst into a ball of flames upon falling back
+ * to tmpfs, which we rely on if, on the off-chance, we encounter a failure
+ * when setting up gemfs.
+ */
+
+ i915->mm.gemfs = NULL;
+
+ obj = i915_gem_object_create(i915, PAGE_SIZE);
+ if (IS_ERR(obj)) {
+ err = PTR_ERR(obj);
+ goto out_restore;
+ }
+
+ vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
+ if (IS_ERR(vaddr)) {
+ err = PTR_ERR(vaddr);
+ goto out_put;
+ }
+ *vaddr = 0xdeadbeaf;
+
+ i915_gem_object_unpin_map(obj);
+
+ vma = i915_vma_instance(obj, vm, NULL);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto out_put;
+ }
+
+ err = i915_vma_pin(vma, 0, 0, PIN_USER);
+ if (err)
+ goto out_close;
+
+ err = igt_check_page_sizes(vma);
+
+ i915_vma_unpin(vma);
+out_close:
+ i915_vma_close(vma);
+out_put:
+ i915_gem_object_put(obj);
+out_restore:
+ i915->mm.gemfs = gemfs;
+
+ return err;
+}
+
+static int igt_shrink_thp(void *arg)
+{
+ struct i915_gem_context *ctx = arg;
+ struct drm_i915_private *i915 = ctx->i915;
+ struct i915_address_space *vm = ctx->ppgtt ? &ctx->ppgtt->base : &i915->ggtt.base;
+ struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
+ unsigned int flags = PIN_USER;
+ int err;
+
+ /*
+ * Sanity check shrinking huge-paged object -- make sure nothing blows
+ * up.
+ */
+
+ if (!igt_can_allocate_thp(i915)) {
+ pr_info("missing THP support, skipping\n");
+ return 0;
+ }
+
+ obj = i915_gem_object_create(i915, SZ_2M);
+ if (IS_ERR(obj))
+ return PTR_ERR(obj);
+
+ vma = i915_vma_instance(obj, vm, NULL);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto out_put;
+ }
+
+ err = i915_vma_pin(vma, 0, 0, flags);
+ if (err)
+ goto out_close;
+
+ if (obj->mm.page_sizes.phys < I915_GTT_PAGE_SIZE_2M) {
+ pr_info("failed to allocate THP, finishing test early\n");
+ goto out_unpin;
+ }
+
+ err = igt_check_page_sizes(vma);
+ if (err)
+ goto out_unpin;
+
+ err = gpu_write(vma, ctx, i915->engine[RCS], 0, 0xdeadbeaf);
+ if (err)
+ goto out_unpin;
+
+ i915_vma_unpin(vma);
+
+ /*
+ * Now that the pages are *unpinned* shrink-all should invoke
+ * shmem to truncate our pages.
+ */
+ i915_gem_shrink_all(i915);
+ if (!IS_ERR_OR_NULL(obj->mm.pages)) {
+ pr_err("shrink-all didn't truncate the pages\n");
+ err = -EINVAL;
+ goto out_close;
+ }
+
+ if (obj->mm.page_sizes.sg || obj->mm.page_sizes.phys) {
+ pr_err("residual page-size bits left\n");
+ err = -EINVAL;
+ goto out_close;
+ }
+
+ err = i915_vma_pin(vma, 0, 0, flags);
+ if (err)
+ goto out_close;
+
+ err = cpu_check(obj, 0, 0xdeadbeaf);
+
+out_unpin:
+ i915_vma_unpin(vma);
+out_close:
+ i915_vma_close(vma);
+out_put:
+ i915_gem_object_put(obj);
+
+ return err;
+}
+
+int i915_gem_huge_page_mock_selftests(void)
+{
+ static const struct i915_subtest tests[] = {
+ SUBTEST(igt_mock_exhaust_device_supported_pages),
+ SUBTEST(igt_mock_ppgtt_misaligned_dma),
+ SUBTEST(igt_mock_ppgtt_huge_fill),
+ SUBTEST(igt_mock_ppgtt_64K),
+ };
+ int saved_ppgtt = i915_modparams.enable_ppgtt;
+ struct drm_i915_private *dev_priv;
+ struct pci_dev *pdev;
+ struct i915_hw_ppgtt *ppgtt;
+ int err;
+
+ dev_priv = mock_gem_device();
+ if (!dev_priv)
+ return -ENOMEM;
+
+ /* Pretend to be a device which supports the 48b PPGTT */
+ i915_modparams.enable_ppgtt = 3;
+
+ pdev = dev_priv->drm.pdev;
+ dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(39));
+
+ mutex_lock(&dev_priv->drm.struct_mutex);
+ ppgtt = i915_ppgtt_create(dev_priv, ERR_PTR(-ENODEV), "mock");
+ if (IS_ERR(ppgtt)) {
+ err = PTR_ERR(ppgtt);
+ goto out_unlock;
+ }
+
+ if (!i915_vm_is_48bit(&ppgtt->base)) {
+ pr_err("failed to create 48b PPGTT\n");
+ err = -EINVAL;
+ goto out_close;
+ }
+
+ /* If we ever hit this then it's time to mock the 64K scratch */
+ if (!i915_vm_has_scratch_64K(&ppgtt->base)) {
+ pr_err("PPGTT missing 64K scratch page\n");
+ err = -EINVAL;
+ goto out_close;
+ }
+
+ err = i915_subtests(tests, ppgtt);
+
+out_close:
+ i915_ppgtt_close(&ppgtt->base);
+ i915_ppgtt_put(ppgtt);
+
+out_unlock:
+ mutex_unlock(&dev_priv->drm.struct_mutex);
+
+ i915_modparams.enable_ppgtt = saved_ppgtt;
+
+ drm_dev_unref(&dev_priv->drm);
+
+ return err;
+}
+
+int i915_gem_huge_page_live_selftests(struct drm_i915_private *dev_priv)
+{
+ static const struct i915_subtest tests[] = {
+ SUBTEST(igt_shrink_thp),
+ SUBTEST(igt_ppgtt_pin_update),
+ SUBTEST(igt_tmpfs_fallback),
+ SUBTEST(igt_ppgtt_exhaust_huge),
+ SUBTEST(igt_ppgtt_gemfs_huge),
+ SUBTEST(igt_ppgtt_internal_huge),
+ };
+ struct drm_file *file;
+ struct i915_gem_context *ctx;
+ int err;
+
+ if (!USES_PPGTT(dev_priv)) {
+ pr_info("PPGTT not supported, skipping live-selftests\n");
+ return 0;
+ }
+
+ file = mock_file(dev_priv);
+ if (IS_ERR(file))
+ return PTR_ERR(file);
+
+ mutex_lock(&dev_priv->drm.struct_mutex);
+
+ ctx = live_context(dev_priv, file);
+ if (IS_ERR(ctx)) {
+ err = PTR_ERR(ctx);
+ goto out_unlock;
+ }
+
+ err = i915_subtests(tests, ctx);
+
+out_unlock:
+ mutex_unlock(&dev_priv->drm.struct_mutex);
+
+ mock_file_free(dev_priv, file);
+
+ return err;
+}
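
The trickiest piece of the new file is the greedy fill in get_huge_pages(): because every page size is a power of two, `(page_size - 1) & page_mask` is exactly the sum of the smaller sizes that still have to appear at least once, and the inner loop keeps taking the current (largest) size only while the remainder can cover that reservation. A hedged standalone sketch of the same accounting -- illustration only, assumes GCC/Clang __builtin_clz and an object sized (as the selftest guarantees) to use each mask bit:

    #include <stdio.h>

    int main(void)
    {
    	/* 4M + 128K + 8K object, filled from a 2M | 64K | 4K page mask. */
    	unsigned long long rem = (4ull << 20) + (128 << 10) + (8 << 10);
    	unsigned int page_mask = (2u << 20) | (64 << 10) | (4 << 10);

    	while (page_mask) {
    		/* Largest page size still in the mask. */
    		unsigned int page_size = 1u << (31 - __builtin_clz(page_mask));
    		/* Sum of smaller sizes we must still use at least once. */
    		unsigned int reserve = (page_size - 1) & page_mask;

    		do {
    			rem -= page_size;
    			printf("chunk: %u KiB, %llu bytes left\n",
    			       page_size >> 10, rem);
    		} while (rem >= (unsigned long long)reserve + page_size);

    		page_mask &= page_size - 1;	/* drop to the next size */
    	}
    	return rem != 0;
    }

For these inputs it emits two 2048 KiB chunks, two 64 KiB chunks and two 4 KiB chunks, consuming the object exactly -- the same order get_huge_pages() allocates in.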
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/selftests/i915_gem_context.c
index fb0a58fc8348..def5052862ae 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_context.c
@@ -417,7 +417,7 @@ static int fake_aliasing_ppgtt_enable(struct drm_i915_private *i915)
if (err)
return err;
- list_for_each_entry(obj, &i915->mm.bound_list, global_link) {
+ list_for_each_entry(obj, &i915->mm.bound_list, mm.link) {
struct i915_vma *vma;
vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
index 5ea373221f49..f463105ff48d 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
@@ -24,6 +24,9 @@
#include "../i915_selftest.h"
+#include "lib_sw_fence.h"
+#include "mock_context.h"
+#include "mock_drm.h"
#include "mock_gem_device.h"
static int populate_ggtt(struct drm_i915_private *i915)
@@ -47,7 +50,7 @@ static int populate_ggtt(struct drm_i915_private *i915)
if (!list_empty(&i915->mm.unbound_list)) {
size = 0;
- list_for_each_entry(obj, &i915->mm.unbound_list, global_link)
+ list_for_each_entry(obj, &i915->mm.unbound_list, mm.link)
size++;
pr_err("Found %lld objects unbound!\n", size);
@@ -74,10 +77,10 @@ static void cleanup_objects(struct drm_i915_private *i915)
{
struct drm_i915_gem_object *obj, *on;
- list_for_each_entry_safe(obj, on, &i915->mm.unbound_list, global_link)
+ list_for_each_entry_safe(obj, on, &i915->mm.unbound_list, mm.link)
i915_gem_object_put(obj);
- list_for_each_entry_safe(obj, on, &i915->mm.bound_list, global_link)
+ list_for_each_entry_safe(obj, on, &i915->mm.bound_list, mm.link)
i915_gem_object_put(obj);
mutex_unlock(&i915->drm.struct_mutex);
@@ -149,8 +152,6 @@ static int igt_overcommit(void *arg)
goto cleanup;
}
- list_move(&obj->global_link, &i915->mm.unbound_list);
-
vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, 0);
if (!IS_ERR(vma) || PTR_ERR(vma) != -ENOSPC) {
pr_err("Failed to evict+insert, i915_gem_object_ggtt_pin returned err=%d\n", (int)PTR_ERR(vma));
@@ -325,6 +326,148 @@ cleanup:
return err;
}
+static int igt_evict_contexts(void *arg)
+{
+ const u64 PRETEND_GGTT_SIZE = 16ull << 20;
+ struct drm_i915_private *i915 = arg;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ struct reserved {
+ struct drm_mm_node node;
+ struct reserved *next;
+ } *reserved = NULL;
+ struct drm_mm_node hole;
+ unsigned long count;
+ int err;
+
+ /*
+ * The purpose of this test is to verify that we will trigger an
+ * eviction in the GGTT when constructing a request that requires
+ * additional space in the GGTT for pinning the context. This space
+ * is not directly tied to the request so reclaiming it requires
+ * extra work.
+ *
+ * As such this test is only meaningful for full-ppgtt environments
+ * where the GTT space of the request is separate from the GGTT
+ * allocation required to build the request.
+ */
+ if (!USES_FULL_PPGTT(i915))
+ return 0;
+
+ mutex_lock(&i915->drm.struct_mutex);
+
+ /* Reserve a block so that we know we have enough to fit a few rq */
+ memset(&hole, 0, sizeof(hole));
+ err = i915_gem_gtt_insert(&i915->ggtt.base, &hole,
+ PRETEND_GGTT_SIZE, 0, I915_COLOR_UNEVICTABLE,
+ 0, i915->ggtt.base.total,
+ PIN_NOEVICT);
+ if (err)
+ goto out_locked;
+
+ /* Make the GGTT appear small by filling it with unevictable nodes */
+ count = 0;
+ do {
+ struct reserved *r;
+
+ r = kcalloc(1, sizeof(*r), GFP_KERNEL);
+ if (!r) {
+ err = -ENOMEM;
+ goto out_locked;
+ }
+
+ if (i915_gem_gtt_insert(&i915->ggtt.base, &r->node,
+ 1ul << 20, 0, I915_COLOR_UNEVICTABLE,
+ 0, i915->ggtt.base.total,
+ PIN_NOEVICT)) {
+ kfree(r);
+ break;
+ }
+
+ r->next = reserved;
+ reserved = r;
+
+ count++;
+ } while (1);
+ drm_mm_remove_node(&hole);
+ mutex_unlock(&i915->drm.struct_mutex);
+ pr_info("Filled GGTT with %lu 1MiB nodes\n", count);
+
+ /* Overfill the GGTT with context objects and so try to evict one. */
+ for_each_engine(engine, i915, id) {
+ struct i915_sw_fence fence;
+ struct drm_file *file;
+
+ file = mock_file(i915);
+ if (IS_ERR(file))
+ return PTR_ERR(file);
+
+ count = 0;
+ mutex_lock(&i915->drm.struct_mutex);
+ onstack_fence_init(&fence);
+ do {
+ struct drm_i915_gem_request *rq;
+ struct i915_gem_context *ctx;
+
+ ctx = live_context(i915, file);
+ if (IS_ERR(ctx))
+ break;
+
+ /* We will need some GGTT space for the rq's context */
+ igt_evict_ctl.fail_if_busy = true;
+ rq = i915_gem_request_alloc(engine, ctx);
+ igt_evict_ctl.fail_if_busy = false;
+
+ if (IS_ERR(rq)) {
+ /* When full, fail_if_busy will trigger EBUSY */
+ if (PTR_ERR(rq) != -EBUSY) {
+ pr_err("Unexpected error from request alloc (ctx hw id %u, on %s): %d\n",
+ ctx->hw_id, engine->name,
+ (int)PTR_ERR(rq));
+ err = PTR_ERR(rq);
+ }
+ break;
+ }
+
+ /* Keep every request/ctx pinned until we are full */
+ err = i915_sw_fence_await_sw_fence_gfp(&rq->submit,
+ &fence,
+ GFP_KERNEL);
+ if (err < 0)
+ break;
+
+ i915_add_request(rq);
+ count++;
+ err = 0;
+ } while (1);
+ mutex_unlock(&i915->drm.struct_mutex);
+
+ onstack_fence_fini(&fence);
+ pr_info("Submitted %lu contexts/requests on %s\n",
+ count, engine->name);
+
+ mock_file_free(i915, file);
+ if (err)
+ break;
+ }
+
+ mutex_lock(&i915->drm.struct_mutex);
+out_locked:
+ while (reserved) {
+ struct reserved *next = reserved->next;
+
+ drm_mm_remove_node(&reserved->node);
+ kfree(reserved);
+
+ reserved = next;
+ }
+ if (drm_mm_node_allocated(&hole))
+ drm_mm_remove_node(&hole);
+ mutex_unlock(&i915->drm.struct_mutex);
+
+ return err;
+}
+
int i915_gem_evict_mock_selftests(void)
{
static const struct i915_subtest tests[] = {
@@ -348,3 +491,12 @@ int i915_gem_evict_mock_selftests(void)
drm_dev_unref(&i915->drm);
return err;
}
+
+int i915_gem_evict_live_selftests(struct drm_i915_private *i915)
+{
+ static const struct i915_subtest tests[] = {
+ SUBTEST(igt_evict_contexts),
+ };
+
+ return i915_subtests(tests, i915);
+}
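
igt_evict_contexts() above leans on the new lib_sw_fence helpers to keep every request pending until the engine loop is done. Condensed, the plug-and-release pattern looks like this -- a sketch of the calls used in the hunk, with the request-creation loop and error paths elided:

    struct i915_sw_fence plug;

    onstack_fence_init(&plug);
    /* For each request created: its submit fence waits on the plug,
     * so the request stays queued and its context stays pinned.
     */
    err = i915_sw_fence_await_sw_fence_gfp(&rq->submit, &plug, GFP_KERNEL);
    i915_add_request(rq);
    /* ... repeat until request allocation reports -EBUSY ... */
    onstack_fence_fini(&plug);	/* signals the plug; everything may now run */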
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
index 6b132caffa18..9da0c9f99916 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
@@ -39,25 +39,26 @@ static void fake_free_pages(struct drm_i915_gem_object *obj,
kfree(pages);
}
-static struct sg_table *
-fake_get_pages(struct drm_i915_gem_object *obj)
+static int fake_get_pages(struct drm_i915_gem_object *obj)
{
#define GFP (GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY)
#define PFN_BIAS 0x1000
struct sg_table *pages;
struct scatterlist *sg;
+ unsigned int sg_page_sizes;
typeof(obj->base.size) rem;
pages = kmalloc(sizeof(*pages), GFP);
if (!pages)
- return ERR_PTR(-ENOMEM);
+ return -ENOMEM;
rem = round_up(obj->base.size, BIT(31)) >> 31;
if (sg_alloc_table(pages, rem, GFP)) {
kfree(pages);
- return ERR_PTR(-ENOMEM);
+ return -ENOMEM;
}
+ sg_page_sizes = 0;
rem = obj->base.size;
for (sg = pages->sgl; sg; sg = sg_next(sg)) {
unsigned long len = min_t(typeof(rem), rem, BIT(31));
@@ -66,13 +67,17 @@ fake_get_pages(struct drm_i915_gem_object *obj)
sg_set_page(sg, pfn_to_page(PFN_BIAS), len, 0);
sg_dma_address(sg) = page_to_phys(sg_page(sg));
sg_dma_len(sg) = len;
+ sg_page_sizes |= len;
rem -= len;
}
GEM_BUG_ON(rem);
obj->mm.madv = I915_MADV_DONTNEED;
- return pages;
+
+ __i915_gem_object_set_pages(obj, pages, sg_page_sizes);
+
+ return 0;
#undef GFP
}
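
A side note on the table sizing above: fake_get_pages() splits the object into scatterlist entries of at most 2 GiB (BIT(31) bytes), so `round_up(obj->base.size, BIT(31)) >> 31` is simply a ceiling division computing how many entries are needed. An equivalent sketch:

    /* Number of <= 2 GiB sg entries for an object of 'size' bytes;
     * same as round_up(size, BIT(31)) >> 31 in the hunk above.
     */
    static inline unsigned long fake_sg_nents(u64 size)
    {
    	return (size + BIT_ULL(31) - 1) >> 31;
    }
    /* e.g. fake_sg_nents(5ull << 30) == 3: two full 2 GiB chunks + a 1 GiB tail */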
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_object.c b/drivers/gpu/drm/i915/selftests/i915_gem_object.c
index 8f011c447e41..1b8774a42e48 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_object.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_object.c
@@ -251,14 +251,6 @@ static int check_partial_mapping(struct drm_i915_gem_object *obj,
return PTR_ERR(io);
}
- err = i915_vma_get_fence(vma);
- if (err) {
- pr_err("Failed to get fence for partial view: offset=%lu\n",
- page);
- i915_vma_unpin_iomap(vma);
- return err;
- }
-
iowrite32(page, io + n * PAGE_SIZE/sizeof(*io));
i915_vma_unpin_iomap(vma);
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_request.c b/drivers/gpu/drm/i915/selftests/i915_gem_request.c
index 6664cb2eb0b8..a999161e8db1 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_request.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_request.c
@@ -215,7 +215,9 @@ static int igt_request_rewind(void *arg)
}
i915_gem_request_get(vip);
i915_add_request(vip);
+ rcu_read_lock();
request->engine->submit_request(request);
+ rcu_read_unlock();
mutex_unlock(&i915->drm.struct_mutex);
@@ -418,7 +420,10 @@ static struct i915_vma *empty_batch(struct drm_i915_private *i915)
err = PTR_ERR(cmd);
goto err;
}
+
*cmd = MI_BATCH_BUFFER_END;
+ i915_gem_chipset_flush(i915);
+
i915_gem_object_unpin_map(obj);
err = i915_gem_object_set_to_gtt_domain(obj, false);
@@ -605,8 +610,8 @@ static struct i915_vma *recursive_batch(struct drm_i915_private *i915)
*cmd++ = lower_32_bits(vma->node.start);
}
*cmd++ = MI_BATCH_BUFFER_END; /* terminate early in case of error */
+ i915_gem_chipset_flush(i915);
- wmb();
i915_gem_object_unpin_map(obj);
return vma;
@@ -625,7 +630,7 @@ static int recursive_batch_resolve(struct i915_vma *batch)
return PTR_ERR(cmd);
*cmd = MI_BATCH_BUFFER_END;
- wmb();
+ i915_gem_chipset_flush(batch->vm->i915);
i915_gem_object_unpin_map(batch->obj);
@@ -858,7 +863,8 @@ out_request:
I915_MAP_WC);
if (!IS_ERR(cmd)) {
*cmd = MI_BATCH_BUFFER_END;
- wmb();
+ i915_gem_chipset_flush(i915);
+
i915_gem_object_unpin_map(request[id]->batch->obj);
}
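
The wmb() -> i915_gem_chipset_flush() swaps in these hunks follow from how the batches are mapped: the buffers are written through a write-combining mapping (I915_MAP_WC), and a bare write barrier only orders CPU stores, while the chipset-flush helper both issues the barrier and, on platforms that need it, flushes the chipset write cache so the GPU actually observes the freshly written terminator. The resulting idiom, as used above:

    cmd = i915_gem_object_pin_map(obj, I915_MAP_WC);
    /* ... emit commands ... */
    *cmd = MI_BATCH_BUFFER_END;
    i915_gem_chipset_flush(i915);	/* make the WC writes visible to the GPU */
    i915_gem_object_unpin_map(obj);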
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_timeline.c b/drivers/gpu/drm/i915/selftests/i915_gem_timeline.c
index 7a44dab631b8..4795877abe56 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_timeline.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_timeline.c
@@ -121,7 +121,7 @@ out:
static unsigned int random_engine(struct rnd_state *rnd)
{
- return ((u64)prandom_u32_state(rnd) * I915_NUM_ENGINES) >> 32;
+ return i915_prandom_u32_max_state(I915_NUM_ENGINES, rnd);
}
static int bench_sync(void *arg)
diff --git a/drivers/gpu/drm/i915/selftests/i915_live_selftests.h b/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
index 1519f1b7841b..d7dd98a6acad 100644
--- a/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
+++ b/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
@@ -16,5 +16,7 @@ selftest(objects, i915_gem_object_live_selftests)
selftest(dmabuf, i915_gem_dmabuf_live_selftests)
selftest(coherency, i915_gem_coherency_live_selftests)
selftest(gtt, i915_gem_gtt_live_selftests)
+selftest(evict, i915_gem_evict_live_selftests)
+selftest(hugepages, i915_gem_huge_page_live_selftests)
selftest(contexts, i915_gem_context_live_selftests)
selftest(hangcheck, intel_hangcheck_live_selftests)
diff --git a/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h b/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h
index e5a9e5dcf2f3..19c6fce837df 100644
--- a/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h
+++ b/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h
@@ -22,3 +22,4 @@ selftest(dmabuf, i915_gem_dmabuf_mock_selftests)
selftest(vma, i915_vma_mock_selftests)
selftest(evict, i915_gem_evict_mock_selftests)
selftest(gtt, i915_gem_gtt_mock_selftests)
+selftest(hugepages, i915_gem_huge_page_mock_selftests)
diff --git a/drivers/gpu/drm/i915/selftests/i915_random.c b/drivers/gpu/drm/i915/selftests/i915_random.c
index 222c511bea49..b85872cc7fbe 100644
--- a/drivers/gpu/drm/i915/selftests/i915_random.c
+++ b/drivers/gpu/drm/i915/selftests/i915_random.c
@@ -41,11 +41,6 @@ u64 i915_prandom_u64_state(struct rnd_state *rnd)
return x;
}
-static inline u32 i915_prandom_u32_max_state(u32 ep_ro, struct rnd_state *state)
-{
- return upper_32_bits((u64)prandom_u32_state(state) * ep_ro);
-}
-
void i915_random_reorder(unsigned int *order, unsigned int count,
struct rnd_state *state)
{
diff --git a/drivers/gpu/drm/i915/selftests/i915_random.h b/drivers/gpu/drm/i915/selftests/i915_random.h
index 6c9379871384..7dffedc501ca 100644
--- a/drivers/gpu/drm/i915/selftests/i915_random.h
+++ b/drivers/gpu/drm/i915/selftests/i915_random.h
@@ -43,6 +43,11 @@
u64 i915_prandom_u64_state(struct rnd_state *rnd);
+static inline u32 i915_prandom_u32_max_state(u32 ep_ro, struct rnd_state *state)
+{
+ return upper_32_bits(mul_u32_u32(prandom_u32_state(state), ep_ro));
+}
+
unsigned int *i915_random_order(unsigned int count,
struct rnd_state *state);
void i915_random_reorder(unsigned int *order,
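The helper moved into the header computes a bounded random number with a multiply-high rather than a modulo: scaling a full-range 32-bit value by ep_ro and keeping the upper 32 bits maps it onto [0, ep_ro), which is cheaper than a division on most CPUs. The arithmetic in plain, userspace C:

#include <stdint.h>

/* Equivalent of upper_32_bits(mul_u32_u32(r, ep_ro)): since r < 2^32,
 * (r * ep_ro) >> 32 is always < ep_ro.
 */
static inline uint32_t bounded_rand(uint32_t r, uint32_t ep_ro)
{
	return (uint32_t)(((uint64_t)r * ep_ro) >> 32);
}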
diff --git a/drivers/gpu/drm/i915/selftests/i915_sw_fence.c b/drivers/gpu/drm/i915/selftests/i915_sw_fence.c
index 19d145d6bf52..ea01d0fe3ace 100644
--- a/drivers/gpu/drm/i915/selftests/i915_sw_fence.c
+++ b/drivers/gpu/drm/i915/selftests/i915_sw_fence.c
@@ -24,6 +24,7 @@
#include <linux/completion.h>
#include <linux/delay.h>
+#include <linux/prime_numbers.h>
#include "../i915_selftest.h"
@@ -565,6 +566,46 @@ err_in:
return ret;
}
+static int test_timer(void *arg)
+{
+ unsigned long target, delay;
+ struct timed_fence tf;
+
+ timed_fence_init(&tf, target = jiffies);
+ if (!i915_sw_fence_done(&tf.fence)) {
+ pr_err("Fence with immediate expiration not signaled\n");
+ goto err;
+ }
+ timed_fence_fini(&tf);
+
+ for_each_prime_number(delay, i915_selftest.timeout_jiffies/2) {
+ timed_fence_init(&tf, target = jiffies + delay);
+ if (i915_sw_fence_done(&tf.fence)) {
+ pr_err("Fence with future expiration (%lu jiffies) already signaled\n", delay);
+ goto err;
+ }
+
+ i915_sw_fence_wait(&tf.fence);
+ if (!i915_sw_fence_done(&tf.fence)) {
+ pr_err("Fence not signaled after wait\n");
+ goto err;
+ }
+ if (time_before(jiffies, target)) {
+ pr_err("Fence signaled too early, target=%lu, now=%lu\n",
+ target, jiffies);
+ goto err;
+ }
+
+ timed_fence_fini(&tf);
+ }
+
+ return 0;
+
+err:
+ timed_fence_fini(&tf);
+ return -EINVAL;
+}
+
int i915_sw_fence_mock_selftests(void)
{
static const struct i915_subtest tests[] = {
@@ -576,6 +617,7 @@ int i915_sw_fence_mock_selftests(void)
SUBTEST(test_C_AB),
SUBTEST(test_chain),
SUBTEST(test_ipc),
+ SUBTEST(test_timer),
};
return i915_subtests(tests, NULL);
diff --git a/drivers/gpu/drm/i915/selftests/intel_hangcheck.c b/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
index 02e52a146ed8..71ce06680d66 100644
--- a/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
+++ b/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
@@ -165,6 +165,7 @@ static int emit_recurse_batch(struct hang *h,
*batch++ = lower_32_bits(vma->node.start);
}
*batch++ = MI_BATCH_BUFFER_END; /* not reached */
+ i915_gem_chipset_flush(h->i915);
flags = 0;
if (INTEL_GEN(vm->i915) <= 5)
@@ -231,7 +232,7 @@ static u32 hws_seqno(const struct hang *h,
static void hang_fini(struct hang *h)
{
*h->batch = MI_BATCH_BUFFER_END;
- wmb();
+ i915_gem_chipset_flush(h->i915);
i915_gem_object_unpin_map(h->obj);
i915_gem_object_put(h->obj);
@@ -275,6 +276,8 @@ static int igt_hang_sanitycheck(void *arg)
i915_gem_request_get(rq);
*h.batch = MI_BATCH_BUFFER_END;
+ i915_gem_chipset_flush(i915);
+
__i915_add_request(rq, true);
timeout = i915_wait_request(rq,
@@ -621,7 +624,15 @@ static int igt_wait_reset(void *arg)
__i915_add_request(rq, true);
if (!wait_for_hang(&h, rq)) {
- pr_err("Failed to start request %x\n", rq->fence.seqno);
+ struct drm_printer p = drm_info_printer(i915->drm.dev);
+
+ pr_err("Failed to start request %x, at %x\n",
+ rq->fence.seqno, hws_seqno(&h, rq));
+ intel_engine_dump(rq->engine, &p);
+
+ i915_reset(i915, 0);
+ i915_gem_set_wedged(i915);
+
err = -EIO;
goto out_rq;
}
@@ -708,10 +719,18 @@ static int igt_reset_queue(void *arg)
__i915_add_request(rq, true);
if (!wait_for_hang(&h, prev)) {
- pr_err("Failed to start request %x\n",
- prev->fence.seqno);
+ struct drm_printer p = drm_info_printer(i915->drm.dev);
+
+ pr_err("Failed to start request %x, at %x\n",
+ prev->fence.seqno, hws_seqno(&h, prev));
+ intel_engine_dump(rq->engine, &p);
+
i915_gem_request_put(rq);
i915_gem_request_put(prev);
+
+ i915_reset(i915, 0);
+ i915_gem_set_wedged(i915);
+
err = -EIO;
goto fini;
}
@@ -756,7 +775,7 @@ static int igt_reset_queue(void *arg)
pr_info("%s: Completed %d resets\n", engine->name, count);
*h.batch = MI_BATCH_BUFFER_END;
- wmb();
+ i915_gem_chipset_flush(i915);
i915_gem_request_put(prev);
}
@@ -806,7 +825,15 @@ static int igt_handle_error(void *arg)
__i915_add_request(rq, true);
if (!wait_for_hang(&h, rq)) {
- pr_err("Failed to start request %x\n", rq->fence.seqno);
+ struct drm_printer p = drm_info_printer(i915->drm.dev);
+
+ pr_err("Failed to start request %x, at %x\n",
+ rq->fence.seqno, hws_seqno(&h, rq));
+ intel_engine_dump(rq->engine, &p);
+
+ i915_reset(i915, 0);
+ i915_gem_set_wedged(i915);
+
err = -EIO;
goto err_request;
}
@@ -843,17 +870,24 @@ err_unlock:
int intel_hangcheck_live_selftests(struct drm_i915_private *i915)
{
static const struct i915_subtest tests[] = {
+ SUBTEST(igt_global_reset), /* attempt to recover GPU first */
SUBTEST(igt_hang_sanitycheck),
- SUBTEST(igt_global_reset),
SUBTEST(igt_reset_engine),
SUBTEST(igt_reset_active_engines),
SUBTEST(igt_wait_reset),
SUBTEST(igt_reset_queue),
SUBTEST(igt_handle_error),
};
+ int err;
if (!intel_has_gpu_reset(i915))
return 0;
- return i915_subtests(tests, i915);
+ intel_runtime_pm_get(i915);
+
+ err = i915_subtests(tests, i915);
+
+ intel_runtime_pm_put(i915);
+
+ return err;
}
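Two structural changes close the hangcheck selftests out: igt_global_reset moves to the front so a previously wedged GPU is recovered before the remaining subtests run, and the whole run is bracketed by a runtime-pm reference so the device cannot autosuspend while resets are in flight. The wakeref bracket in isolation:

/* Pattern sketch: hold a wakeref across the whole subtest run. */
intel_runtime_pm_get(i915);
err = i915_subtests(tests, i915);
intel_runtime_pm_put(i915);
return err;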
diff --git a/drivers/gpu/drm/i915/selftests/lib_sw_fence.c b/drivers/gpu/drm/i915/selftests/lib_sw_fence.c
new file mode 100644
index 000000000000..3790fdf44a1a
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/lib_sw_fence.c
@@ -0,0 +1,78 @@
+/*
+ * Copyright © 2017 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#include "lib_sw_fence.h"
+
+/* Small library of different fence types useful for writing tests */
+
+static int __i915_sw_fence_call
+nop_fence_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
+{
+ return NOTIFY_DONE;
+}
+
+void __onstack_fence_init(struct i915_sw_fence *fence,
+ const char *name,
+ struct lock_class_key *key)
+{
+ debug_fence_init_onstack(fence);
+
+ __init_waitqueue_head(&fence->wait, name, key);
+ atomic_set(&fence->pending, 1);
+ fence->flags = (unsigned long)nop_fence_notify;
+}
+
+void onstack_fence_fini(struct i915_sw_fence *fence)
+{
+ i915_sw_fence_commit(fence);
+ i915_sw_fence_fini(fence);
+}
+
+static void timed_fence_wake(unsigned long data)
+{
+ struct timed_fence *tf = (struct timed_fence *)data;
+
+ i915_sw_fence_commit(&tf->fence);
+}
+
+void timed_fence_init(struct timed_fence *tf, unsigned long expires)
+{
+ onstack_fence_init(&tf->fence);
+
+ setup_timer_on_stack(&tf->timer, timed_fence_wake, (unsigned long)tf);
+
+ if (time_after(expires, jiffies))
+ mod_timer(&tf->timer, expires);
+ else
+ i915_sw_fence_commit(&tf->fence);
+}
+
+void timed_fence_fini(struct timed_fence *tf)
+{
+ if (del_timer_sync(&tf->timer))
+ i915_sw_fence_commit(&tf->fence);
+
+ destroy_timer_on_stack(&tf->timer);
+ i915_sw_fence_fini(&tf->fence);
+}
diff --git a/drivers/gpu/drm/i915/selftests/lib_sw_fence.h b/drivers/gpu/drm/i915/selftests/lib_sw_fence.h
new file mode 100644
index 000000000000..474aafb92ae1
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/lib_sw_fence.h
@@ -0,0 +1,42 @@
+/*
+ * lib_sw_fence.h - library routines for testing N:M synchronisation points
+ *
+ * Copyright (C) 2017 Intel Corporation
+ *
+ * This file is released under the GPLv2.
+ *
+ */
+
+#ifndef _LIB_SW_FENCE_H_
+#define _LIB_SW_FENCE_H_
+
+#include <linux/timer.h>
+
+#include "../i915_sw_fence.h"
+
+#ifdef CONFIG_LOCKDEP
+#define onstack_fence_init(fence) \
+do { \
+ static struct lock_class_key __key; \
+ \
+ __onstack_fence_init((fence), #fence, &__key); \
+} while (0)
+#else
+#define onstack_fence_init(fence) \
+ __onstack_fence_init((fence), NULL, NULL)
+#endif
+
+void __onstack_fence_init(struct i915_sw_fence *fence,
+ const char *name,
+ struct lock_class_key *key);
+void onstack_fence_fini(struct i915_sw_fence *fence);
+
+struct timed_fence {
+ struct i915_sw_fence fence;
+ struct timer_list timer;
+};
+
+void timed_fence_init(struct timed_fence *tf, unsigned long expires);
+void timed_fence_fini(struct timed_fence *tf);
+
+#endif /* _LIB_SW_FENCE_H_ */
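The CONFIG_LOCKDEP wrapper gives each onstack_fence_init() call site its own static lock class, so lockdep can tell the short-lived on-stack waitqueues apart. A minimal usage sketch of the pair (an illustration assumed from the interface above, not taken from the patch):

static void example_onstack_fence(void)
{
	struct i915_sw_fence fence;

	onstack_fence_init(&fence);	/* pending = 1, nop notify callback */
	/* ... attach waiters to &fence ... */
	onstack_fence_fini(&fence);	/* i915_sw_fence_commit() + _fini() */
}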
diff --git a/drivers/gpu/drm/i915/selftests/mock_context.c b/drivers/gpu/drm/i915/selftests/mock_context.c
index 098ce643ad07..bbf80d42e793 100644
--- a/drivers/gpu/drm/i915/selftests/mock_context.c
+++ b/drivers/gpu/drm/i915/selftests/mock_context.c
@@ -73,11 +73,7 @@ err_put:
void mock_context_close(struct i915_gem_context *ctx)
{
- i915_gem_context_set_closed(ctx);
-
- i915_ppgtt_close(&ctx->ppgtt->base);
-
- i915_gem_context_put(ctx);
+ context_close(ctx);
}
void mock_init_contexts(struct drm_i915_private *i915)
diff --git a/drivers/gpu/drm/i915/selftests/mock_engine.c b/drivers/gpu/drm/i915/selftests/mock_engine.c
index fc0fd7498689..331c2b09869e 100644
--- a/drivers/gpu/drm/i915/selftests/mock_engine.c
+++ b/drivers/gpu/drm/i915/selftests/mock_engine.c
@@ -32,9 +32,9 @@ static struct mock_request *first_request(struct mock_engine *engine)
link);
}
-static void hw_delay_complete(unsigned long data)
+static void hw_delay_complete(struct timer_list *t)
{
- struct mock_engine *engine = (typeof(engine))data;
+ struct mock_engine *engine = from_timer(engine, t, hw_delay);
struct mock_request *request;
spin_lock(&engine->hw_lock);
@@ -161,9 +161,7 @@ struct intel_engine_cs *mock_engine(struct drm_i915_private *i915,
/* fake hw queue */
spin_lock_init(&engine->hw_lock);
- setup_timer(&engine->hw_delay,
- hw_delay_complete,
- (unsigned long)engine);
+ timer_setup(&engine->hw_delay, hw_delay_complete, 0);
INIT_LIST_HEAD(&engine->hw_queue);
return &engine->base;
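The hw_delay hunks are one instance of the tree-wide timer API conversion: the callback now receives the struct timer_list pointer and recovers its container via from_timer(), replacing the unsigned-long data cookie formerly passed to setup_timer(). The shape of the conversion, on a hypothetical structure:

/* Conversion sketch; struct foo is hypothetical. */
struct foo {
	struct timer_list timer;
	/* ... */
};

static void foo_timer_fn(struct timer_list *t)
{
	struct foo *foo = from_timer(foo, t, timer);

	/* ... act on foo ... */
}

/* old: setup_timer(&foo->timer, foo_timer_fn, (unsigned long)foo);
 * new: timer_setup(&foo->timer, foo_timer_fn, 0);
 */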
diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_device.c b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
index 678723430d78..04eb9362f4f8 100644
--- a/drivers/gpu/drm/i915/selftests/mock_gem_device.c
+++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
@@ -83,6 +83,8 @@ static void mock_device_release(struct drm_device *dev)
kmem_cache_destroy(i915->vmas);
kmem_cache_destroy(i915->objects);
+ i915_gemfs_fini(i915);
+
drm_dev_fini(&i915->drm);
put_device(&i915->drm.pdev->dev);
}
@@ -146,6 +148,11 @@ struct drm_i915_private *mock_gem_device(void)
dev_set_name(&pdev->dev, "mock");
dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+#if IS_ENABLED(CONFIG_IOMMU_API) && defined(CONFIG_INTEL_IOMMU)
+ /* hack to disable iommu for the fake device; force identity mapping */
+ pdev->dev.archdata.iommu = (void *)-1;
+#endif
+
dev_pm_domain_set(&pdev->dev, &pm_domain);
pm_runtime_enable(&pdev->dev);
pm_runtime_dont_use_autosuspend(&pdev->dev);
@@ -167,6 +174,11 @@ struct drm_i915_private *mock_gem_device(void)
mkwrite_device_info(i915)->gen = -1;
+ mkwrite_device_info(i915)->page_sizes =
+ I915_GTT_PAGE_SIZE_4K |
+ I915_GTT_PAGE_SIZE_64K |
+ I915_GTT_PAGE_SIZE_2M;
+
spin_lock_init(&i915->mm.object_stat_lock);
mock_uncore_init(i915);
@@ -234,8 +246,16 @@ struct drm_i915_private *mock_gem_device(void)
if (!i915->kernel_context)
goto err_engine;
+ i915->preempt_context = mock_context(i915, NULL);
+ if (!i915->preempt_context)
+ goto err_kernel_context;
+
+ WARN_ON(i915_gemfs_init(i915));
+
return i915;
+err_kernel_context:
+ i915_gem_context_put(i915->kernel_context);
err_engine:
for_each_engine(engine, i915, id)
mock_engine_free(engine);
diff --git a/drivers/gpu/drm/i915/selftests/mock_gtt.c b/drivers/gpu/drm/i915/selftests/mock_gtt.c
index f2118cf535a0..336e1afb250f 100644
--- a/drivers/gpu/drm/i915/selftests/mock_gtt.c
+++ b/drivers/gpu/drm/i915/selftests/mock_gtt.c
@@ -43,7 +43,6 @@ static int mock_bind_ppgtt(struct i915_vma *vma,
u32 flags)
{
GEM_BUG_ON(flags & I915_VMA_GLOBAL_BIND);
- vma->pages = vma->obj->mm.pages;
vma->flags |= I915_VMA_LOCAL_BIND;
return 0;
}
@@ -84,6 +83,8 @@ mock_ppgtt(struct drm_i915_private *i915,
ppgtt->base.insert_entries = mock_insert_entries;
ppgtt->base.bind_vma = mock_bind_ppgtt;
ppgtt->base.unbind_vma = mock_unbind_ppgtt;
+ ppgtt->base.set_pages = ppgtt_set_pages;
+ ppgtt->base.clear_pages = clear_pages;
ppgtt->base.cleanup = mock_cleanup;
return ppgtt;
@@ -93,12 +94,6 @@ static int mock_bind_ggtt(struct i915_vma *vma,
enum i915_cache_level cache_level,
u32 flags)
{
- int err;
-
- err = i915_get_ggtt_vma_pages(vma);
- if (err)
- return err;
-
vma->flags |= I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND;
return 0;
}
@@ -124,6 +119,8 @@ void mock_init_ggtt(struct drm_i915_private *i915)
ggtt->base.insert_entries = mock_insert_entries;
ggtt->base.bind_vma = mock_bind_ggtt;
ggtt->base.unbind_vma = mock_unbind_ggtt;
+ ggtt->base.set_pages = ggtt_set_pages;
+ ggtt->base.clear_pages = clear_pages;
ggtt->base.cleanup = mock_cleanup;
i915_address_space_init(&ggtt->base, i915, "global");
diff --git a/drivers/gpu/drm/i915/selftests/scatterlist.c b/drivers/gpu/drm/i915/selftests/scatterlist.c
index 1cc5d2931753..cd6d2a16071f 100644
--- a/drivers/gpu/drm/i915/selftests/scatterlist.c
+++ b/drivers/gpu/drm/i915/selftests/scatterlist.c
@@ -189,6 +189,20 @@ static unsigned int random(unsigned long n,
return 1 + (prandom_u32_state(rnd) % 1024);
}
+static unsigned int random_page_size_pages(unsigned long n,
+ unsigned long count,
+ struct rnd_state *rnd)
+{
+ /* 4K, 64K, 2M */
+ static unsigned int page_count[] = {
+ BIT(12) >> PAGE_SHIFT,
+ BIT(16) >> PAGE_SHIFT,
+ BIT(21) >> PAGE_SHIFT,
+ };
+
+ return page_count[(prandom_u32_state(rnd) % 3)];
+}
+
static inline bool page_contiguous(struct page *first,
struct page *last,
unsigned long npages)
@@ -252,6 +266,7 @@ static const npages_fn_t npages_funcs[] = {
grow,
shrink,
random,
+ random_page_size_pages,
NULL,
};
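For reference, the new npages function draws its chunk sizes in units of pages: assuming 4K base pages (PAGE_SHIFT == 12), the three buckets evaluate to 1, 16 and 512 pages, i.e. 4K, 64K and 2M scatterlist chunks, matching the page_sizes mask the mock device now advertises. A quick standalone check of that arithmetic:

#include <assert.h>

#define PAGE_SHIFT 12			/* assumption: 4K base pages */
#define BIT(n) (1UL << (n))

int main(void)
{
	assert((BIT(12) >> PAGE_SHIFT) == 1);	/* 4K  */
	assert((BIT(16) >> PAGE_SHIFT) == 16);	/* 64K */
	assert((BIT(21) >> PAGE_SHIFT) == 512);	/* 2M  */
	return 0;
}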