Diffstat (limited to 'drivers/gpu/drm/ttm')
-rw-r--r--  drivers/gpu/drm/ttm/Makefile                  |   1
-rw-r--r--  drivers/gpu/drm/ttm/tests/.kunitconfig        |   4
-rw-r--r--  drivers/gpu/drm/ttm/tests/Makefile            |   6
-rw-r--r--  drivers/gpu/drm/ttm/tests/ttm_device_test.c   | 212
-rw-r--r--  drivers/gpu/drm/ttm/tests/ttm_kunit_helpers.c | 113
-rw-r--r--  drivers/gpu/drm/ttm/tests/ttm_kunit_helpers.h |  41
-rw-r--r--  drivers/gpu/drm/ttm/tests/ttm_pool_test.c     | 437
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo.c                  |  34
-rw-r--r--  drivers/gpu/drm/ttm/ttm_resource.c            |   5
9 files changed, 839 insertions, 14 deletions
diff --git a/drivers/gpu/drm/ttm/Makefile b/drivers/gpu/drm/ttm/Makefile
index f906b22959cf..dad298127226 100644
--- a/drivers/gpu/drm/ttm/Makefile
+++ b/drivers/gpu/drm/ttm/Makefile
@@ -8,3 +8,4 @@ ttm-y := ttm_tt.o ttm_bo.o ttm_bo_util.o ttm_bo_vm.o ttm_module.o \
ttm-$(CONFIG_AGP) += ttm_agp_backend.o
obj-$(CONFIG_DRM_TTM) += ttm.o
+obj-$(CONFIG_DRM_TTM_KUNIT_TEST) += tests/
diff --git a/drivers/gpu/drm/ttm/tests/.kunitconfig b/drivers/gpu/drm/ttm/tests/.kunitconfig
new file mode 100644
index 000000000000..75fdce0cd98e
--- /dev/null
+++ b/drivers/gpu/drm/ttm/tests/.kunitconfig
@@ -0,0 +1,4 @@
+CONFIG_KUNIT=y
+CONFIG_DRM=y
+CONFIG_DRM_KUNIT_TEST_HELPERS=y
+CONFIG_DRM_TTM_KUNIT_TEST=y
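With this .kunitconfig in place, the suites can be run with the standard in-tree KUnit wrapper from the top of the source tree (assuming the usual tools/testing/kunit setup):

	./tools/testing/kunit/kunit.py run --kunitconfig=drivers/gpu/drm/ttm/tests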
diff --git a/drivers/gpu/drm/ttm/tests/Makefile b/drivers/gpu/drm/ttm/tests/Makefile
new file mode 100644
index 000000000000..ec87c4fc1ad5
--- /dev/null
+++ b/drivers/gpu/drm/ttm/tests/Makefile
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0 AND MIT
+
+obj-$(CONFIG_DRM_TTM_KUNIT_TEST) += \
+ ttm_device_test.o \
+ ttm_pool_test.o \
+ ttm_kunit_helpers.o
diff --git a/drivers/gpu/drm/ttm/tests/ttm_device_test.c b/drivers/gpu/drm/ttm/tests/ttm_device_test.c
new file mode 100644
index 000000000000..b1b423b68cdf
--- /dev/null
+++ b/drivers/gpu/drm/ttm/tests/ttm_device_test.c
@@ -0,0 +1,212 @@
+// SPDX-License-Identifier: GPL-2.0 AND MIT
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+#include <drm/ttm/ttm_resource.h>
+#include <drm/ttm/ttm_device.h>
+#include <drm/ttm/ttm_placement.h>
+
+#include "ttm_kunit_helpers.h"
+
+struct ttm_device_test_case {
+ const char *description;
+ bool use_dma_alloc;
+ bool use_dma32;
+ bool pools_init_expected;
+};
+
+static void ttm_device_init_basic(struct kunit *test)
+{
+ struct ttm_test_devices *priv = test->priv;
+ struct ttm_device *ttm_dev;
+ struct ttm_resource_manager *ttm_sys_man;
+ int err;
+
+ ttm_dev = kunit_kzalloc(test, sizeof(*ttm_dev), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, ttm_dev);
+
+ err = ttm_device_kunit_init(priv, ttm_dev, false, false);
+ KUNIT_ASSERT_EQ(test, err, 0);
+
+ KUNIT_EXPECT_PTR_EQ(test, ttm_dev->funcs, &ttm_dev_funcs);
+ KUNIT_ASSERT_NOT_NULL(test, ttm_dev->wq);
+ KUNIT_ASSERT_NOT_NULL(test, ttm_dev->man_drv[TTM_PL_SYSTEM]);
+
+ ttm_sys_man = &ttm_dev->sysman;
+ KUNIT_ASSERT_NOT_NULL(test, ttm_sys_man);
+ KUNIT_EXPECT_TRUE(test, ttm_sys_man->use_tt);
+ KUNIT_EXPECT_TRUE(test, ttm_sys_man->use_type);
+ KUNIT_ASSERT_NOT_NULL(test, ttm_sys_man->func);
+
+ KUNIT_EXPECT_PTR_EQ(test, ttm_dev->dev_mapping,
+ priv->drm->anon_inode->i_mapping);
+
+ ttm_device_fini(ttm_dev);
+}
+
+static void ttm_device_init_multiple(struct kunit *test)
+{
+ struct ttm_test_devices *priv = test->priv;
+ struct ttm_device *ttm_devs;
+ unsigned int i, num_dev = 3;
+ int err;
+
+ ttm_devs = kunit_kcalloc(test, num_dev, sizeof(*ttm_devs), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, ttm_devs);
+
+ for (i = 0; i < num_dev; i++) {
+ err = ttm_device_kunit_init(priv, &ttm_devs[i], false, false);
+ KUNIT_ASSERT_EQ(test, err, 0);
+
+ KUNIT_EXPECT_PTR_EQ(test, ttm_devs[i].dev_mapping,
+ priv->drm->anon_inode->i_mapping);
+ KUNIT_ASSERT_NOT_NULL(test, ttm_devs[i].wq);
+ KUNIT_EXPECT_PTR_EQ(test, ttm_devs[i].funcs, &ttm_dev_funcs);
+ KUNIT_ASSERT_NOT_NULL(test, ttm_devs[i].man_drv[TTM_PL_SYSTEM]);
+ }
+
+ KUNIT_ASSERT_EQ(test, list_count_nodes(&ttm_devs[0].device_list), num_dev);
+
+ for (i = 0; i < num_dev; i++)
+ ttm_device_fini(&ttm_devs[i]);
+}
+
+static void ttm_device_fini_basic(struct kunit *test)
+{
+ struct ttm_test_devices *priv = test->priv;
+ struct ttm_device *ttm_dev;
+ struct ttm_resource_manager *man;
+ int err;
+
+ ttm_dev = kunit_kzalloc(test, sizeof(*ttm_dev), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, ttm_dev);
+
+ err = ttm_device_kunit_init(priv, ttm_dev, false, false);
+ KUNIT_ASSERT_EQ(test, err, 0);
+
+ man = ttm_manager_type(ttm_dev, TTM_PL_SYSTEM);
+ KUNIT_ASSERT_NOT_NULL(test, man);
+
+ ttm_device_fini(ttm_dev);
+
+ KUNIT_ASSERT_FALSE(test, man->use_type);
+ KUNIT_ASSERT_TRUE(test, list_empty(&man->lru[0]));
+ KUNIT_ASSERT_NULL(test, ttm_dev->man_drv[TTM_PL_SYSTEM]);
+}
+
+static void ttm_device_init_no_vma_man(struct kunit *test)
+{
+ struct ttm_test_devices *priv = test->priv;
+ struct drm_device *drm = priv->drm;
+ struct ttm_device *ttm_dev;
+ struct drm_vma_offset_manager *vma_man;
+ int err;
+
+ ttm_dev = kunit_kzalloc(test, sizeof(*ttm_dev), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, ttm_dev);
+
+ /* Let's pretend there's no VMA manager allocated */
+ vma_man = drm->vma_offset_manager;
+ drm->vma_offset_manager = NULL;
+
+ err = ttm_device_kunit_init(priv, ttm_dev, false, false);
+ KUNIT_EXPECT_EQ(test, err, -EINVAL);
+
+ /* Bring the manager back for a graceful cleanup */
+ drm->vma_offset_manager = vma_man;
+}
+
+static const struct ttm_device_test_case ttm_device_cases[] = {
+ {
+ .description = "No DMA allocations, no DMA32 required",
+ .use_dma_alloc = false,
+ .use_dma32 = false,
+ .pools_init_expected = false,
+ },
+ {
+ .description = "DMA allocations, DMA32 required",
+ .use_dma_alloc = true,
+ .use_dma32 = true,
+ .pools_init_expected = true,
+ },
+ {
+ .description = "No DMA allocations, DMA32 required",
+ .use_dma_alloc = false,
+ .use_dma32 = true,
+ .pools_init_expected = false,
+ },
+ {
+ .description = "DMA allocations, no DMA32 required",
+ .use_dma_alloc = true,
+ .use_dma32 = false,
+ .pools_init_expected = true,
+ },
+};
+
+static void ttm_device_case_desc(const struct ttm_device_test_case *t, char *desc)
+{
+ strscpy(desc, t->description, KUNIT_PARAM_DESC_SIZE);
+}
+
+KUNIT_ARRAY_PARAM(ttm_device, ttm_device_cases, ttm_device_case_desc);
+
+static void ttm_device_init_pools(struct kunit *test)
+{
+ struct ttm_test_devices *priv = test->priv;
+ const struct ttm_device_test_case *params = test->param_value;
+ struct ttm_device *ttm_dev;
+ struct ttm_pool *pool;
+ struct ttm_pool_type pt;
+ int err;
+
+ ttm_dev = kunit_kzalloc(test, sizeof(*ttm_dev), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, ttm_dev);
+
+ err = ttm_device_kunit_init(priv, ttm_dev,
+ params->use_dma_alloc,
+ params->use_dma32);
+ KUNIT_ASSERT_EQ(test, err, 0);
+
+ pool = &ttm_dev->pool;
+ KUNIT_ASSERT_NOT_NULL(test, pool);
+ KUNIT_EXPECT_PTR_EQ(test, pool->dev, priv->dev);
+ KUNIT_EXPECT_EQ(test, pool->use_dma_alloc, params->use_dma_alloc);
+ KUNIT_EXPECT_EQ(test, pool->use_dma32, params->use_dma32);
+
+ if (params->pools_init_expected) {
+ for (int i = 0; i < TTM_NUM_CACHING_TYPES; ++i) {
+ for (int j = 0; j <= MAX_ORDER; ++j) {
+ pt = pool->caching[i].orders[j];
+ KUNIT_EXPECT_PTR_EQ(test, pt.pool, pool);
+ KUNIT_EXPECT_EQ(test, pt.caching, i);
+ KUNIT_EXPECT_EQ(test, pt.order, j);
+
+ if (params->use_dma_alloc)
+ KUNIT_ASSERT_FALSE(test,
+ list_empty(&pt.pages));
+ }
+ }
+ }
+
+ ttm_device_fini(ttm_dev);
+}
+
+static struct kunit_case ttm_device_test_cases[] = {
+ KUNIT_CASE(ttm_device_init_basic),
+ KUNIT_CASE(ttm_device_init_multiple),
+ KUNIT_CASE(ttm_device_fini_basic),
+ KUNIT_CASE(ttm_device_init_no_vma_man),
+ KUNIT_CASE_PARAM(ttm_device_init_pools, ttm_device_gen_params),
+ {}
+};
+
+static struct kunit_suite ttm_device_test_suite = {
+ .name = "ttm_device",
+ .init = ttm_test_devices_init,
+ .exit = ttm_test_devices_fini,
+ .test_cases = ttm_device_test_cases,
+};
+
+kunit_test_suites(&ttm_device_test_suite);
+
+MODULE_LICENSE("GPL");
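A note for readers new to parameterized KUnit tests: the KUNIT_ARRAY_PARAM(ttm_device, ...) invocation above is what defines the ttm_device_gen_params generator referenced by KUNIT_CASE_PARAM. A simplified sketch of what the macro expands to (see include/kunit/test.h for the exact definition):

	static const void *ttm_device_gen_params(const void *prev, char *desc)
	{
		const struct ttm_device_test_case *next =
			prev ? (const struct ttm_device_test_case *)prev + 1
			     : ttm_device_cases;

		if (next - ttm_device_cases < ARRAY_SIZE(ttm_device_cases)) {
			ttm_device_case_desc(next, desc);
			return next;
		}

		return NULL;
	}

KUnit keeps calling the generator until it returns NULL, running ttm_device_init_pools once per case with test->param_value set to the returned entry.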
diff --git a/drivers/gpu/drm/ttm/tests/ttm_kunit_helpers.c b/drivers/gpu/drm/ttm/tests/ttm_kunit_helpers.c
new file mode 100644
index 000000000000..81661d8827aa
--- /dev/null
+++ b/drivers/gpu/drm/ttm/tests/ttm_kunit_helpers.c
@@ -0,0 +1,113 @@
+// SPDX-License-Identifier: GPL-2.0 AND MIT
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+#include "ttm_kunit_helpers.h"
+
+struct ttm_device_funcs ttm_dev_funcs = {
+};
+EXPORT_SYMBOL_GPL(ttm_dev_funcs);
+
+int ttm_device_kunit_init(struct ttm_test_devices *priv,
+ struct ttm_device *ttm,
+ bool use_dma_alloc,
+ bool use_dma32)
+{
+ struct drm_device *drm = priv->drm;
+ int err;
+
+ err = ttm_device_init(ttm, &ttm_dev_funcs, drm->dev,
+ drm->anon_inode->i_mapping,
+ drm->vma_offset_manager,
+ use_dma_alloc, use_dma32);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(ttm_device_kunit_init);
+
+struct ttm_buffer_object *ttm_bo_kunit_init(struct kunit *test,
+ struct ttm_test_devices *devs,
+ size_t size)
+{
+ struct drm_gem_object gem_obj = { .size = size };
+ struct ttm_buffer_object *bo;
+
+ bo = kunit_kzalloc(test, sizeof(*bo), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, bo);
+
+ bo->base = gem_obj;
+ bo->bdev = devs->ttm_dev;
+
+ return bo;
+}
+EXPORT_SYMBOL_GPL(ttm_bo_kunit_init);
+
+struct ttm_test_devices *ttm_test_devices_basic(struct kunit *test)
+{
+ struct ttm_test_devices *devs;
+
+ devs = kunit_kzalloc(test, sizeof(*devs), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, devs);
+
+ devs->dev = drm_kunit_helper_alloc_device(test);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, devs->dev);
+
+ devs->drm = __drm_kunit_helper_alloc_drm_device(test, devs->dev,
+ sizeof(*devs->drm), 0,
+ DRIVER_GEM);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, devs->drm);
+
+ return devs;
+}
+EXPORT_SYMBOL_GPL(ttm_test_devices_basic);
+
+struct ttm_test_devices *ttm_test_devices_all(struct kunit *test)
+{
+ struct ttm_test_devices *devs;
+ struct ttm_device *ttm_dev;
+ int err;
+
+ devs = ttm_test_devices_basic(test);
+
+ ttm_dev = kunit_kzalloc(test, sizeof(*ttm_dev), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, ttm_dev);
+
+ err = ttm_device_kunit_init(devs, ttm_dev, false, false);
+ KUNIT_ASSERT_EQ(test, err, 0);
+
+ devs->ttm_dev = ttm_dev;
+
+ return devs;
+}
+EXPORT_SYMBOL_GPL(ttm_test_devices_all);
+
+void ttm_test_devices_put(struct kunit *test, struct ttm_test_devices *devs)
+{
+ if (devs->ttm_dev)
+ ttm_device_fini(devs->ttm_dev);
+
+ drm_kunit_helper_free_device(test, devs->dev);
+}
+EXPORT_SYMBOL_GPL(ttm_test_devices_put);
+
+int ttm_test_devices_init(struct kunit *test)
+{
+ struct ttm_test_devices *priv;
+
+ priv = ttm_test_devices_basic(test);
+ test->priv = priv;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ttm_test_devices_init);
+
+void ttm_test_devices_fini(struct kunit *test)
+{
+ ttm_test_devices_put(test, test->priv);
+}
+EXPORT_SYMBOL_GPL(ttm_test_devices_fini);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/ttm/tests/ttm_kunit_helpers.h b/drivers/gpu/drm/ttm/tests/ttm_kunit_helpers.h
new file mode 100644
index 000000000000..e261e3660d0b
--- /dev/null
+++ b/drivers/gpu/drm/ttm/tests/ttm_kunit_helpers.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: GPL-2.0 AND MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+#ifndef TTM_KUNIT_HELPERS_H
+#define TTM_KUNIT_HELPERS_H
+
+#include <drm/drm_drv.h>
+#include <drm/ttm/ttm_device.h>
+#include <drm/ttm/ttm_bo.h>
+
+#include <drm/drm_kunit_helpers.h>
+#include <kunit/test.h>
+
+extern struct ttm_device_funcs ttm_dev_funcs;
+
+struct ttm_test_devices {
+ struct drm_device *drm;
+ struct device *dev;
+ struct ttm_device *ttm_dev;
+};
+
+/* Building blocks for test-specific init functions */
+int ttm_device_kunit_init(struct ttm_test_devices *priv,
+ struct ttm_device *ttm,
+ bool use_dma_alloc,
+ bool use_dma32);
+struct ttm_buffer_object *ttm_bo_kunit_init(struct kunit *test,
+ struct ttm_test_devices *devs,
+ size_t size);
+
+struct ttm_test_devices *ttm_test_devices_basic(struct kunit *test);
+struct ttm_test_devices *ttm_test_devices_all(struct kunit *test);
+
+void ttm_test_devices_put(struct kunit *test, struct ttm_test_devices *devs);
+
+/* Generic init/fini for tests that only need DRM/TTM devices */
+int ttm_test_devices_init(struct kunit *test);
+void ttm_test_devices_fini(struct kunit *test);
+
+#endif // TTM_KUNIT_HELPERS_H
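A minimal sketch of how a test might build on these helpers (the test function below is hypothetical, not part of this patch): ttm_test_devices_all() yields fully initialised DRM and TTM devices, and ttm_bo_kunit_init() creates a mock BO against them:

	static void ttm_bo_kunit_example(struct kunit *test)
	{
		struct ttm_test_devices *devs;
		struct ttm_buffer_object *bo;

		devs = ttm_test_devices_all(test);

		bo = ttm_bo_kunit_init(test, devs, PAGE_SIZE);
		KUNIT_EXPECT_EQ(test, bo->base.size, PAGE_SIZE);
		KUNIT_EXPECT_PTR_EQ(test, bo->bdev, devs->ttm_dev);

		ttm_test_devices_put(test, devs);
	}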
diff --git a/drivers/gpu/drm/ttm/tests/ttm_pool_test.c b/drivers/gpu/drm/ttm/tests/ttm_pool_test.c
new file mode 100644
index 000000000000..2d9cae8cd984
--- /dev/null
+++ b/drivers/gpu/drm/ttm/tests/ttm_pool_test.c
@@ -0,0 +1,437 @@
+// SPDX-License-Identifier: GPL-2.0 AND MIT
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+#include <linux/mm.h>
+
+#include <drm/ttm/ttm_tt.h>
+#include <drm/ttm/ttm_pool.h>
+
+#include "ttm_kunit_helpers.h"
+
+struct ttm_pool_test_case {
+ const char *description;
+ unsigned int order;
+ bool use_dma_alloc;
+};
+
+struct ttm_pool_test_priv {
+ struct ttm_test_devices *devs;
+
+ /* Used to create mock ttm_tts */
+ struct ttm_buffer_object *mock_bo;
+};
+
+static struct ttm_operation_ctx simple_ctx = {
+ .interruptible = true,
+ .no_wait_gpu = false,
+};
+
+static int ttm_pool_test_init(struct kunit *test)
+{
+ struct ttm_pool_test_priv *priv;
+
+ priv = kunit_kzalloc(test, sizeof(*priv), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, priv);
+
+ priv->devs = ttm_test_devices_basic(test);
+ test->priv = priv;
+
+ return 0;
+}
+
+static void ttm_pool_test_fini(struct kunit *test)
+{
+ struct ttm_pool_test_priv *priv = test->priv;
+
+ ttm_test_devices_put(test, priv->devs);
+}
+
+static struct ttm_tt *ttm_tt_kunit_init(struct kunit *test,
+ uint32_t page_flags,
+ enum ttm_caching caching,
+ size_t size)
+{
+ struct ttm_pool_test_priv *priv = test->priv;
+ struct ttm_buffer_object *bo;
+ struct ttm_tt *tt;
+ int err;
+
+ bo = ttm_bo_kunit_init(test, priv->devs, size);
+ KUNIT_ASSERT_NOT_NULL(test, bo);
+ priv->mock_bo = bo;
+
+ tt = kunit_kzalloc(test, sizeof(*tt), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, tt);
+
+ err = ttm_tt_init(tt, priv->mock_bo, page_flags, caching, 0);
+ KUNIT_ASSERT_EQ(test, err, 0);
+
+ return tt;
+}
+
+static struct ttm_pool *ttm_pool_pre_populated(struct kunit *test,
+ size_t size,
+ enum ttm_caching caching)
+{
+ struct ttm_pool_test_priv *priv = test->priv;
+ struct ttm_test_devices *devs = priv->devs;
+ struct ttm_pool *pool;
+ struct ttm_tt *tt;
+ unsigned long order = __fls(size / PAGE_SIZE);
+ int err;
+
+ tt = ttm_tt_kunit_init(test, order, caching, size);
+ KUNIT_ASSERT_NOT_NULL(test, tt);
+
+ pool = kunit_kzalloc(test, sizeof(*pool), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, pool);
+
+ ttm_pool_init(pool, devs->dev, NUMA_NO_NODE, true, false);
+
+ err = ttm_pool_alloc(pool, tt, &simple_ctx);
+ KUNIT_ASSERT_EQ(test, err, 0);
+
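+ /*
+ * Freeing hands the pages back to the pool's per-order lists
+ * rather than the system, which is what pre-populates the pool.
+ */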
+ ttm_pool_free(pool, tt);
+ ttm_tt_fini(tt);
+
+ return pool;
+}
+
+static const struct ttm_pool_test_case ttm_pool_basic_cases[] = {
+ {
+ .description = "One page",
+ .order = 0,
+ },
+ {
+ .description = "More than one page",
+ .order = 2,
+ },
+ {
+ .description = "Above the allocation limit",
+ .order = MAX_ORDER + 1,
+ },
+ {
+ .description = "One page, with coherent DMA mappings enabled",
+ .order = 0,
+ .use_dma_alloc = true,
+ },
+ {
+ .description = "Above the allocation limit, with coherent DMA mappings enabled",
+ .order = MAX_ORDER + 1,
+ .use_dma_alloc = true,
+ },
+};
+
+static void ttm_pool_alloc_case_desc(const struct ttm_pool_test_case *t,
+ char *desc)
+{
+ strscpy(desc, t->description, KUNIT_PARAM_DESC_SIZE);
+}
+
+KUNIT_ARRAY_PARAM(ttm_pool_alloc_basic, ttm_pool_basic_cases,
+ ttm_pool_alloc_case_desc);
+
+static void ttm_pool_alloc_basic(struct kunit *test)
+{
+ struct ttm_pool_test_priv *priv = test->priv;
+ struct ttm_test_devices *devs = priv->devs;
+ const struct ttm_pool_test_case *params = test->param_value;
+ struct ttm_tt *tt;
+ struct ttm_pool *pool;
+ struct page *fst_page, *last_page;
+ enum ttm_caching caching = ttm_uncached;
+ unsigned int expected_num_pages = 1 << params->order;
+ size_t size = expected_num_pages * PAGE_SIZE;
+ int err;
+
+ tt = ttm_tt_kunit_init(test, 0, caching, size);
+ KUNIT_ASSERT_NOT_NULL(test, tt);
+
+ pool = kunit_kzalloc(test, sizeof(*pool), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, pool);
+
+ ttm_pool_init(pool, devs->dev, NUMA_NO_NODE, params->use_dma_alloc,
+ false);
+
+ KUNIT_ASSERT_PTR_EQ(test, pool->dev, devs->dev);
+ KUNIT_ASSERT_EQ(test, pool->nid, NUMA_NO_NODE);
+ KUNIT_ASSERT_EQ(test, pool->use_dma_alloc, params->use_dma_alloc);
+
+ err = ttm_pool_alloc(pool, tt, &simple_ctx);
+ KUNIT_ASSERT_EQ(test, err, 0);
+ KUNIT_ASSERT_EQ(test, tt->num_pages, expected_num_pages);
+
+ fst_page = tt->pages[0];
+ last_page = tt->pages[tt->num_pages - 1];
+
+ if (params->order <= MAX_ORDER) {
+ if (params->use_dma_alloc) {
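+ /* With use_dma_alloc, page->private stores a struct ttm_pool_dma pointer */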
+ KUNIT_ASSERT_NOT_NULL(test, (void *)fst_page->private);
+ KUNIT_ASSERT_NOT_NULL(test, (void *)last_page->private);
+ } else {
+ KUNIT_ASSERT_EQ(test, fst_page->private, params->order);
+ }
+ } else {
+ if (params->use_dma_alloc) {
+ KUNIT_ASSERT_NOT_NULL(test, (void *)fst_page->private);
+ KUNIT_ASSERT_NULL(test, (void *)last_page->private);
+ } else {
+ /*
+ * We expect to alloc one big block, followed by
+ * order 0 blocks
+ */
+ KUNIT_ASSERT_EQ(test, fst_page->private,
+ min_t(unsigned int, MAX_ORDER,
+ params->order));
+ KUNIT_ASSERT_EQ(test, last_page->private, 0);
+ }
+ }
+
+ ttm_pool_free(pool, tt);
+ ttm_tt_fini(tt);
+ ttm_pool_fini(pool);
+}
+
+static void ttm_pool_alloc_basic_dma_addr(struct kunit *test)
+{
+ struct ttm_pool_test_priv *priv = test->priv;
+ struct ttm_test_devices *devs = priv->devs;
+ const struct ttm_pool_test_case *params = test->param_value;
+ struct ttm_tt *tt;
+ struct ttm_pool *pool;
+ struct ttm_buffer_object *bo;
+ dma_addr_t dma1, dma2;
+ enum ttm_caching caching = ttm_uncached;
+ unsigned int expected_num_pages = 1 << params->order;
+ size_t size = expected_num_pages * PAGE_SIZE;
+ int err;
+
+ tt = kunit_kzalloc(test, sizeof(*tt), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, tt);
+
+ bo = ttm_bo_kunit_init(test, devs, size);
+ KUNIT_ASSERT_NOT_NULL(test, bo);
+
+ err = ttm_sg_tt_init(tt, bo, 0, caching);
+ KUNIT_ASSERT_EQ(test, err, 0);
+
+ pool = kunit_kzalloc(test, sizeof(*pool), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, pool);
+
+ ttm_pool_init(pool, devs->dev, NUMA_NO_NODE, true, false);
+
+ err = ttm_pool_alloc(pool, tt, &simple_ctx);
+ KUNIT_ASSERT_EQ(test, err, 0);
+ KUNIT_ASSERT_EQ(test, tt->num_pages, expected_num_pages);
+
+ dma1 = tt->dma_address[0];
+ dma2 = tt->dma_address[tt->num_pages - 1];
+
+ KUNIT_ASSERT_NOT_NULL(test, (void *)(uintptr_t)dma1);
+ KUNIT_ASSERT_NOT_NULL(test, (void *)(uintptr_t)dma2);
+
+ ttm_pool_free(pool, tt);
+ ttm_tt_fini(tt);
+ ttm_pool_fini(pool);
+}
+
+static void ttm_pool_alloc_order_caching_match(struct kunit *test)
+{
+ struct ttm_tt *tt;
+ struct ttm_pool *pool;
+ struct ttm_pool_type *pt;
+ enum ttm_caching caching = ttm_uncached;
+ unsigned int order = 0;
+ size_t size = PAGE_SIZE;
+ int err;
+
+ pool = ttm_pool_pre_populated(test, size, caching);
+
+ pt = &pool->caching[caching].orders[order];
+ KUNIT_ASSERT_FALSE(test, list_empty(&pt->pages));
+
+ tt = ttm_tt_kunit_init(test, 0, caching, size);
+ KUNIT_ASSERT_NOT_NULL(test, tt);
+
+ err = ttm_pool_alloc(pool, tt, &simple_ctx);
+ KUNIT_ASSERT_EQ(test, err, 0);
+
+ KUNIT_ASSERT_TRUE(test, list_empty(&pt->pages));
+
+ ttm_pool_free(pool, tt);
+ ttm_tt_fini(tt);
+ ttm_pool_fini(pool);
+}
+
+static void ttm_pool_alloc_caching_mismatch(struct kunit *test)
+{
+ struct ttm_tt *tt;
+ struct ttm_pool *pool;
+ struct ttm_pool_type *pt_pool, *pt_tt;
+ enum ttm_caching tt_caching = ttm_uncached;
+ enum ttm_caching pool_caching = ttm_cached;
+ size_t size = PAGE_SIZE;
+ unsigned int order = 0;
+ int err;
+
+ pool = ttm_pool_pre_populated(test, size, pool_caching);
+
+ pt_pool = &pool->caching[pool_caching].orders[order];
+ pt_tt = &pool->caching[tt_caching].orders[order];
+
+ tt = ttm_tt_kunit_init(test, 0, tt_caching, size);
+ KUNIT_ASSERT_NOT_NULL(test, tt);
+
+ KUNIT_ASSERT_FALSE(test, list_empty(&pt_pool->pages));
+ KUNIT_ASSERT_TRUE(test, list_empty(&pt_tt->pages));
+
+ err = ttm_pool_alloc(pool, tt, &simple_ctx);
+ KUNIT_ASSERT_EQ(test, err, 0);
+
+ ttm_pool_free(pool, tt);
+ ttm_tt_fini(tt);
+
+ KUNIT_ASSERT_FALSE(test, list_empty(&pt_pool->pages));
+ KUNIT_ASSERT_FALSE(test, list_empty(&pt_tt->pages));
+
+ ttm_pool_fini(pool);
+}
+
+static void ttm_pool_alloc_order_mismatch(struct kunit *test)
+{
+ struct ttm_tt *tt;
+ struct ttm_pool *pool;
+ struct ttm_pool_type *pt_pool, *pt_tt;
+ enum ttm_caching caching = ttm_uncached;
+ unsigned int order = 2;
+ size_t fst_size = (1 << order) * PAGE_SIZE;
+ size_t snd_size = PAGE_SIZE;
+ int err;
+
+ pool = ttm_pool_pre_populated(test, fst_size, caching);
+
+ pt_pool = &pool->caching[caching].orders[order];
+ pt_tt = &pool->caching[caching].orders[0];
+
+ tt = ttm_tt_kunit_init(test, 0, caching, snd_size);
+ KUNIT_ASSERT_NOT_NULL(test, tt);
+
+ KUNIT_ASSERT_FALSE(test, list_empty(&pt_pool->pages));
+ KUNIT_ASSERT_TRUE(test, list_empty(&pt_tt->pages));
+
+ err = ttm_pool_alloc(pool, tt, &simple_ctx);
+ KUNIT_ASSERT_EQ(test, err, 0);
+
+ ttm_pool_free(pool, tt);
+ ttm_tt_fini(tt);
+
+ KUNIT_ASSERT_FALSE(test, list_empty(&pt_pool->pages));
+ KUNIT_ASSERT_FALSE(test, list_empty(&pt_tt->pages));
+
+ ttm_pool_fini(pool);
+}
+
+static void ttm_pool_free_dma_alloc(struct kunit *test)
+{
+ struct ttm_pool_test_priv *priv = test->priv;
+ struct ttm_test_devices *devs = priv->devs;
+ struct ttm_tt *tt;
+ struct ttm_pool *pool;
+ struct ttm_pool_type *pt;
+ enum ttm_caching caching = ttm_uncached;
+ unsigned int order = 2;
+ size_t size = (1 << order) * PAGE_SIZE;
+
+ tt = ttm_tt_kunit_init(test, 0, caching, size);
+ KUNIT_ASSERT_NOT_NULL(test, tt);
+
+ pool = kunit_kzalloc(test, sizeof(*pool), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, pool);
+
+ ttm_pool_init(pool, devs->dev, NUMA_NO_NODE, true, false);
+ ttm_pool_alloc(pool, tt, &simple_ctx);
+
+ pt = &pool->caching[caching].orders[order];
+ KUNIT_ASSERT_TRUE(test, list_empty(&pt->pages));
+
+ ttm_pool_free(pool, tt);
+ ttm_tt_fini(tt);
+
+ KUNIT_ASSERT_FALSE(test, list_empty(&pt->pages));
+
+ ttm_pool_fini(pool);
+}
+
+static void ttm_pool_free_no_dma_alloc(struct kunit *test)
+{
+ struct ttm_pool_test_priv *priv = test->priv;
+ struct ttm_test_devices *devs = priv->devs;
+ struct ttm_tt *tt;
+ struct ttm_pool *pool;
+ struct ttm_pool_type *pt;
+ enum ttm_caching caching = ttm_uncached;
+ unsigned int order = 2;
+ size_t size = (1 << order) * PAGE_SIZE;
+
+ tt = ttm_tt_kunit_init(test, 0, caching, size);
+ KUNIT_ASSERT_NOT_NULL(test, tt);
+
+ pool = kunit_kzalloc(test, sizeof(*pool), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, pool);
+
+ ttm_pool_init(pool, devs->dev, NUMA_NO_NODE, false, false);
+ ttm_pool_alloc(pool, tt, &simple_ctx);
+
+ pt = &pool->caching[caching].orders[order];
+ KUNIT_ASSERT_TRUE(test, list_is_singular(&pt->pages));
+
+ ttm_pool_free(pool, tt);
+ ttm_tt_fini(tt);
+
+ KUNIT_ASSERT_TRUE(test, list_is_singular(&pt->pages));
+
+ ttm_pool_fini(pool);
+}
+
+static void ttm_pool_fini_basic(struct kunit *test)
+{
+ struct ttm_pool *pool;
+ struct ttm_pool_type *pt;
+ enum ttm_caching caching = ttm_uncached;
+ unsigned int order = 0;
+ size_t size = PAGE_SIZE;
+
+ pool = ttm_pool_pre_populated(test, size, caching);
+ pt = &pool->caching[caching].orders[order];
+
+ KUNIT_ASSERT_FALSE(test, list_empty(&pt->pages));
+
+ ttm_pool_fini(pool);
+
+ KUNIT_ASSERT_TRUE(test, list_empty(&pt->pages));
+}
+
+static struct kunit_case ttm_pool_test_cases[] = {
+ KUNIT_CASE_PARAM(ttm_pool_alloc_basic, ttm_pool_alloc_basic_gen_params),
+ KUNIT_CASE_PARAM(ttm_pool_alloc_basic_dma_addr,
+ ttm_pool_alloc_basic_gen_params),
+ KUNIT_CASE(ttm_pool_alloc_order_caching_match),
+ KUNIT_CASE(ttm_pool_alloc_caching_mismatch),
+ KUNIT_CASE(ttm_pool_alloc_order_mismatch),
+ KUNIT_CASE(ttm_pool_free_dma_alloc),
+ KUNIT_CASE(ttm_pool_free_no_dma_alloc),
+ KUNIT_CASE(ttm_pool_fini_basic),
+ {}
+};
+
+static struct kunit_suite ttm_pool_test_suite = {
+ .name = "ttm_pool",
+ .init = ttm_pool_test_init,
+ .exit = ttm_pool_test_fini,
+ .test_cases = ttm_pool_test_cases,
+};
+
+kunit_test_suites(&ttm_pool_test_suite);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index bd5dae4d1624..e58b7e249816 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -345,6 +345,7 @@ static void ttm_bo_release(struct kref *kref)
if (!dma_resv_test_signaled(bo->base.resv,
DMA_RESV_USAGE_BOOKKEEP) ||
+ (want_init_on_free() && (bo->ttm != NULL)) ||
!dma_resv_trylock(bo->base.resv)) {
/* The BO is not idle, resurrect it for delayed destroy */
ttm_bo_flush_all_fences(bo);
@@ -458,18 +459,18 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo,
goto out;
}
-bounce:
- ret = ttm_bo_handle_move_mem(bo, evict_mem, true, ctx, &hop);
- if (ret == -EMULTIHOP) {
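+ /* Retry the move until it no longer requires a temporary bounce hop */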
+ do {
+ ret = ttm_bo_handle_move_mem(bo, evict_mem, true, ctx, &hop);
+ if (ret != -EMULTIHOP)
+ break;
+
ret = ttm_bo_bounce_temp_buffer(bo, &evict_mem, ctx, &hop);
- if (ret) {
- if (ret != -ERESTARTSYS && ret != -EINTR)
- pr_err("Buffer eviction failed\n");
- ttm_resource_free(bo, &evict_mem);
- goto out;
- }
- /* try and move to final place now. */
- goto bounce;
+ } while (!ret);
+
+ if (ret) {
+ ttm_resource_free(bo, &evict_mem);
+ if (ret != -ERESTARTSYS && ret != -EINTR)
+ pr_err("Buffer eviction failed\n");
}
out:
return ret;
@@ -517,6 +518,13 @@ static bool ttm_bo_evict_swapout_allowable(struct ttm_buffer_object *bo,
{
bool ret = false;
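+ /* Pinned BOs can never be evicted nor swapped out */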
+ if (bo->pin_count) {
+ *locked = false;
+ if (busy)
+ *busy = false;
+ return false;
+ }
+
if (bo->base.resv == ctx->resv) {
dma_resv_assert_held(bo->base.resv);
if (ctx->allow_res_evict)
@@ -1154,7 +1162,6 @@ int ttm_bo_swapout(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx,
* Move to system cached
*/
if (bo->resource->mem_type != TTM_PL_SYSTEM) {
- struct ttm_operation_ctx ctx = { false, false };
struct ttm_resource *evict_mem;
struct ttm_place hop;
@@ -1164,9 +1171,10 @@ int ttm_bo_swapout(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx,
if (unlikely(ret))
goto out;
- ret = ttm_bo_handle_move_mem(bo, evict_mem, true, &ctx, &hop);
+ ret = ttm_bo_handle_move_mem(bo, evict_mem, true, ctx, &hop);
if (unlikely(ret != 0)) {
WARN(ret == -EMULTIHOP, "Unexpected multihop in swapout - likely driver bug.\n");
+ ttm_resource_free(bo, &evict_mem);
goto out;
}
}
diff --git a/drivers/gpu/drm/ttm/ttm_resource.c b/drivers/gpu/drm/ttm/ttm_resource.c
index 7333f7a87a2f..46ff9c75bb12 100644
--- a/drivers/gpu/drm/ttm/ttm_resource.c
+++ b/drivers/gpu/drm/ttm/ttm_resource.c
@@ -86,6 +86,8 @@ static void ttm_lru_bulk_move_pos_tail(struct ttm_lru_bulk_move_pos *pos,
struct ttm_resource *res)
{
if (pos->last != res) {
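+ /* Moving the first entry to the tail must also advance pos->first */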
+ if (pos->first == res)
+ pos->first = list_next_entry(res, lru);
list_move(&res->lru, &pos->last->lru);
pos->last = res;
}
@@ -111,7 +113,8 @@ static void ttm_lru_bulk_move_del(struct ttm_lru_bulk_move *bulk,
{
struct ttm_lru_bulk_move_pos *pos = ttm_lru_bulk_move_pos(bulk, res);
- if (unlikely(pos->first == res && pos->last == res)) {
+ if (unlikely(WARN_ON(!pos->first || !pos->last) ||
+ (pos->first == res && pos->last == res))) {
pos->first = NULL;
pos->last = NULL;
} else if (pos->first == res) {