author    Linus Torvalds <torvalds@linux-foundation.org>  2023-04-27 19:08:08 +0300
committer Linus Torvalds <torvalds@linux-foundation.org>  2023-04-27 19:08:08 +0300
commit    6df969b77ecc2ba21dcd0e57f416e58bec2a5ca1
tree      d6010c95bb9434e718761fe2b8877a86fb3042f5 /tools
parent    32f7ad0fbe7521de2a5e8f79c33d46110247fd7c
parent    62e37c86bf0718e1ec0156c7a88a43ced6cdf201
Merge tag 'for-linus-iommufd' of git://git.kernel.org/pub/scm/linux/kernel/git/jgg/iommufd
Pull iommufd updates from Jason Gunthorpe:
 "Two series:

   - Reorganize how the hardware page table objects are managed,
     particularly their destruction flow. Increase the selftest coverage
     in this area by creating a more complete mock iommu driver.

     This is preparation to add a replace operation for HWPT binding,
     which is done but waiting for the VFIO parts to complete so there
     is a user.

   - Split the iommufd support for "access" to make it two step -
     allocate an access, then link it to an IOAS. Update VFIO and have
     VFIO always create an access, even for the VFIO mdevs that never
     do DMA.

     This is also preparation for the replace VFIO series that will
     allow replace to work on access types as well.

  Three minor fixes:

   - Syzkaller found the selftest code didn't check for overflow when
     processing user VAs

   - smatch noted a .data item should have been static

   - Add a selftest that reproduces a syzkaller bug for batch carry
     already fixed in rc"

* tag 'for-linus-iommufd' of git://git.kernel.org/pub/scm/linux/kernel/git/jgg/iommufd: (21 commits)
  iommufd/selftest: Cover domain unmap with huge pages and access
  iommufd/selftest: Set varaiable mock_iommu_device storage-class-specifier to static
  vfio: Check the presence for iommufd callbacks in __vfio_register_dev()
  vfio/mdev: Uses the vfio emulated iommufd ops set in the mdev sample drivers
  vfio-iommufd: Make vfio_iommufd_emulated_bind() return iommufd_access ID
  vfio-iommufd: No need to record iommufd_ctx in vfio_device
  iommufd: Create access in vfio_iommufd_emulated_bind()
  iommu/iommufd: Pass iommufd_ctx pointer in iommufd_get_ioas()
  iommufd/selftest: Catch overflow of uptr and length
  iommufd/selftest: Add a selftest for iommufd_device_attach() with a hwpt argument
  iommufd/selftest: Make selftest create a more complete mock device
  iommufd/selftest: Rename the remaining mock device_id's to stdev_id
  iommufd/selftest: Rename domain_id to hwpt_id for FIXTURE iommufd_mock_domain
  iommufd/selftest: Rename domain_id to stdev_id for FIXTURE iommufd_ioas
  iommufd/selftest: Rename the sefltest 'device_id' to 'stdev_id'
  iommufd: Make iommufd_hw_pagetable_alloc() do iopt_table_add_domain()
  iommufd: Move iommufd_device to iommufd_private.h
  iommufd: Move ioas related HWPT destruction into iommufd_hw_pagetable_destroy()
  iommufd: Consistently manage hwpt_item
  iommufd: Add iommufd_lock_obj() around the auto-domains hwpts
  ...
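
The first series changes the selftest calling convention that most of the hunks below touch: test_cmd_mock_domain() now reports both a selftest device ID (stdev_id) and the ID of the HWPT it was attached to (hwpt_id), and destroying the stdev also tears down its auto-allocated HWPT. Below is a minimal sketch of the new flow; the function name is illustrative only, and it assumes the helpers and the global 'buffer' from tools/testing/selftests/iommu/iommufd_utils.h.

/*
 * Sketch of the post-series selftest flow (illustrative name, assumes
 * iommufd_utils.h and its global 'buffer'): one mock-domain call yields
 * both IDs, and destroying the stdev frees its auto-allocated HWPT too.
 */
static int example_stdev_hwpt_flow(int fd, uint32_t ioas_id)
{
	__u32 stdev_id; /* mock "device" object */
	__u32 hwpt_id;  /* hw pagetable the device was auto-attached to */
	__u64 iova;

	if (_test_cmd_mock_domain(fd, ioas_id, &stdev_id, &hwpt_id))
		return -1;

	if (_test_ioctl_ioas_map(fd, ioas_id, buffer, 262144, &iova,
				 IOMMU_IOAS_MAP_WRITEABLE |
				 IOMMU_IOAS_MAP_READABLE))
		return -1;

	/*
	 * Before this series the caller destroyed device_id and hwpt_id
	 * separately; now destroying the stdev is sufficient.
	 */
	return _test_ioctl_destroy(fd, stdev_id);
}

This mirrors the updated basic_fail_nth map_domain test; the returned hwpt_id can still be used directly, which is what the new hwpt_attach test below checks by attaching a second mock domain to it.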
Diffstat (limited to 'tools')
-rw-r--r--  tools/testing/selftests/iommu/iommufd.c            104
-rw-r--r--  tools/testing/selftests/iommu/iommufd_fail_nth.c    38
-rw-r--r--  tools/testing/selftests/iommu/iommufd_utils.h       16
3 files changed, 97 insertions, 61 deletions
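
The second series' two-step "access" model is exercised by the access_pin and new access_domain_destory hunks in iommufd.c below. Condensed into one place, and assuming the iommufd_ioas fixture plus the macros from iommufd_utils.h (the test name here is illustrative, not part of the patch), the flow looks like:

/*
 * Illustrative condensation of the access flow shown in the hunks below;
 * assumes the iommufd_ioas fixture (self->fd, self->ioas_id,
 * self->base_iova) and the selftest's global 'buffer'.
 */
TEST_F(iommufd_ioas, access_two_step_sketch)
{
	struct iommu_test_cmd access_cmd = {
		.size = sizeof(access_cmd),
		.op = IOMMU_TEST_OP_ACCESS_PAGES,
		.access_pages = { .iova = self->base_iova,
				  .length = PAGE_SIZE,
				  .uptr = (uintptr_t)buffer },
	};

	/* Back the IOVA range with user memory first */
	test_ioctl_ioas_map_fixed(buffer, PAGE_SIZE, self->base_iova);

	/* Step 1: allocate an access and link it to the IOAS */
	test_cmd_create_access(self->ioas_id, &access_cmd.id,
			       MOCK_FLAGS_ACCESS_CREATE_NEEDS_PIN_PAGES);

	/* Step 2: pin pages through the access */
	ASSERT_EQ(0, ioctl(self->fd,
			   _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_PAGES),
			   &access_cmd));

	/* Unpin, then free the access object itself */
	test_cmd_destroy_access_pages(
		access_cmd.id, access_cmd.access_pages.out_access_pages_id);
	test_cmd_destroy_access(access_cmd.id);
}

The point of the split shows up in the teardown order: the access object outlives its pinned pages and, as access_domain_destory demonstrates, even the device/HWPT that shares the IOAS.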
diff --git a/tools/testing/selftests/iommu/iommufd.c b/tools/testing/selftests/iommu/iommufd.c
index fa08209268c4..e4a6b33cfde4 100644
--- a/tools/testing/selftests/iommu/iommufd.c
+++ b/tools/testing/selftests/iommu/iommufd.c
@@ -186,7 +186,8 @@ FIXTURE(iommufd_ioas)
{
int fd;
uint32_t ioas_id;
- uint32_t domain_id;
+ uint32_t stdev_id;
+ uint32_t hwpt_id;
uint64_t base_iova;
};
@@ -212,7 +213,8 @@ FIXTURE_SETUP(iommufd_ioas)
}
for (i = 0; i != variant->mock_domains; i++) {
- test_cmd_mock_domain(self->ioas_id, NULL, &self->domain_id);
+ test_cmd_mock_domain(self->ioas_id, &self->stdev_id,
+ &self->hwpt_id);
self->base_iova = MOCK_APERTURE_START;
}
}
@@ -249,8 +251,8 @@ TEST_F(iommufd_ioas, ioas_auto_destroy)
TEST_F(iommufd_ioas, ioas_destroy)
{
- if (self->domain_id) {
- /* IOAS cannot be freed while a domain is on it */
+ if (self->stdev_id) {
+ /* IOAS cannot be freed while a device has a HWPT using it */
EXPECT_ERRNO(EBUSY,
_test_ioctl_destroy(self->fd, self->ioas_id));
} else {
@@ -259,11 +261,21 @@ TEST_F(iommufd_ioas, ioas_destroy)
}
}
+TEST_F(iommufd_ioas, hwpt_attach)
+{
+ /* Create a device attached directly to a hwpt */
+ if (self->stdev_id) {
+ test_cmd_mock_domain(self->hwpt_id, NULL, NULL);
+ } else {
+ test_err_mock_domain(ENOENT, self->hwpt_id, NULL, NULL);
+ }
+}
+
TEST_F(iommufd_ioas, ioas_area_destroy)
{
/* Adding an area does not change ability to destroy */
test_ioctl_ioas_map_fixed(buffer, PAGE_SIZE, self->base_iova);
- if (self->domain_id)
+ if (self->stdev_id)
EXPECT_ERRNO(EBUSY,
_test_ioctl_destroy(self->fd, self->ioas_id));
else
@@ -382,7 +394,7 @@ TEST_F(iommufd_ioas, area_auto_iova)
for (i = 0; i != 10; i++) {
size_t length = PAGE_SIZE * (i + 1);
- if (self->domain_id) {
+ if (self->stdev_id) {
test_ioctl_ioas_map(buffer, length, &iovas[i]);
} else {
test_ioctl_ioas_map((void *)(1UL << 31), length,
@@ -418,7 +430,7 @@ TEST_F(iommufd_ioas, area_auto_iova)
ioctl(self->fd, IOMMU_IOAS_ALLOW_IOVAS, &allow_cmd));
/* Allocate from an allowed region */
- if (self->domain_id) {
+ if (self->stdev_id) {
ranges[0].start = MOCK_APERTURE_START + PAGE_SIZE;
ranges[0].last = MOCK_APERTURE_START + PAGE_SIZE * 600 - 1;
} else {
@@ -525,7 +537,7 @@ TEST_F(iommufd_ioas, iova_ranges)
/* Range can be read */
ASSERT_EQ(0, ioctl(self->fd, IOMMU_IOAS_IOVA_RANGES, &ranges_cmd));
EXPECT_EQ(1, ranges_cmd.num_iovas);
- if (!self->domain_id) {
+ if (!self->stdev_id) {
EXPECT_EQ(0, ranges[0].start);
EXPECT_EQ(SIZE_MAX, ranges[0].last);
EXPECT_EQ(1, ranges_cmd.out_iova_alignment);
@@ -550,7 +562,7 @@ TEST_F(iommufd_ioas, iova_ranges)
&test_cmd));
ranges_cmd.num_iovas = BUFFER_SIZE / sizeof(*ranges);
ASSERT_EQ(0, ioctl(self->fd, IOMMU_IOAS_IOVA_RANGES, &ranges_cmd));
- if (!self->domain_id) {
+ if (!self->stdev_id) {
EXPECT_EQ(2, ranges_cmd.num_iovas);
EXPECT_EQ(0, ranges[0].start);
EXPECT_EQ(PAGE_SIZE - 1, ranges[0].last);
@@ -565,7 +577,7 @@ TEST_F(iommufd_ioas, iova_ranges)
/* Buffer too small */
memset(ranges, 0, BUFFER_SIZE);
ranges_cmd.num_iovas = 1;
- if (!self->domain_id) {
+ if (!self->stdev_id) {
EXPECT_ERRNO(EMSGSIZE, ioctl(self->fd, IOMMU_IOAS_IOVA_RANGES,
&ranges_cmd));
EXPECT_EQ(2, ranges_cmd.num_iovas);
@@ -582,6 +594,40 @@ TEST_F(iommufd_ioas, iova_ranges)
EXPECT_EQ(0, ranges[1].last);
}
+TEST_F(iommufd_ioas, access_domain_destory)
+{
+ struct iommu_test_cmd access_cmd = {
+ .size = sizeof(access_cmd),
+ .op = IOMMU_TEST_OP_ACCESS_PAGES,
+ .access_pages = { .iova = self->base_iova + PAGE_SIZE,
+ .length = PAGE_SIZE},
+ };
+ size_t buf_size = 2 * HUGEPAGE_SIZE;
+ uint8_t *buf;
+
+ buf = mmap(0, buf_size, PROT_READ | PROT_WRITE,
+ MAP_SHARED | MAP_ANONYMOUS | MAP_HUGETLB | MAP_POPULATE, -1,
+ 0);
+ ASSERT_NE(MAP_FAILED, buf);
+ test_ioctl_ioas_map_fixed(buf, buf_size, self->base_iova);
+
+ test_cmd_create_access(self->ioas_id, &access_cmd.id,
+ MOCK_FLAGS_ACCESS_CREATE_NEEDS_PIN_PAGES);
+ access_cmd.access_pages.uptr = (uintptr_t)buf + PAGE_SIZE;
+ ASSERT_EQ(0,
+ ioctl(self->fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_PAGES),
+ &access_cmd));
+
+ /* Causes a complicated unpin across a huge page boundary */
+ if (self->stdev_id)
+ test_ioctl_destroy(self->stdev_id);
+
+ test_cmd_destroy_access_pages(
+ access_cmd.id, access_cmd.access_pages.out_access_pages_id);
+ test_cmd_destroy_access(access_cmd.id);
+ ASSERT_EQ(0, munmap(buf, buf_size));
+}
+
TEST_F(iommufd_ioas, access_pin)
{
struct iommu_test_cmd access_cmd = {
@@ -605,7 +651,7 @@ TEST_F(iommufd_ioas, access_pin)
MOCK_FLAGS_ACCESS_CREATE_NEEDS_PIN_PAGES);
for (npages = 1; npages < BUFFER_SIZE / PAGE_SIZE; npages++) {
- uint32_t mock_device_id;
+ uint32_t mock_stdev_id;
uint32_t mock_hwpt_id;
access_cmd.access_pages.length = npages * PAGE_SIZE;
@@ -637,15 +683,14 @@ TEST_F(iommufd_ioas, access_pin)
ASSERT_EQ(0, ioctl(self->fd,
_IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_PAGES),
&access_cmd));
- test_cmd_mock_domain(self->ioas_id, &mock_device_id,
+ test_cmd_mock_domain(self->ioas_id, &mock_stdev_id,
&mock_hwpt_id);
check_map_cmd.id = mock_hwpt_id;
ASSERT_EQ(0, ioctl(self->fd,
_IOMMU_TEST_CMD(IOMMU_TEST_OP_MD_CHECK_MAP),
&check_map_cmd));
- test_ioctl_destroy(mock_device_id);
- test_ioctl_destroy(mock_hwpt_id);
+ test_ioctl_destroy(mock_stdev_id);
test_cmd_destroy_access_pages(
access_cmd.id,
access_cmd.access_pages.out_access_pages_id);
@@ -789,7 +834,7 @@ TEST_F(iommufd_ioas, fork_gone)
ASSERT_NE(-1, child);
ASSERT_EQ(child, waitpid(child, NULL, 0));
- if (self->domain_id) {
+ if (self->stdev_id) {
/*
* If a domain already existed then everything was pinned within
* the fork, so this copies from one domain to another.
@@ -988,8 +1033,8 @@ FIXTURE(iommufd_mock_domain)
{
int fd;
uint32_t ioas_id;
- uint32_t domain_id;
- uint32_t domain_ids[2];
+ uint32_t hwpt_id;
+ uint32_t hwpt_ids[2];
int mmap_flags;
size_t mmap_buf_size;
};
@@ -1008,11 +1053,11 @@ FIXTURE_SETUP(iommufd_mock_domain)
ASSERT_NE(-1, self->fd);
test_ioctl_ioas_alloc(&self->ioas_id);
- ASSERT_GE(ARRAY_SIZE(self->domain_ids), variant->mock_domains);
+ ASSERT_GE(ARRAY_SIZE(self->hwpt_ids), variant->mock_domains);
for (i = 0; i != variant->mock_domains; i++)
- test_cmd_mock_domain(self->ioas_id, NULL, &self->domain_ids[i]);
- self->domain_id = self->domain_ids[0];
+ test_cmd_mock_domain(self->ioas_id, NULL, &self->hwpt_ids[i]);
+ self->hwpt_id = self->hwpt_ids[0];
self->mmap_flags = MAP_SHARED | MAP_ANONYMOUS;
self->mmap_buf_size = PAGE_SIZE * 8;
@@ -1061,7 +1106,7 @@ FIXTURE_VARIANT_ADD(iommufd_mock_domain, two_domains_hugepage)
struct iommu_test_cmd check_map_cmd = { \
.size = sizeof(check_map_cmd), \
.op = IOMMU_TEST_OP_MD_CHECK_MAP, \
- .id = self->domain_id, \
+ .id = self->hwpt_id, \
.check_map = { .iova = _iova, \
.length = _length, \
.uptr = (uintptr_t)(_ptr) }, \
@@ -1070,8 +1115,8 @@ FIXTURE_VARIANT_ADD(iommufd_mock_domain, two_domains_hugepage)
ioctl(self->fd, \
_IOMMU_TEST_CMD(IOMMU_TEST_OP_MD_CHECK_MAP), \
&check_map_cmd)); \
- if (self->domain_ids[1]) { \
- check_map_cmd.id = self->domain_ids[1]; \
+ if (self->hwpt_ids[1]) { \
+ check_map_cmd.id = self->hwpt_ids[1]; \
ASSERT_EQ(0, \
ioctl(self->fd, \
_IOMMU_TEST_CMD( \
@@ -1197,15 +1242,15 @@ TEST_F(iommufd_mock_domain, all_aligns_copy)
for (; end < buf_size; end += MOCK_PAGE_SIZE) {
size_t length = end - start;
unsigned int old_id;
- uint32_t mock_device_id;
+ uint32_t mock_stdev_id;
__u64 iova;
test_ioctl_ioas_map(buf + start, length, &iova);
/* Add and destroy a domain while the area exists */
- old_id = self->domain_ids[1];
- test_cmd_mock_domain(self->ioas_id, &mock_device_id,
- &self->domain_ids[1]);
+ old_id = self->hwpt_ids[1];
+ test_cmd_mock_domain(self->ioas_id, &mock_stdev_id,
+ &self->hwpt_ids[1]);
check_mock_iova(buf + start, iova, length);
check_refs(buf + start / PAGE_SIZE * PAGE_SIZE,
@@ -1213,9 +1258,8 @@ TEST_F(iommufd_mock_domain, all_aligns_copy)
start / PAGE_SIZE * PAGE_SIZE,
1);
- test_ioctl_destroy(mock_device_id);
- test_ioctl_destroy(self->domain_ids[1]);
- self->domain_ids[1] = old_id;
+ test_ioctl_destroy(mock_stdev_id);
+ self->hwpt_ids[1] = old_id;
test_ioctl_ioas_unmap(iova, length);
}
diff --git a/tools/testing/selftests/iommu/iommufd_fail_nth.c b/tools/testing/selftests/iommu/iommufd_fail_nth.c
index 9713111b820d..d9afcb23810e 100644
--- a/tools/testing/selftests/iommu/iommufd_fail_nth.c
+++ b/tools/testing/selftests/iommu/iommufd_fail_nth.c
@@ -297,7 +297,7 @@ TEST_FAIL_NTH(basic_fail_nth, basic)
TEST_FAIL_NTH(basic_fail_nth, map_domain)
{
uint32_t ioas_id;
- __u32 device_id;
+ __u32 stdev_id;
__u32 hwpt_id;
__u64 iova;
@@ -313,7 +313,7 @@ TEST_FAIL_NTH(basic_fail_nth, map_domain)
fail_nth_enable();
- if (_test_cmd_mock_domain(self->fd, ioas_id, &device_id, &hwpt_id))
+ if (_test_cmd_mock_domain(self->fd, ioas_id, &stdev_id, &hwpt_id))
return -1;
if (_test_ioctl_ioas_map(self->fd, ioas_id, buffer, 262144, &iova,
@@ -321,12 +321,10 @@ TEST_FAIL_NTH(basic_fail_nth, map_domain)
IOMMU_IOAS_MAP_READABLE))
return -1;
- if (_test_ioctl_destroy(self->fd, device_id))
- return -1;
- if (_test_ioctl_destroy(self->fd, hwpt_id))
+ if (_test_ioctl_destroy(self->fd, stdev_id))
return -1;
- if (_test_cmd_mock_domain(self->fd, ioas_id, &device_id, &hwpt_id))
+ if (_test_cmd_mock_domain(self->fd, ioas_id, &stdev_id, &hwpt_id))
return -1;
return 0;
}
@@ -334,8 +332,8 @@ TEST_FAIL_NTH(basic_fail_nth, map_domain)
TEST_FAIL_NTH(basic_fail_nth, map_two_domains)
{
uint32_t ioas_id;
- __u32 device_id2;
- __u32 device_id;
+ __u32 stdev_id2;
+ __u32 stdev_id;
__u32 hwpt_id2;
__u32 hwpt_id;
__u64 iova;
@@ -350,12 +348,12 @@ TEST_FAIL_NTH(basic_fail_nth, map_two_domains)
if (_test_ioctl_set_temp_memory_limit(self->fd, 32))
return -1;
- if (_test_cmd_mock_domain(self->fd, ioas_id, &device_id, &hwpt_id))
+ if (_test_cmd_mock_domain(self->fd, ioas_id, &stdev_id, &hwpt_id))
return -1;
fail_nth_enable();
- if (_test_cmd_mock_domain(self->fd, ioas_id, &device_id2, &hwpt_id2))
+ if (_test_cmd_mock_domain(self->fd, ioas_id, &stdev_id2, &hwpt_id2))
return -1;
if (_test_ioctl_ioas_map(self->fd, ioas_id, buffer, 262144, &iova,
@@ -363,19 +361,15 @@ TEST_FAIL_NTH(basic_fail_nth, map_two_domains)
IOMMU_IOAS_MAP_READABLE))
return -1;
- if (_test_ioctl_destroy(self->fd, device_id))
- return -1;
- if (_test_ioctl_destroy(self->fd, hwpt_id))
+ if (_test_ioctl_destroy(self->fd, stdev_id))
return -1;
- if (_test_ioctl_destroy(self->fd, device_id2))
- return -1;
- if (_test_ioctl_destroy(self->fd, hwpt_id2))
+ if (_test_ioctl_destroy(self->fd, stdev_id2))
return -1;
- if (_test_cmd_mock_domain(self->fd, ioas_id, &device_id, &hwpt_id))
+ if (_test_cmd_mock_domain(self->fd, ioas_id, &stdev_id, &hwpt_id))
return -1;
- if (_test_cmd_mock_domain(self->fd, ioas_id, &device_id2, &hwpt_id2))
+ if (_test_cmd_mock_domain(self->fd, ioas_id, &stdev_id2, &hwpt_id2))
return -1;
return 0;
}
@@ -518,7 +512,7 @@ TEST_FAIL_NTH(basic_fail_nth, access_pin_domain)
{
uint32_t access_pages_id;
uint32_t ioas_id;
- __u32 device_id;
+ __u32 stdev_id;
__u32 hwpt_id;
__u64 iova;
@@ -532,7 +526,7 @@ TEST_FAIL_NTH(basic_fail_nth, access_pin_domain)
if (_test_ioctl_set_temp_memory_limit(self->fd, 32))
return -1;
- if (_test_cmd_mock_domain(self->fd, ioas_id, &device_id, &hwpt_id))
+ if (_test_cmd_mock_domain(self->fd, ioas_id, &stdev_id, &hwpt_id))
return -1;
if (_test_ioctl_ioas_map(self->fd, ioas_id, buffer, BUFFER_SIZE, &iova,
@@ -570,9 +564,7 @@ TEST_FAIL_NTH(basic_fail_nth, access_pin_domain)
return -1;
self->access_id = 0;
- if (_test_ioctl_destroy(self->fd, device_id))
- return -1;
- if (_test_ioctl_destroy(self->fd, hwpt_id))
+ if (_test_ioctl_destroy(self->fd, stdev_id))
return -1;
return 0;
}
diff --git a/tools/testing/selftests/iommu/iommufd_utils.h b/tools/testing/selftests/iommu/iommufd_utils.h
index 0d1f46369c2a..85d6662ef8e8 100644
--- a/tools/testing/selftests/iommu/iommufd_utils.h
+++ b/tools/testing/selftests/iommu/iommufd_utils.h
@@ -38,7 +38,7 @@ static unsigned long BUFFER_SIZE;
&test_cmd)); \
})
-static int _test_cmd_mock_domain(int fd, unsigned int ioas_id, __u32 *device_id,
+static int _test_cmd_mock_domain(int fd, unsigned int ioas_id, __u32 *stdev_id,
__u32 *hwpt_id)
{
struct iommu_test_cmd cmd = {
@@ -52,19 +52,19 @@ static int _test_cmd_mock_domain(int fd, unsigned int ioas_id, __u32 *device_id,
ret = ioctl(fd, IOMMU_TEST_CMD, &cmd);
if (ret)
return ret;
- if (device_id)
- *device_id = cmd.mock_domain.out_device_id;
+ if (stdev_id)
+ *stdev_id = cmd.mock_domain.out_stdev_id;
assert(cmd.id != 0);
if (hwpt_id)
*hwpt_id = cmd.mock_domain.out_hwpt_id;
return 0;
}
-#define test_cmd_mock_domain(ioas_id, device_id, hwpt_id) \
- ASSERT_EQ(0, _test_cmd_mock_domain(self->fd, ioas_id, device_id, \
- hwpt_id))
-#define test_err_mock_domain(_errno, ioas_id, device_id, hwpt_id) \
+#define test_cmd_mock_domain(ioas_id, stdev_id, hwpt_id) \
+ ASSERT_EQ(0, \
+ _test_cmd_mock_domain(self->fd, ioas_id, stdev_id, hwpt_id))
+#define test_err_mock_domain(_errno, ioas_id, stdev_id, hwpt_id) \
EXPECT_ERRNO(_errno, _test_cmd_mock_domain(self->fd, ioas_id, \
- device_id, hwpt_id))
+ stdev_id, hwpt_id))
static int _test_cmd_create_access(int fd, unsigned int ioas_id,
__u32 *access_id, unsigned int flags)