-rw-r--r--  drivers/iommu/Kconfig                              2
-rw-r--r--  drivers/iommu/arm/arm-smmu-v3/Makefile             3
-rw-r--r--  drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c    1
-rw-r--r--  drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-test.c   3
-rw-r--r--  drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c        8
5 files changed, 15 insertions(+), 2 deletions(-)
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index 66325210c8c9..c04584be3089 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -415,7 +415,7 @@ config ARM_SMMU_V3_SVA
and PRI.
config ARM_SMMU_V3_KUNIT_TEST
- bool "KUnit tests for arm-smmu-v3 driver" if !KUNIT_ALL_TESTS
+ tristate "KUnit tests for arm-smmu-v3 driver" if !KUNIT_ALL_TESTS
depends on KUNIT
depends on ARM_SMMU_V3_SVA
default KUNIT_ALL_TESTS
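Switching the option from bool to tristate is what allows the tests to be built as a loadable module instead of only ever being linked into the driver. An illustrative .config fragment (values assumed, not taken from the patch) that builds the suite as a module:

	CONFIG_KUNIT=m
	CONFIG_ARM_SMMU_V3=y
	CONFIG_ARM_SMMU_V3_SVA=y
	CONFIG_ARM_SMMU_V3_KUNIT_TEST=m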
diff --git a/drivers/iommu/arm/arm-smmu-v3/Makefile b/drivers/iommu/arm/arm-smmu-v3/Makefile
index 0b97054b3929..014a997753a8 100644
--- a/drivers/iommu/arm/arm-smmu-v3/Makefile
+++ b/drivers/iommu/arm/arm-smmu-v3/Makefile
@@ -2,5 +2,6 @@
obj-$(CONFIG_ARM_SMMU_V3) += arm_smmu_v3.o
arm_smmu_v3-objs-y += arm-smmu-v3.o
arm_smmu_v3-objs-$(CONFIG_ARM_SMMU_V3_SVA) += arm-smmu-v3-sva.o
-arm_smmu_v3-objs-$(CONFIG_ARM_SMMU_V3_KUNIT_TEST) += arm-smmu-v3-test.o
arm_smmu_v3-objs := $(arm_smmu_v3-objs-y)
+
+obj-$(CONFIG_ARM_SMMU_V3_KUNIT_TEST) += arm-smmu-v3-test.o
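The test object is dropped from the composite arm_smmu_v3-objs list and given its own obj-$() rule, so kbuild treats it as an independent target: built in when the option is =y, or emitted as its own module when =m, rather than always being linked into arm_smmu_v3.o.

	# Expected build outputs with the test option set to "m" (illustrative):
	#   arm_smmu_v3.ko        - the driver itself (when ARM_SMMU_V3=m)
	#   arm-smmu-v3-test.ko   - the KUnit test suite, loadable separately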
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c
index 34a977a0767d..e490ffb38015 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c
@@ -185,6 +185,7 @@ void arm_smmu_make_sva_cd(struct arm_smmu_cd *target,
*/
target->data[3] = cpu_to_le64(read_sysreg(mair_el1));
}
+EXPORT_SYMBOL_IF_KUNIT(arm_smmu_make_sva_cd);
static struct arm_smmu_ctx_desc *arm_smmu_alloc_shared_cd(struct mm_struct *mm)
{
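The EXPORT_SYMBOL_IF_KUNIT() annotation used here and below comes from <kunit/visibility.h>. Roughly (a simplified sketch, not a verbatim copy of the header), it exports the symbol into a dedicated namespace only when KUnit is configured, and compiles away otherwise:

	#if IS_ENABLED(CONFIG_KUNIT)
	#define VISIBLE_IF_KUNIT		/* symbol loses "static" so tests can reference it */
	#define EXPORT_SYMBOL_IF_KUNIT(symbol) \
		EXPORT_SYMBOL_NS(symbol, EXPORTED_FOR_KUNIT_TESTING)
	#else
	#define VISIBLE_IF_KUNIT		static
	#define EXPORT_SYMBOL_IF_KUNIT(symbol)
	#endif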
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-test.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-test.c
index 417804392ff0..315e487fd990 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-test.c
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-test.c
@@ -463,3 +463,6 @@ static struct kunit_suite arm_smmu_v3_test_module = {
.test_cases = arm_smmu_v3_test_cases,
};
kunit_test_suites(&arm_smmu_v3_test_module);
+
+MODULE_IMPORT_NS(EXPORTED_FOR_KUNIT_TESTING);
+MODULE_LICENSE("GPL v2");
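Now that the tests can be a separate module, MODULE_IMPORT_NS(EXPORTED_FOR_KUNIT_TESTING) is needed so the module may link against the namespaced EXPORT_SYMBOL_IF_KUNIT() symbols, and MODULE_LICENSE() is mandatory for any loadable module. A rough usage sketch (module name assumed from the object name):

	# Illustrative only: load the test module and read the KUnit results from the log.
	modprobe arm-smmu-v3-test
	dmesg | grep -i kunit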
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
index 1ad0937760c6..aa62f0ecd053 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
@@ -1007,6 +1007,7 @@ void arm_smmu_get_ste_used(const __le64 *ent, __le64 *used_bits)
if (cfg == STRTAB_STE_0_CFG_BYPASS)
used_bits[1] |= cpu_to_le64(STRTAB_STE_1_SHCFG);
}
+EXPORT_SYMBOL_IF_KUNIT(arm_smmu_get_ste_used);
/*
* Figure out if we can do a hitless update of entry to become target. Returns a
@@ -1141,6 +1142,7 @@ void arm_smmu_write_entry(struct arm_smmu_entry_writer *writer, __le64 *entry,
entry_set(writer, entry, target, 0, NUM_ENTRY_QWORDS));
}
}
+EXPORT_SYMBOL_IF_KUNIT(arm_smmu_write_entry);
static void arm_smmu_sync_cd(struct arm_smmu_master *master,
int ssid, bool leaf)
@@ -1268,6 +1270,7 @@ void arm_smmu_get_cd_used(const __le64 *ent, __le64 *used_bits)
used_bits[1] &= ~cpu_to_le64(CTXDESC_CD_1_TTB0_MASK);
}
}
+EXPORT_SYMBOL_IF_KUNIT(arm_smmu_get_cd_used);
static void arm_smmu_cd_writer_sync_entry(struct arm_smmu_entry_writer *writer)
{
@@ -1332,6 +1335,7 @@ void arm_smmu_make_s1_cd(struct arm_smmu_cd *target,
CTXDESC_CD_1_TTB0_MASK);
target->data[3] = cpu_to_le64(pgtbl_cfg->arm_lpae_s1_cfg.mair);
}
+EXPORT_SYMBOL_IF_KUNIT(arm_smmu_make_s1_cd);
void arm_smmu_clear_cd(struct arm_smmu_master *master, ioasid_t ssid)
{
@@ -1515,6 +1519,7 @@ void arm_smmu_make_abort_ste(struct arm_smmu_ste *target)
STRTAB_STE_0_V |
FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_ABORT));
}
+EXPORT_SYMBOL_IF_KUNIT(arm_smmu_make_abort_ste);
VISIBLE_IF_KUNIT
void arm_smmu_make_bypass_ste(struct arm_smmu_device *smmu,
@@ -1529,6 +1534,7 @@ void arm_smmu_make_bypass_ste(struct arm_smmu_device *smmu,
target->data[1] = cpu_to_le64(FIELD_PREP(STRTAB_STE_1_SHCFG,
STRTAB_STE_1_SHCFG_INCOMING));
}
+EXPORT_SYMBOL_IF_KUNIT(arm_smmu_make_bypass_ste);
VISIBLE_IF_KUNIT
void arm_smmu_make_cdtable_ste(struct arm_smmu_ste *target,
@@ -1580,6 +1586,7 @@ void arm_smmu_make_cdtable_ste(struct arm_smmu_ste *target,
cpu_to_le64(FIELD_PREP(STRTAB_STE_2_S2VMID, 0));
}
}
+EXPORT_SYMBOL_IF_KUNIT(arm_smmu_make_cdtable_ste);
VISIBLE_IF_KUNIT
void arm_smmu_make_s2_domain_ste(struct arm_smmu_ste *target,
@@ -1627,6 +1634,7 @@ void arm_smmu_make_s2_domain_ste(struct arm_smmu_ste *target,
target->data[3] = cpu_to_le64(pgtbl_cfg->arm_lpae_s2_cfg.vttbr &
STRTAB_STE_3_S2TTB_MASK);
}
+EXPORT_SYMBOL_IF_KUNIT(arm_smmu_make_s2_domain_ste);
/*
* This can safely directly manipulate the STE memory without a sync sequence
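The EXPORT_SYMBOL_IF_KUNIT() lines added throughout this file cover the STE/CD construction helpers that the KUnit suite exercises; without them the now-standalone test module could not resolve those symbols. A minimal sketch of the kind of test case this enables (illustrative only, not taken from arm-smmu-v3-test.c; assumes <kunit/test.h> and "arm-smmu-v3.h" are included):

	static void demo_abort_ste(struct kunit *test)
	{
		struct arm_smmu_ste ste = {};

		arm_smmu_make_abort_ste(&ste);
		/* An abort STE is marked valid with CFG set to "abort". */
		KUNIT_EXPECT_TRUE(test, le64_to_cpu(ste.data[0]) & STRTAB_STE_0_V);
		KUNIT_EXPECT_EQ(test,
				FIELD_GET(STRTAB_STE_0_CFG, le64_to_cpu(ste.data[0])),
				(u64)STRTAB_STE_0_CFG_ABORT);
	}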