author	Jean-Philippe Brucker <jean-philippe@linaro.org>	2020-09-18 13:18:50 +0300
committer	Will Deacon <will@kernel.org>	2020-09-29 01:48:06 +0300
commit	d744f9e6c222c8086376bc56c86e4380595350bd (patch)
tree	7d321bd8985731609a9733e8cfa299bb230674a9 /drivers/iommu
parent	3e63033675c9b1e79b14655481a8c4ecaf23821f (diff)
download	linux-d744f9e6c222c8086376bc56c86e4380595350bd.tar.xz
iommu/arm-smmu-v3: Check for SVA features
Aggregate all sanity-checks for sharing CPU page tables with the SMMU under
a single ARM_SMMU_FEAT_SVA bit. For PCIe SVA, users also need to check
FEAT_ATS and FEAT_PRI. For platform SVA, they will have to check FEAT_STALLS.

Introduce ARM_SMMU_FEAT_BTM (Broadcast TLB Maintenance), but don't enable
it at the moment. Since the entire VMID space is shared with the CPU,
enabling DVM (by clearing SMMU_CR2.PTM) could result in over-invalidation
and affect performance of stage-2 mappings.

Signed-off-by: Jean-Philippe Brucker <jean-philippe@linaro.org>
Reviewed-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
Cc: Suzuki K Poulose <suzuki.poulose@arm.com>
Link: https://lore.kernel.org/r/20200918101852.582559-11-jean-philippe@linaro.org
Signed-off-by: Will Deacon <will@kernel.org>
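As a quick illustration of the split described above, a consumer of the new bit
would still layer the transport-specific checks on top of it. The sketch below is
not part of this patch; the helper names are invented, and only the
ARM_SMMU_FEAT_* flags come from arm-smmu-v3.h:

/*
 * Hypothetical sketch, not from this patch: per-transport checks layered on
 * top of the aggregated ARM_SMMU_FEAT_SVA bit. Helper names are illustrative.
 */
#include "arm-smmu-v3.h"

/* PCIe SVA additionally needs ATS and PRI, as noted in the commit message. */
static bool example_pcie_sva_usable(struct arm_smmu_device *smmu)
{
	const u32 mask = ARM_SMMU_FEAT_SVA | ARM_SMMU_FEAT_ATS |
			 ARM_SMMU_FEAT_PRI;

	return (smmu->features & mask) == mask;
}

/* Platform SVA relies on stalling faulting transactions instead. */
static bool example_platform_sva_usable(struct arm_smmu_device *smmu)
{
	const u32 mask = ARM_SMMU_FEAT_SVA | ARM_SMMU_FEAT_STALLS;

	return (smmu->features & mask) == mask;
}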
Diffstat (limited to 'drivers/iommu')
-rw-r--r--	drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c	45
-rw-r--r--	drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c	3
-rw-r--r--	drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h	10
3 files changed, 58 insertions, 0 deletions
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c
index ef3fcfa72187..cb94c0924196 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c
@@ -152,3 +152,48 @@ static void arm_smmu_free_shared_cd(struct arm_smmu_ctx_desc *cd)
		kfree(cd);
	}
}
+
+bool arm_smmu_sva_supported(struct arm_smmu_device *smmu)
+{
+	unsigned long reg, fld;
+	unsigned long oas;
+	unsigned long asid_bits;
+	u32 feat_mask = ARM_SMMU_FEAT_BTM | ARM_SMMU_FEAT_COHERENCY;
+
+	if (vabits_actual == 52)
+		feat_mask |= ARM_SMMU_FEAT_VAX;
+
+	if ((smmu->features & feat_mask) != feat_mask)
+		return false;
+
+	if (!(smmu->pgsize_bitmap & PAGE_SIZE))
+		return false;
+
+	/*
+	 * Get the smallest PA size of all CPUs (sanitized by cpufeature). We're
+	 * not even pretending to support AArch32 here. Abort if the MMU outputs
+	 * addresses larger than what we support.
+	 */
+	reg = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
+	fld = cpuid_feature_extract_unsigned_field(reg, ID_AA64MMFR0_PARANGE_SHIFT);
+	oas = id_aa64mmfr0_parange_to_phys_shift(fld);
+	if (smmu->oas < oas)
+		return false;
+
+	/* We can support bigger ASIDs than the CPU, but not smaller */
+	fld = cpuid_feature_extract_unsigned_field(reg, ID_AA64MMFR0_ASID_SHIFT);
+	asid_bits = fld ? 16 : 8;
+	if (smmu->asid_bits < asid_bits)
+		return false;
+
+	/*
+	 * See max_pinned_asids in arch/arm64/mm/context.c. The following is
+	 * generally the maximum number of bindable processes.
+	 */
+	if (arm64_kernel_unmapped_at_el0())
+		asid_bits--;
+	dev_dbg(smmu->dev, "%d shared contexts\n", (1 << asid_bits) -
+		num_possible_cpus() - 2);
+
+	return true;
+}
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
index 14108f1cd2ea..cc02d6d815e6 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
@@ -3274,6 +3274,9 @@ static int arm_smmu_device_hw_probe(struct arm_smmu_device *smmu)
	smmu->ias = max(smmu->ias, smmu->oas);

+	if (arm_smmu_sva_supported(smmu))
+		smmu->features |= ARM_SMMU_FEAT_SVA;
+
	dev_info(smmu->dev, "ias %lu-bit, oas %lu-bit (features 0x%08x)\n",
		 smmu->ias, smmu->oas, smmu->features);
	return 0;
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
index 1529714ce006..454a935de445 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
@@ -602,6 +602,8 @@ struct arm_smmu_device {
#define ARM_SMMU_FEAT_STALL_FORCE	(1 << 13)
#define ARM_SMMU_FEAT_VAX		(1 << 14)
#define ARM_SMMU_FEAT_RANGE_INV		(1 << 15)
+#define ARM_SMMU_FEAT_BTM		(1 << 16)
+#define ARM_SMMU_FEAT_SVA		(1 << 17)
	u32			features;

#define ARM_SMMU_OPT_SKIP_PREFETCH	(1 << 0)
@@ -684,4 +686,12 @@ int arm_smmu_write_ctx_desc(struct arm_smmu_domain *smmu_domain, int ssid,
void arm_smmu_tlb_inv_asid(struct arm_smmu_device *smmu, u16 asid);
bool arm_smmu_free_asid(struct arm_smmu_ctx_desc *cd);

+#ifdef CONFIG_ARM_SMMU_V3_SVA
+bool arm_smmu_sva_supported(struct arm_smmu_device *smmu);
+#else /* CONFIG_ARM_SMMU_V3_SVA */
+static inline bool arm_smmu_sva_supported(struct arm_smmu_device *smmu)
+{
+	return false;
+}
+#endif /* CONFIG_ARM_SMMU_V3_SVA */
#endif /* _ARM_SMMU_V3_H */
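As a rough worked example of the dev_dbg() figure above: with 16-bit ASIDs, kpti
enabled (one bit lost) and 8 possible CPUs, the driver would report
2^15 - 8 - 2 = 32758 shared contexts. The standalone sketch below, using those
made-up inputs, mirrors the expression from arm_smmu_sva_supported():

/*
 * Illustrative arithmetic only, not kernel code: reproduces the
 * "%d shared contexts" figure computed in arm_smmu_sva_supported().
 * The inputs below (ASID width, kpti, CPU count) are example values.
 */
#include <stdio.h>

int main(void)
{
	unsigned int asid_bits = 16;	/* ID_AA64MMFR0_EL1.ASIDBits -> 16-bit ASIDs */
	int kpti_enabled = 1;		/* arm64_kernel_unmapped_at_el0() */
	int nr_possible_cpus = 8;	/* num_possible_cpus() */

	if (kpti_enabled)
		asid_bits--;		/* half the ASID space becomes unusable */

	/* Same expression as the dev_dbg() call in the patch */
	printf("%d shared contexts\n",
	       (1 << asid_bits) - nr_possible_cpus - 2);
	return 0;
}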