author     Linus Torvalds <torvalds@linux-foundation.org>   2019-03-10 22:29:52 +0300
committer  Linus Torvalds <torvalds@linux-foundation.org>   2019-03-10 22:29:52 +0300
commit     bb97be23db2a296c5f8b8b4c40feb0435b068c5e
tree       a9155e90b8cc786ddee3e87f962d9efbb2674c89 /include
parent     b7a7d1c1ec688104fdc922568c26395a756f616d
parent     d05e4c8600c36084ce9de6249bb972c9bdd75b7e
download   linux-bb97be23db2a296c5f8b8b4c40feb0435b068c5e.tar.xz
Merge tag 'iommu-updates-v5.1' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu
Pull IOMMU updates from Joerg Roedel:

 - A big cleanup and optimization patch-set for the Tegra GART driver

 - Documentation updates and fixes for the IOMMU-API

 - Support for page request in Intel VT-d scalable mode

 - Intel VT-d dma_[un]map_resource() support

 - Updates to the ATS enabling code for PCI (acked by Bjorn) and Intel
   VT-d to align with the latest version of the ATS spec

 - Relaxed IRQ source checking in the Intel VT-d driver for some aliased
   devices, needed for future devices which send IRQ messages from more
   than one request-ID

 - IRQ remapping driver for Hyper-V

 - Patches to make generic IOVA and IO-Page-Table code usable outside of
   the IOMMU code

 - Various other small fixes and cleanups

* tag 'iommu-updates-v5.1' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu: (60 commits)
  iommu/vt-d: Get domain ID before clear pasid entry
  iommu/vt-d: Fix NULL pointer reference in intel_svm_bind_mm()
  iommu/vt-d: Set context field after value initialized
  iommu/vt-d: Disable ATS support on untrusted devices
  iommu/mediatek: Fix semicolon code style issue
  MAINTAINERS: Add Hyper-V IOMMU driver into Hyper-V CORE AND DRIVERS scope
  iommu/hyper-v: Add Hyper-V stub IOMMU driver
  x86/Hyper-V: Set x2apic destination mode to physical when x2apic is available
  PCI/ATS: Add inline to pci_prg_resp_pasid_required()
  iommu/vt-d: Check identity map for hot-added devices
  iommu: Fix IOMMU debugfs fallout
  iommu: Document iommu_ops.is_attach_deferred()
  iommu: Document iommu_ops.iotlb_sync_map()
  iommu/vt-d: Enable ATS only if the device uses page aligned address.
  PCI/ATS: Add pci_ats_page_aligned() interface
  iommu/vt-d: Fix PRI/PASID dependency issue.
  PCI/ATS: Add pci_prg_resp_pasid_required() interface.
  iommu/vt-d: Allow interrupts from the entire bus for aliased devices
  iommu/vt-d: Add helper to set an IRTE to verify only the bus number
  iommu: Fix flush_tlb_all typo
  ...
Diffstat (limited to 'include')
-rw-r--r--  include/linux/intel-iommu.h    |  21
-rw-r--r--  include/linux/intel-svm.h      |   2
-rw-r--r--  include/linux/io-pgtable.h     | 213
-rw-r--r--  include/linux/iommu.h          |   6
-rw-r--r--  include/linux/pci-ats.h        |   5
-rw-r--r--  include/linux/pci.h            |   2
-rw-r--r--  include/soc/tegra/mc.h         |  27
-rw-r--r--  include/uapi/linux/pci_regs.h  |   2
8 files changed, 263 insertions, 15 deletions
diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
index 0605f3bf6e79..fa364de9db18 100644
--- a/include/linux/intel-iommu.h
+++ b/include/linux/intel-iommu.h
@@ -374,20 +374,17 @@ enum {
#define QI_DEV_EIOTLB_PFSID(pfsid) (((u64)(pfsid & 0xf) << 12) | ((u64)(pfsid & 0xfff) << 52))
#define QI_DEV_EIOTLB_MAX_INVS 32
-#define QI_PGRP_IDX(idx) (((u64)(idx)) << 55)
-#define QI_PGRP_PRIV(priv) (((u64)(priv)) << 32)
-#define QI_PGRP_RESP_CODE(res) ((u64)(res))
-#define QI_PGRP_PASID(pasid) (((u64)(pasid)) << 32)
-#define QI_PGRP_DID(did) (((u64)(did)) << 16)
+/* Page group response descriptor QW0 */
#define QI_PGRP_PASID_P(p) (((u64)(p)) << 4)
+#define QI_PGRP_PDP(p) (((u64)(p)) << 5)
+#define QI_PGRP_RESP_CODE(res) (((u64)(res)) << 12)
+#define QI_PGRP_DID(rid) (((u64)(rid)) << 16)
+#define QI_PGRP_PASID(pasid) (((u64)(pasid)) << 32)
+
+/* Page group response descriptor QW1 */
+#define QI_PGRP_LPIG(x) (((u64)(x)) << 2)
+#define QI_PGRP_IDX(idx) (((u64)(idx)) << 3)
-#define QI_PSTRM_ADDR(addr) (((u64)(addr)) & VTD_PAGE_MASK)
-#define QI_PSTRM_DEVFN(devfn) (((u64)(devfn)) << 4)
-#define QI_PSTRM_RESP_CODE(res) ((u64)(res))
-#define QI_PSTRM_IDX(idx) (((u64)(idx)) << 55)
-#define QI_PSTRM_PRIV(priv) (((u64)(priv)) << 32)
-#define QI_PSTRM_BUS(bus) (((u64)(bus)) << 24)
-#define QI_PSTRM_PASID(pasid) (((u64)(pasid)) << 4)
#define QI_RESP_SUCCESS 0x0
#define QI_RESP_INVALID 0x1
diff --git a/include/linux/intel-svm.h b/include/linux/intel-svm.h
index 99bc5b3ae26e..e3f76315ca4d 100644
--- a/include/linux/intel-svm.h
+++ b/include/linux/intel-svm.h
@@ -20,7 +20,7 @@ struct device;
struct svm_dev_ops {
void (*fault_cb)(struct device *dev, int pasid, u64 address,
- u32 private, int rwxp, int response);
+ void *private, int rwxp, int response);
};
/* Values for rxwp in fault_cb callback */
diff --git a/include/linux/io-pgtable.h b/include/linux/io-pgtable.h
new file mode 100644
index 000000000000..47d5ae559329
--- /dev/null
+++ b/include/linux/io-pgtable.h
@@ -0,0 +1,213 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __IO_PGTABLE_H
+#define __IO_PGTABLE_H
+#include <linux/bitops.h>
+
+/*
+ * Public API for use by IOMMU drivers
+ */
+enum io_pgtable_fmt {
+ ARM_32_LPAE_S1,
+ ARM_32_LPAE_S2,
+ ARM_64_LPAE_S1,
+ ARM_64_LPAE_S2,
+ ARM_V7S,
+ IO_PGTABLE_NUM_FMTS,
+};
+
+/**
+ * struct iommu_gather_ops - IOMMU callbacks for TLB and page table management.
+ *
+ * @tlb_flush_all: Synchronously invalidate the entire TLB context.
+ * @tlb_add_flush: Queue up a TLB invalidation for a virtual address range.
+ * @tlb_sync: Ensure any queued TLB invalidation has taken effect, and
+ * any corresponding page table updates are visible to the
+ * IOMMU.
+ *
+ * Note that these can all be called in atomic context and must therefore
+ * not block.
+ */
+struct iommu_gather_ops {
+ void (*tlb_flush_all)(void *cookie);
+ void (*tlb_add_flush)(unsigned long iova, size_t size, size_t granule,
+ bool leaf, void *cookie);
+ void (*tlb_sync)(void *cookie);
+};
+
+/**
+ * struct io_pgtable_cfg - Configuration data for a set of page tables.
+ *
+ * @quirks: A bitmap of hardware quirks that require some special
+ * action by the low-level page table allocator.
+ * @pgsize_bitmap: A bitmap of page sizes supported by this set of page
+ * tables.
+ * @ias: Input address (iova) size, in bits.
+ * @oas: Output address (paddr) size, in bits.
+ * @tlb: TLB management callbacks for this set of tables.
+ * @iommu_dev: The device representing the DMA configuration for the
+ * page table walker.
+ */
+struct io_pgtable_cfg {
+ /*
+ * IO_PGTABLE_QUIRK_ARM_NS: (ARM formats) Set NS and NSTABLE bits in
+ * stage 1 PTEs, for hardware which insists on validating them
+ * even in non-secure state where they should normally be ignored.
+ *
+ * IO_PGTABLE_QUIRK_NO_PERMS: Ignore the IOMMU_READ, IOMMU_WRITE and
+ * IOMMU_NOEXEC flags and map everything with full access, for
+ * hardware which does not implement the permissions of a given
+ * format, and/or requires some format-specific default value.
+ *
+ * IO_PGTABLE_QUIRK_TLBI_ON_MAP: If the format forbids caching invalid
+ * (unmapped) entries but the hardware might do so anyway, perform
+ * TLB maintenance when mapping as well as when unmapping.
+ *
+ * IO_PGTABLE_QUIRK_ARM_MTK_4GB: (ARM v7s format) Set bit 9 in all
+ * PTEs, for Mediatek IOMMUs which treat it as a 33rd address bit
+ * when the SoC is in "4GB mode" and they can only access the high
+ * remap of DRAM (0x1_00000000 to 0x1_ffffffff).
+ *
+ * IO_PGTABLE_QUIRK_NO_DMA: Guarantees that the tables will only ever
+ * be accessed by a fully cache-coherent IOMMU or CPU (e.g. for a
+ * software-emulated IOMMU), such that pagetable updates need not
+ * be treated as explicit DMA data.
+ *
+ * IO_PGTABLE_QUIRK_NON_STRICT: Skip issuing synchronous leaf TLBIs
+ * on unmap, for DMA domains using the flush queue mechanism for
+ * delayed invalidation.
+ */
+ #define IO_PGTABLE_QUIRK_ARM_NS BIT(0)
+ #define IO_PGTABLE_QUIRK_NO_PERMS BIT(1)
+ #define IO_PGTABLE_QUIRK_TLBI_ON_MAP BIT(2)
+ #define IO_PGTABLE_QUIRK_ARM_MTK_4GB BIT(3)
+ #define IO_PGTABLE_QUIRK_NO_DMA BIT(4)
+ #define IO_PGTABLE_QUIRK_NON_STRICT BIT(5)
+ unsigned long quirks;
+ unsigned long pgsize_bitmap;
+ unsigned int ias;
+ unsigned int oas;
+ const struct iommu_gather_ops *tlb;
+ struct device *iommu_dev;
+
+ /* Low-level data specific to the table format */
+ union {
+ struct {
+ u64 ttbr[2];
+ u64 tcr;
+ u64 mair[2];
+ } arm_lpae_s1_cfg;
+
+ struct {
+ u64 vttbr;
+ u64 vtcr;
+ } arm_lpae_s2_cfg;
+
+ struct {
+ u32 ttbr[2];
+ u32 tcr;
+ u32 nmrr;
+ u32 prrr;
+ } arm_v7s_cfg;
+ };
+};
+
+/**
+ * struct io_pgtable_ops - Page table manipulation API for IOMMU drivers.
+ *
+ * @map: Map a physically contiguous memory region.
+ * @unmap: Unmap a physically contiguous memory region.
+ * @iova_to_phys: Translate iova to physical address.
+ *
+ * These functions map directly onto the iommu_ops member functions with
+ * the same names.
+ */
+struct io_pgtable_ops {
+ int (*map)(struct io_pgtable_ops *ops, unsigned long iova,
+ phys_addr_t paddr, size_t size, int prot);
+ size_t (*unmap)(struct io_pgtable_ops *ops, unsigned long iova,
+ size_t size);
+ phys_addr_t (*iova_to_phys)(struct io_pgtable_ops *ops,
+ unsigned long iova);
+};
+
+/**
+ * alloc_io_pgtable_ops() - Allocate a page table allocator for use by an IOMMU.
+ *
+ * @fmt: The page table format.
+ * @cfg: The page table configuration. This will be modified to represent
+ * the configuration actually provided by the allocator (e.g. the
+ * pgsize_bitmap may be restricted).
+ * @cookie: An opaque token provided by the IOMMU driver and passed back to
+ * the callback routines in cfg->tlb.
+ */
+struct io_pgtable_ops *alloc_io_pgtable_ops(enum io_pgtable_fmt fmt,
+ struct io_pgtable_cfg *cfg,
+ void *cookie);
+
+/**
+ * free_io_pgtable_ops() - Free an io_pgtable_ops structure. The caller
+ * *must* ensure that the page table is no longer
+ * live, but the TLB can be dirty.
+ *
+ * @ops: The ops returned from alloc_io_pgtable_ops.
+ */
+void free_io_pgtable_ops(struct io_pgtable_ops *ops);
+
+
+/*
+ * Internal structures for page table allocator implementations.
+ */
+
+/**
+ * struct io_pgtable - Internal structure describing a set of page tables.
+ *
+ * @fmt: The page table format.
+ * @cookie: An opaque token provided by the IOMMU driver and passed back to
+ * any callback routines.
+ * @cfg: A copy of the page table configuration.
+ * @ops: The page table operations in use for this set of page tables.
+ */
+struct io_pgtable {
+ enum io_pgtable_fmt fmt;
+ void *cookie;
+ struct io_pgtable_cfg cfg;
+ struct io_pgtable_ops ops;
+};
+
+#define io_pgtable_ops_to_pgtable(x) container_of((x), struct io_pgtable, ops)
+
+static inline void io_pgtable_tlb_flush_all(struct io_pgtable *iop)
+{
+ iop->cfg.tlb->tlb_flush_all(iop->cookie);
+}
+
+static inline void io_pgtable_tlb_add_flush(struct io_pgtable *iop,
+ unsigned long iova, size_t size, size_t granule, bool leaf)
+{
+ iop->cfg.tlb->tlb_add_flush(iova, size, granule, leaf, iop->cookie);
+}
+
+static inline void io_pgtable_tlb_sync(struct io_pgtable *iop)
+{
+ iop->cfg.tlb->tlb_sync(iop->cookie);
+}
+
+/**
+ * struct io_pgtable_init_fns - Alloc/free a set of page tables for a
+ * particular format.
+ *
+ * @alloc: Allocate a set of page tables described by cfg.
+ * @free: Free the page tables associated with iop.
+ */
+struct io_pgtable_init_fns {
+ struct io_pgtable *(*alloc)(struct io_pgtable_cfg *cfg, void *cookie);
+ void (*free)(struct io_pgtable *iop);
+};
+
+extern struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s1_init_fns;
+extern struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s2_init_fns;
+extern struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s1_init_fns;
+extern struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s2_init_fns;
+extern struct io_pgtable_init_fns io_pgtable_arm_v7s_init_fns;
+
+#endif /* __IO_PGTABLE_H */
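
The header added above is the whole public API of the page-table code this merge makes reusable. Below is a minimal usage sketch, not code from this merge: the my_smmu_* names, the chosen ARM_64_LPAE_S1 format, the quirk and the address sizes are illustrative assumptions. The driver supplies the three iommu_gather_ops callbacks, fills an io_pgtable_cfg, and then drives the returned io_pgtable_ops from its own iommu_ops.

#include <linux/io-pgtable.h>
#include <linux/sizes.h>

/* Hypothetical driver context, handed back to the TLB callbacks as the cookie. */
struct my_smmu_domain {
	struct io_pgtable_ops *pgtbl_ops;
	/* ... hardware state ... */
};

static void my_smmu_tlb_flush_all(void *cookie)
{
	/* Invalidate the whole TLB context belonging to this domain. */
}

static void my_smmu_tlb_add_flush(unsigned long iova, size_t size,
				  size_t granule, bool leaf, void *cookie)
{
	/* Queue an invalidation for [iova, iova + size). */
}

static void my_smmu_tlb_sync(void *cookie)
{
	/* Wait for any queued invalidations to complete. */
}

static const struct iommu_gather_ops my_smmu_gather_ops = {
	.tlb_flush_all	= my_smmu_tlb_flush_all,
	.tlb_add_flush	= my_smmu_tlb_add_flush,
	.tlb_sync	= my_smmu_tlb_sync,
};

static int my_smmu_domain_init(struct my_smmu_domain *dom, struct device *dev)
{
	struct io_pgtable_cfg cfg = {
		.quirks		= IO_PGTABLE_QUIRK_NO_DMA, /* only if fully coherent */
		.pgsize_bitmap	= SZ_4K | SZ_2M | SZ_1G,
		.ias		= 48,
		.oas		= 48,
		.tlb		= &my_smmu_gather_ops,
		.iommu_dev	= dev,
	};

	dom->pgtbl_ops = alloc_io_pgtable_ops(ARM_64_LPAE_S1, &cfg, dom);
	if (!dom->pgtbl_ops)
		return -ENOMEM;

	/* dom->pgtbl_ops->map/unmap/iova_to_phys mirror the iommu_ops callbacks. */
	return 0;
}

free_io_pgtable_ops() is the matching teardown, and per the kerneldoc above it may only be called once the page tables are no longer live (a dirty TLB is acceptable).
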
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index e90da6b6f3d1..ffbbc7e39cee 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -167,8 +167,9 @@ struct iommu_resv_region {
* @detach_dev: detach device from an iommu domain
* @map: map a physically contiguous memory region to an iommu domain
* @unmap: unmap a physically contiguous memory region from an iommu domain
- * @flush_tlb_all: Synchronously flush all hardware TLBs for this domain
+ * @flush_iotlb_all: Synchronously flush all hardware TLBs for this domain
* @iotlb_range_add: Add a given iova range to the flush queue for this domain
+ * @iotlb_sync_map: Sync mappings created recently using @map to the hardware
* @iotlb_sync: Flush all queued ranges from the hardware TLBs and empty flush
* queue
* @iova_to_phys: translate iova to physical address
@@ -183,6 +184,8 @@ struct iommu_resv_region {
* @domain_window_enable: Configure and enable a particular window for a domain
* @domain_window_disable: Disable a particular window for a domain
* @of_xlate: add OF master IDs to iommu grouping
+ * @is_attach_deferred: Check if domain attach should be deferred from iommu
+ * driver init to device driver init (default no)
* @pgsize_bitmap: bitmap of all possible supported page sizes
*/
struct iommu_ops {
@@ -201,6 +204,7 @@ struct iommu_ops {
void (*flush_iotlb_all)(struct iommu_domain *domain);
void (*iotlb_range_add)(struct iommu_domain *domain,
unsigned long iova, size_t size);
+ void (*iotlb_sync_map)(struct iommu_domain *domain);
void (*iotlb_sync)(struct iommu_domain *domain);
phys_addr_t (*iova_to_phys)(struct iommu_domain *domain, dma_addr_t iova);
int (*add_device)(struct device *dev);
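
For context on the new iotlb_sync_map hook documented above: a driver whose hardware needs freshly written PTEs pushed out after each map would wire it up roughly as below. This is a sketch with hypothetical my_iommu_* names; the core is assumed to invoke the callback after a successful ->map.

#include <linux/iommu.h>

static void my_iommu_flush_iotlb_all(struct iommu_domain *domain)
{
	/* Invalidate every TLB entry for this domain. */
}

static void my_iommu_iotlb_sync_map(struct iommu_domain *domain)
{
	/*
	 * Called after ->map has succeeded: make the newly written PTEs
	 * visible to the IOMMU walker, e.g. by a cache clean or a
	 * hardware "sync mappings" command.
	 */
}

static void my_iommu_iotlb_sync(struct iommu_domain *domain)
{
	/* Drain any queued invalidation ranges. */
}

static const struct iommu_ops my_iommu_ops = {
	/* ... attach_dev, map, unmap, iova_to_phys, add_device, ... */
	.flush_iotlb_all = my_iommu_flush_iotlb_all,
	.iotlb_sync_map	 = my_iommu_iotlb_sync_map,
	.iotlb_sync	 = my_iommu_iotlb_sync,
};
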
diff --git a/include/linux/pci-ats.h b/include/linux/pci-ats.h
index 7c4b8e27268c..1ebb88e7c184 100644
--- a/include/linux/pci-ats.h
+++ b/include/linux/pci-ats.h
@@ -40,6 +40,7 @@ void pci_disable_pasid(struct pci_dev *pdev);
void pci_restore_pasid_state(struct pci_dev *pdev);
int pci_pasid_features(struct pci_dev *pdev);
int pci_max_pasids(struct pci_dev *pdev);
+int pci_prg_resp_pasid_required(struct pci_dev *pdev);
#else /* CONFIG_PCI_PASID */
@@ -66,6 +67,10 @@ static inline int pci_max_pasids(struct pci_dev *pdev)
return -EINVAL;
}
+static inline int pci_prg_resp_pasid_required(struct pci_dev *pdev)
+{
+ return 0;
+}
#endif /* CONFIG_PCI_PASID */
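
pci_prg_resp_pasid_required(), stubbed to 0 without CONFIG_PCI_PASID, tells an IOMMU driver whether the endpoint requires a PASID in PRG Response messages. A hedged sketch of the kind of check the VT-d PRI/PASID dependency fix implies, with a hypothetical my_iommu_can_send_prg_resp_pasid() capability query:

#include <linux/pci.h>
#include <linux/pci-ats.h>

/* Hypothetical: can this IOMMU put a PASID into PRG Response messages? */
static bool my_iommu_can_send_prg_resp_pasid(struct pci_dev *pdev);

static int my_enable_pri_with_pasid(struct pci_dev *pdev, u32 max_requests)
{
	int ret;

	ret = pci_enable_pasid(pdev, 0);
	if (ret)
		return ret;

	/*
	 * If the device insists on a PASID in PRG responses but the IOMMU
	 * cannot provide one, PRI and PASID cannot be used together here.
	 */
	if (pci_prg_resp_pasid_required(pdev) &&
	    !my_iommu_can_send_prg_resp_pasid(pdev)) {
		ret = -EINVAL;
		goto out_disable_pasid;
	}

	ret = pci_enable_pri(pdev, max_requests);
	if (ret)
		goto out_disable_pasid;

	return 0;

out_disable_pasid:
	pci_disable_pasid(pdev);
	return ret;
}
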
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 4eca42cf611b..77448215ef5b 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -1527,11 +1527,13 @@ void pci_ats_init(struct pci_dev *dev);
int pci_enable_ats(struct pci_dev *dev, int ps);
void pci_disable_ats(struct pci_dev *dev);
int pci_ats_queue_depth(struct pci_dev *dev);
+int pci_ats_page_aligned(struct pci_dev *dev);
#else
static inline void pci_ats_init(struct pci_dev *d) { }
static inline int pci_enable_ats(struct pci_dev *d, int ps) { return -ENODEV; }
static inline void pci_disable_ats(struct pci_dev *d) { }
static inline int pci_ats_queue_depth(struct pci_dev *d) { return -ENODEV; }
+static inline int pci_ats_page_aligned(struct pci_dev *dev) { return 0; }
#endif
#ifdef CONFIG_PCIE_PTM
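
pci_ats_page_aligned(), which returns 0 when ATS support is compiled out, reports whether the endpoint only issues page-aligned untranslated requests. Per "iommu/vt-d: Enable ATS only if the device uses page aligned address.", an IOMMU that requires 4 KiB alignment should gate ATS on it; a rough, hypothetical sketch:

#include <linux/pci.h>
#include <linux/pci-ats.h>

static void my_try_enable_ats(struct pci_dev *pdev, bool requires_page_aligned)
{
	/*
	 * Some IOMMUs (VT-d among them) accept only 4 KiB aligned
	 * untranslated requests, so skip ATS unless the device
	 * advertises the Page Aligned Request bit.
	 */
	if (requires_page_aligned && !pci_ats_page_aligned(pdev))
		return;

	if (pci_enable_ats(pdev, PAGE_SHIFT))
		pci_info(pdev, "failed to enable ATS\n");
}
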
diff --git a/include/soc/tegra/mc.h b/include/soc/tegra/mc.h
index b43f37fea096..e489a028ec9f 100644
--- a/include/soc/tegra/mc.h
+++ b/include/soc/tegra/mc.h
@@ -9,6 +9,7 @@
#ifndef __SOC_TEGRA_MC_H__
#define __SOC_TEGRA_MC_H__
+#include <linux/err.h>
#include <linux/reset-controller.h>
#include <linux/types.h>
@@ -77,6 +78,7 @@ struct tegra_smmu_soc {
struct tegra_mc;
struct tegra_smmu;
+struct gart_device;
#ifdef CONFIG_TEGRA_IOMMU_SMMU
struct tegra_smmu *tegra_smmu_probe(struct device *dev,
@@ -96,6 +98,28 @@ static inline void tegra_smmu_remove(struct tegra_smmu *smmu)
}
#endif
+#ifdef CONFIG_TEGRA_IOMMU_GART
+struct gart_device *tegra_gart_probe(struct device *dev, struct tegra_mc *mc);
+int tegra_gart_suspend(struct gart_device *gart);
+int tegra_gart_resume(struct gart_device *gart);
+#else
+static inline struct gart_device *
+tegra_gart_probe(struct device *dev, struct tegra_mc *mc)
+{
+ return ERR_PTR(-ENODEV);
+}
+
+static inline int tegra_gart_suspend(struct gart_device *gart)
+{
+ return -ENODEV;
+}
+
+static inline int tegra_gart_resume(struct gart_device *gart)
+{
+ return -ENODEV;
+}
+#endif
+
struct tegra_mc_reset {
const char *name;
unsigned long id;
@@ -144,7 +168,8 @@ struct tegra_mc_soc {
struct tegra_mc {
struct device *dev;
struct tegra_smmu *smmu;
- void __iomem *regs, *regs2;
+ struct gart_device *gart;
+ void __iomem *regs;
struct clk *clk;
int irq;
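
The new GART entry points above are meant to be driven from the Tegra memory-controller driver, which now carries a gart pointer in struct tegra_mc. A loose sketch of probing and suspending through this interface (hypothetical function names, not the actual tegra-mc code from this merge):

#include <linux/err.h>
#include <soc/tegra/mc.h>

static int my_tegra_mc_probe_gart(struct tegra_mc *mc)
{
	mc->gart = tegra_gart_probe(mc->dev, mc);
	if (IS_ERR(mc->gart)) {
		/* -ENODEV just means no GART is present or it is disabled. */
		if (PTR_ERR(mc->gart) == -ENODEV) {
			mc->gart = NULL;
			return 0;
		}
		return PTR_ERR(mc->gart);
	}

	return 0;
}

static int my_tegra_mc_suspend(struct tegra_mc *mc)
{
	if (mc->gart)
		return tegra_gart_suspend(mc->gart);

	return 0;
}
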
diff --git a/include/uapi/linux/pci_regs.h b/include/uapi/linux/pci_regs.h
index e1e9888c85e6..5c98133f2c94 100644
--- a/include/uapi/linux/pci_regs.h
+++ b/include/uapi/linux/pci_regs.h
@@ -866,6 +866,7 @@
#define PCI_ATS_CAP 0x04 /* ATS Capability Register */
#define PCI_ATS_CAP_QDEP(x) ((x) & 0x1f) /* Invalidate Queue Depth */
#define PCI_ATS_MAX_QDEP 32 /* Max Invalidate Queue Depth */
+#define PCI_ATS_CAP_PAGE_ALIGNED 0x0020 /* Page Aligned Request */
#define PCI_ATS_CTRL 0x06 /* ATS Control Register */
#define PCI_ATS_CTRL_ENABLE 0x8000 /* ATS Enable */
#define PCI_ATS_CTRL_STU(x) ((x) & 0x1f) /* Smallest Translation Unit */
@@ -880,6 +881,7 @@
#define PCI_PRI_STATUS_RF 0x001 /* Response Failure */
#define PCI_PRI_STATUS_UPRGI 0x002 /* Unexpected PRG index */
#define PCI_PRI_STATUS_STOPPED 0x100 /* PRI Stopped */
+#define PCI_PRI_STATUS_PASID 0x8000 /* PRG Response PASID Required */
#define PCI_PRI_MAX_REQ 0x08 /* PRI max reqs supported */
#define PCI_PRI_ALLOC_REQ 0x0c /* PRI max reqs allowed */
#define PCI_EXT_CAP_PRI_SIZEOF 16