author     Joerg Roedel <jroedel@suse.de>  2017-08-10 15:44:28 +0300
committer  Joerg Roedel <jroedel@suse.de>  2017-08-15 19:23:50 +0300
commit     42f87e71c3df12d8f29ec1bb7b47772ffaeaf1ee (patch)
tree       e87f5bea061b489bdc818faf1402f03ce853fec2 /include/linux/iova.h
parent     da4b02750a9fe1d1c4d047d14e69ec7542dddeb3 (diff)
download   linux-42f87e71c3df12d8f29ec1bb7b47772ffaeaf1ee.tar.xz
iommu/iova: Add flush-queue data structures
This patch adds the basic data structures needed to implement flush queues in the generic IOVA code. It also adds the initialization and destroy routines for these data structures. The initialization routine is designed so that use of this feature is optional for users of the IOVA code.

Signed-off-by: Joerg Roedel <jroedel@suse.de>
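To illustrate the opt-in model described above, the sketch below shows how an IOMMU driver might enable flush queues for its iova_domain. Everything prefixed with my_ (the wrapper struct, the callbacks, and the init helper) is hypothetical and not part of this patch; only init_iova_domain(), init_iova_flush_queue() and the two callback typedefs come from the IOVA code touched here.

#include <linux/iova.h>
#include <linux/printk.h>

/* Hypothetical driver-private wrapper around the generic iova_domain */
struct my_iommu_domain {
	struct iova_domain iovad;
	/* ... driver-specific state ... */
};

/* Flush the IOMMU TLBs for the domain owning @iovad (body is illustrative) */
static void my_iommu_flush_iotlb(struct iova_domain *iovad)
{
	/* hardware-specific IOTLB invalidation would go here */
}

/* Release driver-private data attached to a flushed IOVA entry (illustrative) */
static void my_iommu_entry_dtor(unsigned long data)
{
	/* e.g. free resources kept alive until the deferred flush completes */
}

static int my_iommu_domain_init(struct my_iommu_domain *dom, unsigned long granule,
				unsigned long start_pfn, unsigned long pfn_32bit)
{
	int ret;

	init_iova_domain(&dom->iovad, granule, start_pfn, pfn_32bit);

	/*
	 * Flush queues are optional: a driver that wants synchronous unmaps
	 * simply skips this call, and a driver that does make it can fall
	 * back if the setup fails.
	 */
	ret = init_iova_flush_queue(&dom->iovad, my_iommu_flush_iotlb,
				    my_iommu_entry_dtor);
	if (ret)
		pr_info("deferred IOTLB flushing unavailable, using sync unmaps\n");

	return 0;
}

Note that the static inline stub added at the end of this patch returns -ENODEV when the IOVA code is not built in, so callers have to treat the feature as best-effort in any case.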
Diffstat (limited to 'include/linux/iova.h')
-rw-r--r--  include/linux/iova.h  |  41
1 file changed, 41 insertions(+), 0 deletions(-)
diff --git a/include/linux/iova.h b/include/linux/iova.h
index e0a892ae45c0..8aa10896150e 100644
--- a/include/linux/iova.h
+++ b/include/linux/iova.h
@@ -36,6 +36,30 @@ struct iova_rcache {
	struct iova_cpu_rcache __percpu *cpu_rcaches;
};
+struct iova_domain;
+
+/* Call-Back from IOVA code into IOMMU drivers */
+typedef void (* iova_flush_cb)(struct iova_domain *domain);
+
+/* Destructor for per-entry data */
+typedef void (* iova_entry_dtor)(unsigned long data);
+
+/* Number of entries per Flush Queue */
+#define IOVA_FQ_SIZE 256
+
+/* Flush Queue entry for deferred flushing */
+struct iova_fq_entry {
+	unsigned long iova_pfn;
+	unsigned long pages;
+	unsigned long data;
+};
+
+/* Per-CPU Flush Queue structure */
+struct iova_fq {
+ struct iova_fq_entry entries[IOVA_FQ_SIZE];
+ unsigned head, tail;
+};
+
/* holds all the iova translations for a domain */
struct iova_domain {
	spinlock_t iova_rbtree_lock;	/* Lock to protect update of rbtree */
@@ -45,6 +69,14 @@ struct iova_domain {
	unsigned long start_pfn;	/* Lower limit for this domain */
	unsigned long dma_32bit_pfn;
	struct iova_rcache rcaches[IOVA_RANGE_CACHE_MAX_SIZE];	/* IOVA range caches */
+
+	iova_flush_cb flush_cb;		/* Call-Back function to flush IOMMU
+					   TLBs */
+
+	iova_entry_dtor entry_dtor;	/* IOMMU driver specific destructor for
+					   iova entry */
+
+	struct iova_fq __percpu *fq;	/* Flush Queue */
};
static inline unsigned long iova_size(struct iova *iova)
@@ -102,6 +134,8 @@ struct iova *reserve_iova(struct iova_domain *iovad, unsigned long pfn_lo,
void copy_reserved_iova(struct iova_domain *from, struct iova_domain *to);
void init_iova_domain(struct iova_domain *iovad, unsigned long granule,
			unsigned long start_pfn, unsigned long pfn_32bit);
+int init_iova_flush_queue(struct iova_domain *iovad,
+			  iova_flush_cb flush_cb, iova_entry_dtor entry_dtor);
struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn);
void put_iova_domain(struct iova_domain *iovad);
struct iova *split_and_remove_iova(struct iova_domain *iovad,
@@ -174,6 +208,13 @@ static inline void init_iova_domain(struct iova_domain *iovad,
{
}
+static inline int init_iova_flush_queue(struct iova_domain *iovad,
+					iova_flush_cb flush_cb,
+					iova_entry_dtor entry_dtor)
+{
+	return -ENODEV;
+}
+
static inline struct iova *find_iova(struct iova_domain *iovad,
				     unsigned long pfn)
{