Diffstat (limited to 'arch')
-rw-r--r--  arch/sparc/include/asm/iommu_64.h |   7
-rw-r--r--  arch/sparc/kernel/iommu.c         | 188
-rw-r--r--  arch/sparc/kernel/iommu_common.h  |   8
-rw-r--r--  arch/sparc/kernel/ldc.c           | 185
-rw-r--r--  arch/sparc/kernel/pci_sun4v.c     | 193
5 files changed, 324 insertions(+), 257 deletions(-)
diff --git a/arch/sparc/include/asm/iommu_64.h b/arch/sparc/include/asm/iommu_64.h
index e3cd4493d81d..2b9321ab064d 100644
--- a/arch/sparc/include/asm/iommu_64.h
+++ b/arch/sparc/include/asm/iommu_64.h
@@ -16,7 +16,6 @@
#define IOPTE_WRITE 0x0000000000000002UL
#define IOMMU_NUM_CTXS 4096
-#include <linux/iommu-common.h>
struct iommu_arena {
unsigned long *map;
@@ -25,10 +24,11 @@ struct iommu_arena {
};
struct iommu {
- struct iommu_table tbl;
spinlock_t lock;
- u32 dma_addr_mask;
+ struct iommu_arena arena;
+ void (*flush_all)(struct iommu *);
iopte_t *page_table;
+ u32 page_table_map_base;
unsigned long iommu_control;
unsigned long iommu_tsbbase;
unsigned long iommu_flush;
@@ -40,6 +40,7 @@ struct iommu {
unsigned long dummy_page_pa;
unsigned long ctx_lowest_free;
DECLARE_BITMAP(ctx_bitmap, IOMMU_NUM_CTXS);
+ u32 dma_addr_mask;
};
struct strbuf {
diff --git a/arch/sparc/kernel/iommu.c b/arch/sparc/kernel/iommu.c
index 9b16b341b6ae..bfa4d0c2df42 100644
--- a/arch/sparc/kernel/iommu.c
+++ b/arch/sparc/kernel/iommu.c
@@ -13,15 +13,11 @@
#include <linux/errno.h>
#include <linux/iommu-helper.h>
#include <linux/bitmap.h>
-#include <linux/hash.h>
-#include <linux/iommu-common.h>
#ifdef CONFIG_PCI
#include <linux/pci.h>
#endif
-static DEFINE_PER_CPU(unsigned int, iommu_pool_hash);
-
#include <asm/iommu.h>
#include "iommu_common.h"
@@ -49,9 +45,8 @@ static DEFINE_PER_CPU(unsigned int, iommu_pool_hash);
"i" (ASI_PHYS_BYPASS_EC_E))
/* Must be invoked under the IOMMU lock. */
-static void iommu_flushall(struct iommu_table *iommu_table)
+static void iommu_flushall(struct iommu *iommu)
{
- struct iommu *iommu = container_of(iommu_table, struct iommu, tbl);
if (iommu->iommu_flushinv) {
iommu_write(iommu->iommu_flushinv, ~(u64)0);
} else {
@@ -92,22 +87,93 @@ static inline void iopte_make_dummy(struct iommu *iommu, iopte_t *iopte)
iopte_val(*iopte) = val;
}
-static struct iommu_tbl_ops iommu_sparc_ops = {
- .reset = iommu_flushall
-};
-
-static void setup_iommu_pool_hash(void)
+/* Based almost entirely upon the ppc64 iommu allocator. If you use the 'handle'
+ * facility it must all be done in one pass while under the iommu lock.
+ *
+ * On sun4u platforms, we only flush the IOMMU once every time we've passed
+ * over the entire page table doing allocations. Therefore we only ever advance
+ * the hint and cannot backtrack it.
+ */
+unsigned long iommu_range_alloc(struct device *dev,
+ struct iommu *iommu,
+ unsigned long npages,
+ unsigned long *handle)
{
- unsigned int i;
- static bool do_once;
+ unsigned long n, end, start, limit, boundary_size;
+ struct iommu_arena *arena = &iommu->arena;
+ int pass = 0;
- if (do_once)
- return;
- do_once = true;
- for_each_possible_cpu(i)
- per_cpu(iommu_pool_hash, i) = hash_32(i, IOMMU_POOL_HASHBITS);
+ /* This allocator was derived from x86_64's bit string search */
+
+ /* Sanity check */
+ if (unlikely(npages == 0)) {
+ if (printk_ratelimit())
+ WARN_ON(1);
+ return DMA_ERROR_CODE;
+ }
+
+ if (handle && *handle)
+ start = *handle;
+ else
+ start = arena->hint;
+
+ limit = arena->limit;
+
+ /* The case below can happen if we have a small segment appended
+ * to a large, or when the previous alloc was at the very end of
+ * the available space. If so, go back to the beginning and flush.
+ */
+ if (start >= limit) {
+ start = 0;
+ if (iommu->flush_all)
+ iommu->flush_all(iommu);
+ }
+
+ again:
+
+ if (dev)
+ boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
+ 1 << IO_PAGE_SHIFT);
+ else
+ boundary_size = ALIGN(1UL << 32, 1 << IO_PAGE_SHIFT);
+
+ n = iommu_area_alloc(arena->map, limit, start, npages,
+ iommu->page_table_map_base >> IO_PAGE_SHIFT,
+ boundary_size >> IO_PAGE_SHIFT, 0);
+ if (n == -1) {
+ if (likely(pass < 1)) {
+ /* First failure, rescan from the beginning. */
+ start = 0;
+ if (iommu->flush_all)
+ iommu->flush_all(iommu);
+ pass++;
+ goto again;
+ } else {
+ /* Second failure, give up */
+ return DMA_ERROR_CODE;
+ }
+ }
+
+ end = n + npages;
+
+ arena->hint = end;
+
+ /* Update handle for SG allocations */
+ if (handle)
+ *handle = end;
+
+ return n;
}
+void iommu_range_free(struct iommu *iommu, dma_addr_t dma_addr, unsigned long npages)
+{
+ struct iommu_arena *arena = &iommu->arena;
+ unsigned long entry;
+
+ entry = (dma_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT;
+
+ bitmap_clear(arena->map, entry, npages);
+}
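
The allocator above is a single linear bitmap scan with a moving hint: on the first failed pass it wraps to the start, runs flush_all() (only set on sun4u, where the IOMMU is flushed once per sweep over the table), and retries exactly once before returning DMA_ERROR_CODE. A minimal user-space model of that two-pass behavior, with invented arena_model_* names and a byte-per-entry "bitmap" standing in for the real bit map, might look like this:

/* Toy model of the two-pass arena scan above; not kernel code. */
#include <stdio.h>
#include <string.h>

#define LIMIT 16                    /* entries in the toy arena */

static unsigned char map[LIMIT];    /* one byte models one map bit */
static unsigned long hint;
static int flushes;

static long arena_model_alloc(unsigned long npages)
{
	unsigned long start = hint, n, i;
	int pass = 0;

again:
	for (n = start; n + npages <= LIMIT; n++) {
		for (i = 0; i < npages; i++)
			if (map[n + i])
				break;
		if (i == npages) {          /* found a free run */
			memset(&map[n], 1, npages);
			hint = n + npages;
			return n;
		}
	}
	if (pass++ < 1) {                   /* first failure: wrap + flush */
		start = 0;
		flushes++;                  /* models iommu->flush_all() */
		goto again;
	}
	return -1;                          /* second failure: give up */
}

int main(void)
{
	long a = arena_model_alloc(10);
	long b = arena_model_alloc(10);

	printf("a=%ld b=%ld flushes=%d\n", a, b, flushes);
	return 0;
}

Running it places the first run at entry 0, then fails the second request after a single wrap-around flush, mirroring the pass counter in iommu_range_alloc().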
int iommu_table_init(struct iommu *iommu, int tsbsize,
u32 dma_offset, u32 dma_addr_mask,
@@ -121,22 +187,22 @@ int iommu_table_init(struct iommu *iommu, int tsbsize,
/* Setup initial software IOMMU state. */
spin_lock_init(&iommu->lock);
iommu->ctx_lowest_free = 1;
- iommu->tbl.page_table_map_base = dma_offset;
+ iommu->page_table_map_base = dma_offset;
iommu->dma_addr_mask = dma_addr_mask;
/* Allocate and initialize the free area map. */
sz = num_tsb_entries / 8;
sz = (sz + 7UL) & ~7UL;
- iommu->tbl.map = kmalloc_node(sz, GFP_KERNEL, numa_node);
- if (!iommu->tbl.map)
+ iommu->arena.map = kmalloc_node(sz, GFP_KERNEL, numa_node);
+ if (!iommu->arena.map) {
+ printk(KERN_ERR "IOMMU: Error, kmalloc(arena.map) failed.\n");
return -ENOMEM;
- memset(iommu->tbl.map, 0, sz);
- if (tlb_type != hypervisor)
- iommu_sparc_ops.reset = NULL; /* not needed on sun4v */
+ }
+ memset(iommu->arena.map, 0, sz);
+ iommu->arena.limit = num_tsb_entries;
- setup_iommu_pool_hash();
- iommu_tbl_pool_init(&iommu->tbl, num_tsb_entries, IO_PAGE_SHIFT,
- &iommu_sparc_ops, false, 1);
+ if (tlb_type != hypervisor)
+ iommu->flush_all = iommu_flushall;
/* Allocate and initialize the dummy page which we
* set inactive IO PTEs to point to.
@@ -169,20 +235,18 @@ out_free_dummy_page:
iommu->dummy_page = 0UL;
out_free_map:
- kfree(iommu->tbl.map);
- iommu->tbl.map = NULL;
+ kfree(iommu->arena.map);
+ iommu->arena.map = NULL;
return -ENOMEM;
}
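
The map sizing in iommu_table_init() packs one bit per TSB entry and then rounds the byte count up to a multiple of 8. A standalone arithmetic check (the 8K entry count below is picked for illustration, matching LDC_IOTABLE_SIZE elsewhere in this diff):

#include <stdio.h>

int main(void)
{
	unsigned long num_tsb_entries = 8 * 1024;

	unsigned long sz = num_tsb_entries / 8;   /* bits -> bytes */
	sz = (sz + 7UL) & ~7UL;                   /* round up to 8 bytes */
	printf("map size = %lu bytes\n", sz);     /* prints 1024 */
	return 0;
}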
-static inline iopte_t *alloc_npages(struct device *dev,
- struct iommu *iommu,
+static inline iopte_t *alloc_npages(struct device *dev, struct iommu *iommu,
unsigned long npages)
{
unsigned long entry;
- entry = iommu_tbl_range_alloc(dev, &iommu->tbl, npages, NULL,
- __this_cpu_read(iommu_pool_hash));
+ entry = iommu_range_alloc(dev, iommu, npages, NULL);
if (unlikely(entry == DMA_ERROR_CODE))
return NULL;
@@ -220,7 +284,7 @@ static void *dma_4u_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_addrp, gfp_t gfp,
struct dma_attrs *attrs)
{
- unsigned long order, first_page;
+ unsigned long flags, order, first_page;
struct iommu *iommu;
struct page *page;
int npages, nid;
@@ -242,14 +306,16 @@ static void *dma_4u_alloc_coherent(struct device *dev, size_t size,
iommu = dev->archdata.iommu;
+ spin_lock_irqsave(&iommu->lock, flags);
iopte = alloc_npages(dev, iommu, size >> IO_PAGE_SHIFT);
+ spin_unlock_irqrestore(&iommu->lock, flags);
if (unlikely(iopte == NULL)) {
free_pages(first_page, order);
return NULL;
}
- *dma_addrp = (iommu->tbl.page_table_map_base +
+ *dma_addrp = (iommu->page_table_map_base +
((iopte - iommu->page_table) << IO_PAGE_SHIFT));
ret = (void *) first_page;
npages = size >> IO_PAGE_SHIFT;
@@ -270,12 +336,16 @@ static void dma_4u_free_coherent(struct device *dev, size_t size,
struct dma_attrs *attrs)
{
struct iommu *iommu;
- unsigned long order, npages;
+ unsigned long flags, order, npages;
npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
iommu = dev->archdata.iommu;
- iommu_tbl_range_free(&iommu->tbl, dvma, npages, false, NULL);
+ spin_lock_irqsave(&iommu->lock, flags);
+
+ iommu_range_free(iommu, dvma, npages);
+
+ spin_unlock_irqrestore(&iommu->lock, flags);
order = get_order(size);
if (order < 10)
@@ -305,8 +375,8 @@ static dma_addr_t dma_4u_map_page(struct device *dev, struct page *page,
npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK);
npages >>= IO_PAGE_SHIFT;
- base = alloc_npages(dev, iommu, npages);
spin_lock_irqsave(&iommu->lock, flags);
+ base = alloc_npages(dev, iommu, npages);
ctx = 0;
if (iommu->iommu_ctxflush)
ctx = iommu_alloc_ctx(iommu);
@@ -315,7 +385,7 @@ static dma_addr_t dma_4u_map_page(struct device *dev, struct page *page,
if (unlikely(!base))
goto bad;
- bus_addr = (iommu->tbl.page_table_map_base +
+ bus_addr = (iommu->page_table_map_base +
((base - iommu->page_table) << IO_PAGE_SHIFT));
ret = bus_addr | (oaddr & ~IO_PAGE_MASK);
base_paddr = __pa(oaddr & IO_PAGE_MASK);
@@ -426,7 +496,7 @@ static void dma_4u_unmap_page(struct device *dev, dma_addr_t bus_addr,
npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
npages >>= IO_PAGE_SHIFT;
base = iommu->page_table +
- ((bus_addr - iommu->tbl.page_table_map_base) >> IO_PAGE_SHIFT);
+ ((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
bus_addr &= IO_PAGE_MASK;
spin_lock_irqsave(&iommu->lock, flags);
@@ -445,11 +515,11 @@ static void dma_4u_unmap_page(struct device *dev, dma_addr_t bus_addr,
for (i = 0; i < npages; i++)
iopte_make_dummy(iommu, base + i);
+ iommu_range_free(iommu, bus_addr, npages);
+
iommu_free_ctx(iommu, ctx);
- spin_unlock_irqrestore(&iommu->lock, flags);
- iommu_tbl_range_free(&iommu->tbl, bus_addr, npages,
- false, NULL);
+ spin_unlock_irqrestore(&iommu->lock, flags);
}
static int dma_4u_map_sg(struct device *dev, struct scatterlist *sglist,
@@ -497,7 +567,7 @@ static int dma_4u_map_sg(struct device *dev, struct scatterlist *sglist,
max_seg_size = dma_get_max_seg_size(dev);
seg_boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
IO_PAGE_SIZE) >> IO_PAGE_SHIFT;
- base_shift = iommu->tbl.page_table_map_base >> IO_PAGE_SHIFT;
+ base_shift = iommu->page_table_map_base >> IO_PAGE_SHIFT;
for_each_sg(sglist, s, nelems, i) {
unsigned long paddr, npages, entry, out_entry = 0, slen;
iopte_t *base;
@@ -511,8 +581,7 @@ static int dma_4u_map_sg(struct device *dev, struct scatterlist *sglist,
/* Allocate iommu entries for that segment */
paddr = (unsigned long) SG_ENT_PHYS_ADDRESS(s);
npages = iommu_num_pages(paddr, slen, IO_PAGE_SIZE);
- entry = iommu_tbl_range_alloc(dev, &iommu->tbl, npages, &handle,
- __this_cpu_read(iommu_pool_hash));
+ entry = iommu_range_alloc(dev, iommu, npages, &handle);
/* Handle failure */
if (unlikely(entry == DMA_ERROR_CODE)) {
@@ -525,7 +594,7 @@ static int dma_4u_map_sg(struct device *dev, struct scatterlist *sglist,
base = iommu->page_table + entry;
/* Convert entry to a dma_addr_t */
- dma_addr = iommu->tbl.page_table_map_base +
+ dma_addr = iommu->page_table_map_base +
(entry << IO_PAGE_SHIFT);
dma_addr |= (s->offset & ~IO_PAGE_MASK);
@@ -585,17 +654,15 @@ iommu_map_failed:
vaddr = s->dma_address & IO_PAGE_MASK;
npages = iommu_num_pages(s->dma_address, s->dma_length,
IO_PAGE_SIZE);
+ iommu_range_free(iommu, vaddr, npages);
- entry = (vaddr - iommu->tbl.page_table_map_base)
+ entry = (vaddr - iommu->page_table_map_base)
>> IO_PAGE_SHIFT;
base = iommu->page_table + entry;
for (j = 0; j < npages; j++)
iopte_make_dummy(iommu, base + j);
- iommu_tbl_range_free(&iommu->tbl, vaddr, npages,
- false, NULL);
-
s->dma_address = DMA_ERROR_CODE;
s->dma_length = 0;
}
@@ -610,19 +677,17 @@ iommu_map_failed:
/* If contexts are being used, they are the same in all of the mappings
* we make for a particular SG.
*/
-static unsigned long fetch_sg_ctx(struct iommu *iommu,
- struct scatterlist *sg)
+static unsigned long fetch_sg_ctx(struct iommu *iommu, struct scatterlist *sg)
{
unsigned long ctx = 0;
if (iommu->iommu_ctxflush) {
iopte_t *base;
u32 bus_addr;
- struct iommu_table *tbl = &iommu->tbl;
bus_addr = sg->dma_address & IO_PAGE_MASK;
base = iommu->page_table +
- ((bus_addr - tbl->page_table_map_base) >> IO_PAGE_SHIFT);
+ ((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
ctx = (iopte_val(*base) & IOPTE_CONTEXT) >> 47UL;
}
@@ -658,8 +723,9 @@ static void dma_4u_unmap_sg(struct device *dev, struct scatterlist *sglist,
if (!len)
break;
npages = iommu_num_pages(dma_handle, len, IO_PAGE_SIZE);
+ iommu_range_free(iommu, dma_handle, npages);
- entry = ((dma_handle - iommu->tbl.page_table_map_base)
+ entry = ((dma_handle - iommu->page_table_map_base)
>> IO_PAGE_SHIFT);
base = iommu->page_table + entry;
@@ -671,8 +737,6 @@ static void dma_4u_unmap_sg(struct device *dev, struct scatterlist *sglist,
for (i = 0; i < npages; i++)
iopte_make_dummy(iommu, base + i);
- iommu_tbl_range_free(&iommu->tbl, dma_handle, npages, false,
- NULL);
sg = sg_next(sg);
}
@@ -706,10 +770,9 @@ static void dma_4u_sync_single_for_cpu(struct device *dev,
if (iommu->iommu_ctxflush &&
strbuf->strbuf_ctxflush) {
iopte_t *iopte;
- struct iommu_table *tbl = &iommu->tbl;
iopte = iommu->page_table +
- ((bus_addr - tbl->page_table_map_base)>>IO_PAGE_SHIFT);
+ ((bus_addr - iommu->page_table_map_base)>>IO_PAGE_SHIFT);
ctx = (iopte_val(*iopte) & IOPTE_CONTEXT) >> 47UL;
}
@@ -742,10 +805,9 @@ static void dma_4u_sync_sg_for_cpu(struct device *dev,
if (iommu->iommu_ctxflush &&
strbuf->strbuf_ctxflush) {
iopte_t *iopte;
- struct iommu_table *tbl = &iommu->tbl;
- iopte = iommu->page_table + ((sglist[0].dma_address -
- tbl->page_table_map_base) >> IO_PAGE_SHIFT);
+ iopte = iommu->page_table +
+ ((sglist[0].dma_address - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
ctx = (iopte_val(*iopte) & IOPTE_CONTEXT) >> 47UL;
}
diff --git a/arch/sparc/kernel/iommu_common.h b/arch/sparc/kernel/iommu_common.h
index f4be0d724fc6..1ec0de4156e7 100644
--- a/arch/sparc/kernel/iommu_common.h
+++ b/arch/sparc/kernel/iommu_common.h
@@ -48,4 +48,12 @@ static inline int is_span_boundary(unsigned long entry,
return iommu_is_span_boundary(entry, nr, shift, boundary_size);
}
+unsigned long iommu_range_alloc(struct device *dev,
+ struct iommu *iommu,
+ unsigned long npages,
+ unsigned long *handle);
+void iommu_range_free(struct iommu *iommu,
+ dma_addr_t dma_addr,
+ unsigned long npages);
+
#endif /* _IOMMU_COMMON_H */
diff --git a/arch/sparc/kernel/ldc.c b/arch/sparc/kernel/ldc.c
index d485697c37c0..274a9f59d95c 100644
--- a/arch/sparc/kernel/ldc.c
+++ b/arch/sparc/kernel/ldc.c
@@ -15,8 +15,6 @@
#include <linux/list.h>
#include <linux/init.h>
#include <linux/bitmap.h>
-#include <linux/hash.h>
-#include <linux/iommu-common.h>
#include <asm/hypervisor.h>
#include <asm/iommu.h>
@@ -29,11 +27,6 @@
#define DRV_MODULE_VERSION "1.1"
#define DRV_MODULE_RELDATE "July 22, 2008"
-#define COOKIE_PGSZ_CODE 0xf000000000000000ULL
-#define COOKIE_PGSZ_CODE_SHIFT 60ULL
-
-static DEFINE_PER_CPU(unsigned int, ldc_pool_hash);
-
static char version[] =
DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
#define LDC_PACKET_SIZE 64
@@ -105,10 +98,10 @@ static const struct ldc_mode_ops stream_ops;
int ldom_domaining_enabled;
struct ldc_iommu {
- /* Protects ldc_unmap. */
+ /* Protects arena alloc/free. */
spinlock_t lock;
+ struct iommu_arena arena;
struct ldc_mtable_entry *page_table;
- struct iommu_table iommu_table;
};
struct ldc_channel {
@@ -1005,85 +998,31 @@ static void free_queue(unsigned long num_entries, struct ldc_packet *q)
free_pages((unsigned long)q, order);
}
-static unsigned long ldc_cookie_to_index(u64 cookie, void *arg)
-{
- u64 szcode = cookie >> COOKIE_PGSZ_CODE_SHIFT;
- /* struct ldc_iommu *ldc_iommu = (struct ldc_iommu *)arg; */
-
- cookie &= ~COOKIE_PGSZ_CODE;
-
- return (cookie >> (13ULL + (szcode * 3ULL)));
-}
-
-struct ldc_demap_arg {
- struct ldc_iommu *ldc_iommu;
- u64 cookie;
- unsigned long id;
-};
-
-static void ldc_demap(void *arg, unsigned long entry, unsigned long npages)
-{
- struct ldc_demap_arg *ldc_demap_arg = arg;
- struct ldc_iommu *iommu = ldc_demap_arg->ldc_iommu;
- unsigned long id = ldc_demap_arg->id;
- u64 cookie = ldc_demap_arg->cookie;
- struct ldc_mtable_entry *base;
- unsigned long i, shift;
-
- shift = (cookie >> COOKIE_PGSZ_CODE_SHIFT) * 3;
- base = iommu->page_table + entry;
- for (i = 0; i < npages; i++) {
- if (base->cookie)
- sun4v_ldc_revoke(id, cookie + (i << shift),
- base->cookie);
- base->mte = 0;
- }
-}
-
/* XXX Make this configurable... XXX */
#define LDC_IOTABLE_SIZE (8 * 1024)
-struct iommu_tbl_ops ldc_iommu_ops = {
- .cookie_to_index = ldc_cookie_to_index,
- .demap = ldc_demap,
-};
-
-static void setup_ldc_pool_hash(void)
-{
- unsigned int i;
- static bool do_once;
-
- if (do_once)
- return;
- do_once = true;
- for_each_possible_cpu(i)
- per_cpu(ldc_pool_hash, i) = hash_32(i, IOMMU_POOL_HASHBITS);
-}
-
-
-static int ldc_iommu_init(const char *name, struct ldc_channel *lp)
+static int ldc_iommu_init(struct ldc_channel *lp)
{
unsigned long sz, num_tsb_entries, tsbsize, order;
- struct ldc_iommu *ldc_iommu = &lp->iommu;
- struct iommu_table *iommu = &ldc_iommu->iommu_table;
+ struct ldc_iommu *iommu = &lp->iommu;
struct ldc_mtable_entry *table;
unsigned long hv_err;
int err;
num_tsb_entries = LDC_IOTABLE_SIZE;
tsbsize = num_tsb_entries * sizeof(struct ldc_mtable_entry);
- setup_ldc_pool_hash();
- spin_lock_init(&ldc_iommu->lock);
+
+ spin_lock_init(&iommu->lock);
sz = num_tsb_entries / 8;
sz = (sz + 7UL) & ~7UL;
- iommu->map = kzalloc(sz, GFP_KERNEL);
- if (!iommu->map) {
+ iommu->arena.map = kzalloc(sz, GFP_KERNEL);
+ if (!iommu->arena.map) {
printk(KERN_ERR PFX "Alloc of arena map failed, sz=%lu\n", sz);
return -ENOMEM;
}
- iommu_tbl_pool_init(iommu, num_tsb_entries, PAGE_SHIFT,
- &ldc_iommu_ops, false, 1);
+
+ iommu->arena.limit = num_tsb_entries;
order = get_order(tsbsize);
@@ -1098,7 +1037,7 @@ static int ldc_iommu_init(const char *name, struct ldc_channel *lp)
memset(table, 0, PAGE_SIZE << order);
- ldc_iommu->page_table = table;
+ iommu->page_table = table;
hv_err = sun4v_ldc_set_map_table(lp->id, __pa(table),
num_tsb_entries);
@@ -1110,32 +1049,31 @@ static int ldc_iommu_init(const char *name, struct ldc_channel *lp)
out_free_table:
free_pages((unsigned long) table, order);
- ldc_iommu->page_table = NULL;
+ iommu->page_table = NULL;
out_free_map:
- kfree(iommu->map);
- iommu->map = NULL;
+ kfree(iommu->arena.map);
+ iommu->arena.map = NULL;
return err;
}
static void ldc_iommu_release(struct ldc_channel *lp)
{
- struct ldc_iommu *ldc_iommu = &lp->iommu;
- struct iommu_table *iommu = &ldc_iommu->iommu_table;
+ struct ldc_iommu *iommu = &lp->iommu;
unsigned long num_tsb_entries, tsbsize, order;
(void) sun4v_ldc_set_map_table(lp->id, 0, 0);
- num_tsb_entries = iommu->poolsize * iommu->nr_pools;
+ num_tsb_entries = iommu->arena.limit;
tsbsize = num_tsb_entries * sizeof(struct ldc_mtable_entry);
order = get_order(tsbsize);
- free_pages((unsigned long) ldc_iommu->page_table, order);
- ldc_iommu->page_table = NULL;
+ free_pages((unsigned long) iommu->page_table, order);
+ iommu->page_table = NULL;
- kfree(iommu->map);
- iommu->map = NULL;
+ kfree(iommu->arena.map);
+ iommu->arena.map = NULL;
}
struct ldc_channel *ldc_alloc(unsigned long id,
@@ -1202,7 +1140,7 @@ struct ldc_channel *ldc_alloc(unsigned long id,
lp->id = id;
- err = ldc_iommu_init(name, lp);
+ err = ldc_iommu_init(lp);
if (err)
goto out_free_ldc;
@@ -1947,6 +1885,40 @@ int ldc_read(struct ldc_channel *lp, void *buf, unsigned int size)
}
EXPORT_SYMBOL(ldc_read);
+static long arena_alloc(struct ldc_iommu *iommu, unsigned long npages)
+{
+ struct iommu_arena *arena = &iommu->arena;
+ unsigned long n, start, end, limit;
+ int pass;
+
+ limit = arena->limit;
+ start = arena->hint;
+ pass = 0;
+
+again:
+ n = bitmap_find_next_zero_area(arena->map, limit, start, npages, 0);
+ end = n + npages;
+ if (unlikely(end >= limit)) {
+ if (likely(pass < 1)) {
+ limit = start;
+ start = 0;
+ pass++;
+ goto again;
+ } else {
+ /* Scanned the whole thing, give up. */
+ return -1;
+ }
+ }
+ bitmap_set(arena->map, n, npages);
+
+ arena->hint = end;
+
+ return n;
+}
+
+#define COOKIE_PGSZ_CODE 0xf000000000000000ULL
+#define COOKIE_PGSZ_CODE_SHIFT 60ULL
+
static u64 pagesize_code(void)
{
switch (PAGE_SIZE) {
@@ -1973,14 +1945,23 @@ static u64 make_cookie(u64 index, u64 pgsz_code, u64 page_offset)
page_offset);
}
+static u64 cookie_to_index(u64 cookie, unsigned long *shift)
+{
+ u64 szcode = cookie >> COOKIE_PGSZ_CODE_SHIFT;
+
+ cookie &= ~COOKIE_PGSZ_CODE;
+
+ *shift = szcode * 3;
+
+ return (cookie >> (13ULL + (szcode * 3ULL)));
+}
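
cookie_to_index() decodes a cookie whose top four bits carry the page-size code: with an 8K base page, each code step multiplies the page size by 8, so the entry index sits at bit 13 + 3 * szcode. A self-contained round-trip sketch of that math (the cookie construction below is inferred from cookie_to_index() and is an assumption, not the kernel's make_cookie()):

#include <stdio.h>
#include <stdint.h>

#define COOKIE_PGSZ_CODE       0xf000000000000000ULL
#define COOKIE_PGSZ_CODE_SHIFT 60ULL

static uint64_t cookie_to_index(uint64_t cookie, unsigned long *shift)
{
	uint64_t szcode = cookie >> COOKIE_PGSZ_CODE_SHIFT;

	cookie &= ~COOKIE_PGSZ_CODE;
	*shift = szcode * 3;
	return cookie >> (13ULL + (szcode * 3ULL));
}

int main(void)
{
	/* szcode 1 (64K pages, 8K base): the index starts at bit 16. */
	uint64_t szcode = 1, index = 42;
	uint64_t cookie = (szcode << COOKIE_PGSZ_CODE_SHIFT) |
			  (index << (13 + 3 * szcode));
	unsigned long shift;

	printf("index=%llu shift=%lu\n",
	       (unsigned long long)cookie_to_index(cookie, &shift), shift);
	return 0;
}

This prints index=42 shift=3; free_npages() uses the recovered shift to step the cookie one page at a time when revoking entries.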
static struct ldc_mtable_entry *alloc_npages(struct ldc_iommu *iommu,
unsigned long npages)
{
long entry;
- entry = iommu_tbl_range_alloc(NULL, &iommu->iommu_table, npages,
- NULL, __this_cpu_read(ldc_pool_hash));
+ entry = arena_alloc(iommu, npages);
if (unlikely(entry < 0))
return NULL;
@@ -2109,7 +2090,7 @@ int ldc_map_sg(struct ldc_channel *lp,
struct ldc_trans_cookie *cookies, int ncookies,
unsigned int map_perm)
{
- unsigned long i, npages;
+ unsigned long i, npages, flags;
struct ldc_mtable_entry *base;
struct cookie_state state;
struct ldc_iommu *iommu;
@@ -2128,7 +2109,9 @@ int ldc_map_sg(struct ldc_channel *lp,
iommu = &lp->iommu;
+ spin_lock_irqsave(&iommu->lock, flags);
base = alloc_npages(iommu, npages);
+ spin_unlock_irqrestore(&iommu->lock, flags);
if (!base)
return -ENOMEM;
@@ -2153,7 +2136,7 @@ int ldc_map_single(struct ldc_channel *lp,
struct ldc_trans_cookie *cookies, int ncookies,
unsigned int map_perm)
{
- unsigned long npages, pa;
+ unsigned long npages, pa, flags;
struct ldc_mtable_entry *base;
struct cookie_state state;
struct ldc_iommu *iommu;
@@ -2169,7 +2152,9 @@ int ldc_map_single(struct ldc_channel *lp,
iommu = &lp->iommu;
+ spin_lock_irqsave(&iommu->lock, flags);
base = alloc_npages(iommu, npages);
+ spin_unlock_irqrestore(&iommu->lock, flags);
if (!base)
return -ENOMEM;
@@ -2187,29 +2172,35 @@ int ldc_map_single(struct ldc_channel *lp,
}
EXPORT_SYMBOL(ldc_map_single);
-
static void free_npages(unsigned long id, struct ldc_iommu *iommu,
u64 cookie, u64 size)
{
- unsigned long npages;
- struct ldc_demap_arg demap_arg;
-
- demap_arg.ldc_iommu = iommu;
- demap_arg.cookie = cookie;
- demap_arg.id = id;
+ struct iommu_arena *arena = &iommu->arena;
+ unsigned long i, shift, index, npages;
+ struct ldc_mtable_entry *base;
npages = PAGE_ALIGN(((cookie & ~PAGE_MASK) + size)) >> PAGE_SHIFT;
- iommu_tbl_range_free(&iommu->iommu_table, cookie, npages, true,
- &demap_arg);
+ index = cookie_to_index(cookie, &shift);
+ base = iommu->page_table + index;
+
+ BUG_ON(index > arena->limit ||
+ (index + npages) > arena->limit);
+ for (i = 0; i < npages; i++) {
+ if (base->cookie)
+ sun4v_ldc_revoke(id, cookie + (i << shift),
+ base->cookie);
+ base->mte = 0;
+ __clear_bit(index + i, arena->map);
+ }
}
void ldc_unmap(struct ldc_channel *lp, struct ldc_trans_cookie *cookies,
int ncookies)
{
struct ldc_iommu *iommu = &lp->iommu;
- int i;
unsigned long flags;
+ int i;
spin_lock_irqsave(&iommu->lock, flags);
for (i = 0; i < ncookies; i++) {
diff --git a/arch/sparc/kernel/pci_sun4v.c b/arch/sparc/kernel/pci_sun4v.c
index 9b76b9d639e1..47ddbd496a1e 100644
--- a/arch/sparc/kernel/pci_sun4v.c
+++ b/arch/sparc/kernel/pci_sun4v.c
@@ -15,8 +15,6 @@
#include <linux/export.h>
#include <linux/log2.h>
#include <linux/of_device.h>
-#include <linux/hash.h>
-#include <linux/iommu-common.h>
#include <asm/iommu.h>
#include <asm/irq.h>
@@ -30,7 +28,6 @@
#define DRIVER_NAME "pci_sun4v"
#define PFX DRIVER_NAME ": "
-static DEFINE_PER_CPU(unsigned int, iommu_pool_hash);
static unsigned long vpci_major = 1;
static unsigned long vpci_minor = 1;
@@ -158,13 +155,14 @@ static void *dma_4v_alloc_coherent(struct device *dev, size_t size,
iommu = dev->archdata.iommu;
- entry = iommu_tbl_range_alloc(dev, &iommu->tbl, npages, NULL,
- __this_cpu_read(iommu_pool_hash));
+ spin_lock_irqsave(&iommu->lock, flags);
+ entry = iommu_range_alloc(dev, iommu, npages, NULL);
+ spin_unlock_irqrestore(&iommu->lock, flags);
if (unlikely(entry == DMA_ERROR_CODE))
goto range_alloc_fail;
- *dma_addrp = (iommu->tbl.page_table_map_base +
+ *dma_addrp = (iommu->page_table_map_base +
(entry << IO_PAGE_SHIFT));
ret = (void *) first_page;
first_page = __pa(first_page);
@@ -190,46 +188,45 @@ static void *dma_4v_alloc_coherent(struct device *dev, size_t size,
return ret;
iommu_map_fail:
- iommu_tbl_range_free(&iommu->tbl, *dma_addrp, npages, false, NULL);
+ /* Interrupts are disabled. */
+ spin_lock(&iommu->lock);
+ iommu_range_free(iommu, *dma_addrp, npages);
+ spin_unlock_irqrestore(&iommu->lock, flags);
range_alloc_fail:
free_pages(first_page, order);
return NULL;
}
-static void dma_4v_iommu_demap(void *demap_arg, unsigned long entry,
- unsigned long npages)
-{
- u32 devhandle = *(u32 *)demap_arg;
- unsigned long num, flags;
-
- local_irq_save(flags);
- do {
- num = pci_sun4v_iommu_demap(devhandle,
- HV_PCI_TSBID(0, entry),
- npages);
-
- entry += num;
- npages -= num;
- } while (npages != 0);
- local_irq_restore(flags);
-}
-
static void dma_4v_free_coherent(struct device *dev, size_t size, void *cpu,
dma_addr_t dvma, struct dma_attrs *attrs)
{
struct pci_pbm_info *pbm;
struct iommu *iommu;
- unsigned long order, npages, entry;
+ unsigned long flags, order, npages, entry;
u32 devhandle;
npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
iommu = dev->archdata.iommu;
pbm = dev->archdata.host_controller;
devhandle = pbm->devhandle;
- entry = ((dvma - iommu->tbl.page_table_map_base) >> IO_PAGE_SHIFT);
- dma_4v_iommu_demap(&devhandle, entry, npages);
- iommu_tbl_range_free(&iommu->tbl, dvma, npages, false, NULL);
+ entry = ((dvma - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
+
+ spin_lock_irqsave(&iommu->lock, flags);
+
+ iommu_range_free(iommu, dvma, npages);
+
+ do {
+ unsigned long num;
+
+ num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry),
+ npages);
+ entry += num;
+ npages -= num;
+ } while (npages != 0);
+
+ spin_unlock_irqrestore(&iommu->lock, flags);
+
order = get_order(size);
if (order < 10)
free_pages((unsigned long)cpu, order);
@@ -256,13 +253,14 @@ static dma_addr_t dma_4v_map_page(struct device *dev, struct page *page,
npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK);
npages >>= IO_PAGE_SHIFT;
- entry = iommu_tbl_range_alloc(dev, &iommu->tbl, npages, NULL,
- __this_cpu_read(iommu_pool_hash));
+ spin_lock_irqsave(&iommu->lock, flags);
+ entry = iommu_range_alloc(dev, iommu, npages, NULL);
+ spin_unlock_irqrestore(&iommu->lock, flags);
if (unlikely(entry == DMA_ERROR_CODE))
goto bad;
- bus_addr = (iommu->tbl.page_table_map_base +
+ bus_addr = (iommu->page_table_map_base +
(entry << IO_PAGE_SHIFT));
ret = bus_addr | (oaddr & ~IO_PAGE_MASK);
base_paddr = __pa(oaddr & IO_PAGE_MASK);
@@ -292,7 +290,11 @@ bad:
return DMA_ERROR_CODE;
iommu_map_fail:
- iommu_tbl_range_free(&iommu->tbl, bus_addr, npages, false, NULL);
+ /* Interrupts are disabled. */
+ spin_lock(&iommu->lock);
+ iommu_range_free(iommu, bus_addr, npages);
+ spin_unlock_irqrestore(&iommu->lock, flags);
+
return DMA_ERROR_CODE;
}
@@ -302,7 +304,7 @@ static void dma_4v_unmap_page(struct device *dev, dma_addr_t bus_addr,
{
struct pci_pbm_info *pbm;
struct iommu *iommu;
- unsigned long npages;
+ unsigned long flags, npages;
long entry;
u32 devhandle;
@@ -319,9 +321,22 @@ static void dma_4v_unmap_page(struct device *dev, dma_addr_t bus_addr,
npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
npages >>= IO_PAGE_SHIFT;
bus_addr &= IO_PAGE_MASK;
- entry = (bus_addr - iommu->tbl.page_table_map_base) >> IO_PAGE_SHIFT;
- dma_4v_iommu_demap(&devhandle, entry, npages);
- iommu_tbl_range_free(&iommu->tbl, bus_addr, npages, false, NULL);
+
+ spin_lock_irqsave(&iommu->lock, flags);
+
+ iommu_range_free(iommu, bus_addr, npages);
+
+ entry = (bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT;
+ do {
+ unsigned long num;
+
+ num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry),
+ npages);
+ entry += num;
+ npages -= num;
+ } while (npages != 0);
+
+ spin_unlock_irqrestore(&iommu->lock, flags);
}
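
pci_sun4v_iommu_demap() may process fewer entries than requested, which is why every caller in this file loops, advancing the entry index and shrinking the remaining count by the returned amount. A user-space sketch of that pattern, with a hypothetical fake_demap() standing in for the hypervisor call:

#include <stdio.h>

/* Pretend the "hypervisor" only handles 3 entries per call. */
static unsigned long fake_demap(unsigned long entry, unsigned long npages)
{
	unsigned long num = npages < 3 ? npages : 3;

	printf("demap entry=%lu count=%lu\n", entry, num);
	return num;
}

int main(void)
{
	unsigned long entry = 100, npages = 8;

	do {
		unsigned long num = fake_demap(entry, npages);

		entry += num;
		npages -= num;
	} while (npages != 0);
	return 0;
}

With 8 pages it issues three calls (3 + 3 + 2 entries), exactly the shape of the do/while loops in dma_4v_free_coherent() and dma_4v_unmap_page().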
static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
@@ -356,14 +371,14 @@ static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
/* Init first segment length for backout at failure */
outs->dma_length = 0;
- local_irq_save(flags);
+ spin_lock_irqsave(&iommu->lock, flags);
iommu_batch_start(dev, prot, ~0UL);
max_seg_size = dma_get_max_seg_size(dev);
seg_boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
IO_PAGE_SIZE) >> IO_PAGE_SHIFT;
- base_shift = iommu->tbl.page_table_map_base >> IO_PAGE_SHIFT;
+ base_shift = iommu->page_table_map_base >> IO_PAGE_SHIFT;
for_each_sg(sglist, s, nelems, i) {
unsigned long paddr, npages, entry, out_entry = 0, slen;
@@ -376,8 +391,7 @@ static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
/* Allocate iommu entries for that segment */
paddr = (unsigned long) SG_ENT_PHYS_ADDRESS(s);
npages = iommu_num_pages(paddr, slen, IO_PAGE_SIZE);
- entry = iommu_tbl_range_alloc(dev, &iommu->tbl, npages, &handle,
- __this_cpu_read(iommu_pool_hash));
+ entry = iommu_range_alloc(dev, iommu, npages, &handle);
/* Handle failure */
if (unlikely(entry == DMA_ERROR_CODE)) {
@@ -390,7 +404,7 @@ static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
iommu_batch_new_entry(entry);
/* Convert entry to a dma_addr_t */
- dma_addr = iommu->tbl.page_table_map_base +
+ dma_addr = iommu->page_table_map_base +
(entry << IO_PAGE_SHIFT);
dma_addr |= (s->offset & ~IO_PAGE_MASK);
@@ -437,7 +451,7 @@ static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
if (unlikely(err < 0L))
goto iommu_map_failed;
- local_irq_restore(flags);
+ spin_unlock_irqrestore(&iommu->lock, flags);
if (outcount < incount) {
outs = sg_next(outs);
@@ -455,8 +469,7 @@ iommu_map_failed:
vaddr = s->dma_address & IO_PAGE_MASK;
npages = iommu_num_pages(s->dma_address, s->dma_length,
IO_PAGE_SIZE);
- iommu_tbl_range_free(&iommu->tbl, vaddr, npages,
- false, NULL);
+ iommu_range_free(iommu, vaddr, npages);
/* XXX demap? XXX */
s->dma_address = DMA_ERROR_CODE;
s->dma_length = 0;
@@ -464,7 +477,7 @@ iommu_map_failed:
if (s == outs)
break;
}
- local_irq_restore(flags);
+ spin_unlock_irqrestore(&iommu->lock, flags);
return 0;
}
@@ -476,7 +489,7 @@ static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist,
struct pci_pbm_info *pbm;
struct scatterlist *sg;
struct iommu *iommu;
- unsigned long flags, entry;
+ unsigned long flags;
u32 devhandle;
BUG_ON(direction == DMA_NONE);
@@ -485,27 +498,33 @@ static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist,
pbm = dev->archdata.host_controller;
devhandle = pbm->devhandle;
- local_irq_save(flags);
+ spin_lock_irqsave(&iommu->lock, flags);
sg = sglist;
while (nelems--) {
dma_addr_t dma_handle = sg->dma_address;
unsigned int len = sg->dma_length;
- unsigned long npages;
- struct iommu_table *tbl = &iommu->tbl;
- unsigned long shift = IO_PAGE_SHIFT;
+ unsigned long npages, entry;
if (!len)
break;
npages = iommu_num_pages(dma_handle, len, IO_PAGE_SIZE);
- entry = ((dma_handle - tbl->page_table_map_base) >> shift);
- dma_4v_iommu_demap(&devhandle, entry, npages);
- iommu_tbl_range_free(&iommu->tbl, dma_handle, npages,
- false, NULL);
+ iommu_range_free(iommu, dma_handle, npages);
+
+ entry = ((dma_handle - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
+ while (npages) {
+ unsigned long num;
+
+ num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry),
+ npages);
+ entry += num;
+ npages -= num;
+ }
+
sg = sg_next(sg);
}
- local_irq_restore(flags);
+ spin_unlock_irqrestore(&iommu->lock, flags);
}
static struct dma_map_ops sun4v_dma_ops = {
@@ -517,8 +536,6 @@ static struct dma_map_ops sun4v_dma_ops = {
.unmap_sg = dma_4v_unmap_sg,
};
-static struct iommu_tbl_ops dma_4v_iommu_ops;
-
static void pci_sun4v_scan_bus(struct pci_pbm_info *pbm, struct device *parent)
{
struct property *prop;
@@ -533,33 +550,30 @@ static void pci_sun4v_scan_bus(struct pci_pbm_info *pbm, struct device *parent)
}
static unsigned long probe_existing_entries(struct pci_pbm_info *pbm,
- struct iommu_table *iommu)
+ struct iommu *iommu)
{
- struct iommu_pool *pool;
- unsigned long i, pool_nr, cnt = 0;
+ struct iommu_arena *arena = &iommu->arena;
+ unsigned long i, cnt = 0;
u32 devhandle;
devhandle = pbm->devhandle;
- for (pool_nr = 0; pool_nr < iommu->nr_pools; pool_nr++) {
- pool = &(iommu->arena_pool[pool_nr]);
- for (i = pool->start; i <= pool->end; i++) {
- unsigned long ret, io_attrs, ra;
-
- ret = pci_sun4v_iommu_getmap(devhandle,
- HV_PCI_TSBID(0, i),
- &io_attrs, &ra);
- if (ret == HV_EOK) {
- if (page_in_phys_avail(ra)) {
- pci_sun4v_iommu_demap(devhandle,
- HV_PCI_TSBID(0,
- i), 1);
- } else {
- cnt++;
- __set_bit(i, iommu->map);
- }
+ for (i = 0; i < arena->limit; i++) {
+ unsigned long ret, io_attrs, ra;
+
+ ret = pci_sun4v_iommu_getmap(devhandle,
+ HV_PCI_TSBID(0, i),
+ &io_attrs, &ra);
+ if (ret == HV_EOK) {
+ if (page_in_phys_avail(ra)) {
+ pci_sun4v_iommu_demap(devhandle,
+ HV_PCI_TSBID(0, i), 1);
+ } else {
+ cnt++;
+ __set_bit(i, arena->map);
}
}
}
+
return cnt;
}
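
probe_existing_entries() now walks the whole arena instead of per-pool ranges: firmware (OBP) mappings whose backing RAM the kernel will reclaim are demapped, while the rest are marked busy so iommu_range_alloc() never hands them out. A toy model of that scan (fake data throughout; page_in_phys_avail() and both hypervisor calls are stubbed with plain arrays):

#include <stdio.h>
#include <stdbool.h>

#define LIMIT 8

static bool mapped[LIMIT]    = { false, true, false, true,
				 false, false, true, false };
static bool ram_avail[LIMIT] = { false, true, false, false,
				 false, false, false, false };
static unsigned char arena_map[LIMIT];

int main(void)
{
	unsigned long i, cnt = 0;

	for (i = 0; i < LIMIT; i++) {
		if (!mapped[i])
			continue;            /* getmap != HV_EOK: skip */
		if (ram_avail[i]) {
			printf("demap entry %lu (RAM reclaimed)\n", i);
		} else {
			cnt++;
			arena_map[i] = 1;    /* reserve: firmware owns it */
		}
	}
	printf("imported %lu entries\n", cnt);
	return 0;
}

Here entries 3 and 6 survive as imported mappings and entry 1 is demapped, matching the "Imported %lu TSB entries from OBP" accounting below.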
@@ -587,22 +601,22 @@ static int pci_sun4v_iommu_init(struct pci_pbm_info *pbm)
dma_offset = vdma[0];
/* Setup initial software IOMMU state. */
+ spin_lock_init(&iommu->lock);
iommu->ctx_lowest_free = 1;
- iommu->tbl.page_table_map_base = dma_offset;
+ iommu->page_table_map_base = dma_offset;
iommu->dma_addr_mask = dma_mask;
/* Allocate and initialize the free area map. */
sz = (num_tsb_entries + 7) / 8;
sz = (sz + 7UL) & ~7UL;
- iommu->tbl.map = kzalloc(sz, GFP_KERNEL);
- if (!iommu->tbl.map) {
+ iommu->arena.map = kzalloc(sz, GFP_KERNEL);
+ if (!iommu->arena.map) {
printk(KERN_ERR PFX "Error, kmalloc(arena.map) failed.\n");
return -ENOMEM;
}
- iommu_tbl_pool_init(&iommu->tbl, num_tsb_entries, IO_PAGE_SHIFT,
- &dma_4v_iommu_ops, false /* no large_pool */,
- 0 /* default npools */);
- sz = probe_existing_entries(pbm, &iommu->tbl);
+ iommu->arena.limit = num_tsb_entries;
+
+ sz = probe_existing_entries(pbm, iommu);
if (sz)
printk("%s: Imported %lu TSB entries from OBP\n",
pbm->name, sz);
@@ -1001,17 +1015,8 @@ static struct platform_driver pci_sun4v_driver = {
.probe = pci_sun4v_probe,
};
-static void setup_iommu_pool_hash(void)
-{
- unsigned int i;
-
- for_each_possible_cpu(i)
- per_cpu(iommu_pool_hash, i) = hash_32(i, IOMMU_POOL_HASHBITS);
-}
-
static int __init pci_sun4v_init(void)
{
- setup_iommu_pool_hash();
return platform_driver_register(&pci_sun4v_driver);
}