author     Russell King <rmk+kernel@arm.linux.org.uk>  2009-12-18 02:22:23 +0300
committer  Russell King <rmk+kernel@arm.linux.org.uk>  2009-12-18 02:22:23 +0300
commit     6665398afafcb1c75d933c1452a9010644aba3e6 (patch)
tree       6a6dce2ac7835de25f422330ea224a01eef55635
parent     c0caac93f873cd3402b63246bf94d904afc4f5fd (diff)
parent     bf32eb85492af197ea5ff20e0be56f667a80584d (diff)
download   linux-6665398afafcb1c75d933c1452a9010644aba3e6.tar.xz
Merge branch 'cache' (early part)
-rw-r--r--  arch/arm/common/dmabounce.c         12
-rw-r--r--  arch/arm/include/asm/cacheflush.h   17
-rw-r--r--  arch/arm/mm/cache-fa.S              11
-rw-r--r--  arch/arm/mm/cache-l2x0.c            93
-rw-r--r--  arch/arm/mm/cache-v3.S               9
-rw-r--r--  arch/arm/mm/cache-v4.S               9
-rw-r--r--  arch/arm/mm/cache-v4wb.S            11
-rw-r--r--  arch/arm/mm/cache-v4wt.S            11
-rw-r--r--  arch/arm/mm/cache-v6.S              11
-rw-r--r--  arch/arm/mm/cache-v7.S              13
-rw-r--r--  arch/arm/mm/flush.c                  4
-rw-r--r--  arch/arm/mm/highmem.c                2
-rw-r--r--  arch/arm/mm/nommu.c                  2
-rw-r--r--  arch/arm/mm/proc-arm1020.S          11
-rw-r--r--  arch/arm/mm/proc-arm1020e.S         11
-rw-r--r--  arch/arm/mm/proc-arm1022.S          11
-rw-r--r--  arch/arm/mm/proc-arm1026.S          11
-rw-r--r--  arch/arm/mm/proc-arm920.S           11
-rw-r--r--  arch/arm/mm/proc-arm922.S           11
-rw-r--r--  arch/arm/mm/proc-arm925.S           11
-rw-r--r--  arch/arm/mm/proc-arm926.S           11
-rw-r--r--  arch/arm/mm/proc-arm940.S            9
-rw-r--r--  arch/arm/mm/proc-arm946.S           11
-rw-r--r--  arch/arm/mm/proc-feroceon.S         15
-rw-r--r--  arch/arm/mm/proc-mohawk.S           11
-rw-r--r--  arch/arm/mm/proc-syms.c              3
-rw-r--r--  arch/arm/mm/proc-xsc3.S             11
-rw-r--r--  arch/arm/mm/proc-xscale.S           13
-rw-r--r--  drivers/mtd/maps/pxa2xx-flash.c     13
29 files changed, 224 insertions(+), 155 deletions(-)
diff --git a/arch/arm/common/dmabounce.c b/arch/arm/common/dmabounce.c
index 5a375e5fef21..bc90364a96c7 100644
--- a/arch/arm/common/dmabounce.c
+++ b/arch/arm/common/dmabounce.c
@@ -308,15 +308,11 @@ static inline void unmap_single(struct device *dev, dma_addr_t dma_addr,
memcpy(ptr, buf->safe, size);
/*
- * DMA buffers must have the same cache properties
- * as if they were really used for DMA - which means
- * data must be written back to RAM. Note that
- * we don't use dmac_flush_range() here for the
- * bidirectional case because we know the cache
- * lines will be coherent with the data written.
+ * Since we may have written to a page cache page,
+ * we need to ensure that the data will be coherent
+ * with user mappings.
*/
- dmac_clean_range(ptr, ptr + size);
- outer_clean_range(__pa(ptr), __pa(ptr) + size);
+ __cpuc_flush_kernel_dcache_area(ptr, size);
}
free_safe_buffer(dev->archdata.dmabounce, buf);
}
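The dmabounce hunk above changes semantics, not just a name. The old code only cleaned the buffer back to RAM (L1 via dmac_clean_range(), outer cache via outer_clean_range()), which is enough when the memory is only ever seen through this one mapping; since the memcpy() may have dirtied the kernel alias of a page cache page, the new code instead cleans and invalidates that alias so it stays coherent with user mappings. A hedged C restatement of the two strategies, with hypothetical wrapper names for contrast only (the calls themselves are taken from the hunk):

	/* Before: write the copied-back data out to RAM through L1 and the
	 * outer cache.  (Wrapper name is hypothetical.) */
	static void dmabounce_writeback_old(void *ptr, size_t size)
	{
		dmac_clean_range(ptr, ptr + size);
		outer_clean_range(__pa(ptr), __pa(ptr) + size);
	}

	/* After: clean+invalidate the kernel alias so user mappings of the
	 * same (page cache) page observe the copied-back data. */
	static void dmabounce_writeback_new(void *ptr, size_t size)
	{
		__cpuc_flush_kernel_dcache_area(ptr, size);
	}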
diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
index 73eceb87e588..730aefcfbee3 100644
--- a/arch/arm/include/asm/cacheflush.h
+++ b/arch/arm/include/asm/cacheflush.h
@@ -211,7 +211,7 @@ struct cpu_cache_fns {
void (*coherent_kern_range)(unsigned long, unsigned long);
void (*coherent_user_range)(unsigned long, unsigned long);
- void (*flush_kern_dcache_page)(void *);
+ void (*flush_kern_dcache_area)(void *, size_t);
void (*dma_inv_range)(const void *, const void *);
void (*dma_clean_range)(const void *, const void *);
@@ -236,7 +236,7 @@ extern struct cpu_cache_fns cpu_cache;
#define __cpuc_flush_user_range cpu_cache.flush_user_range
#define __cpuc_coherent_kern_range cpu_cache.coherent_kern_range
#define __cpuc_coherent_user_range cpu_cache.coherent_user_range
-#define __cpuc_flush_dcache_page cpu_cache.flush_kern_dcache_page
+#define __cpuc_flush_dcache_area cpu_cache.flush_kern_dcache_area
/*
* These are private to the dma-mapping API. Do not use directly.
@@ -255,14 +255,14 @@ extern struct cpu_cache_fns cpu_cache;
#define __cpuc_flush_user_range __glue(_CACHE,_flush_user_cache_range)
#define __cpuc_coherent_kern_range __glue(_CACHE,_coherent_kern_range)
#define __cpuc_coherent_user_range __glue(_CACHE,_coherent_user_range)
-#define __cpuc_flush_dcache_page __glue(_CACHE,_flush_kern_dcache_page)
+#define __cpuc_flush_dcache_area __glue(_CACHE,_flush_kern_dcache_area)
extern void __cpuc_flush_kern_all(void);
extern void __cpuc_flush_user_all(void);
extern void __cpuc_flush_user_range(unsigned long, unsigned long, unsigned int);
extern void __cpuc_coherent_kern_range(unsigned long, unsigned long);
extern void __cpuc_coherent_user_range(unsigned long, unsigned long);
-extern void __cpuc_flush_dcache_page(void *);
+extern void __cpuc_flush_dcache_area(void *, size_t);
/*
* These are private to the dma-mapping API. Do not use directly.
@@ -448,7 +448,7 @@ static inline void flush_kernel_dcache_page(struct page *page)
{
/* highmem pages are always flushed upon kunmap already */
if ((cache_is_vivt() || cache_is_vipt_aliasing()) && !PageHighMem(page))
- __cpuc_flush_dcache_page(page_address(page));
+ __cpuc_flush_dcache_area(page_address(page), PAGE_SIZE);
}
#define flush_dcache_mmap_lock(mapping) \
@@ -465,13 +465,6 @@ static inline void flush_kernel_dcache_page(struct page *page)
*/
#define flush_icache_page(vma,page) do { } while (0)
-static inline void flush_ioremap_region(unsigned long phys, void __iomem *virt,
- unsigned offset, size_t size)
-{
- const void *start = (void __force *)virt + offset;
- dmac_inv_range(start, start + size);
-}
-
/*
* flush_cache_vmap() is used when creating mappings (eg, via vmap,
* vmalloc, ioremap etc) in kernel space for pages. On non-VIPT
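Every caller below follows the same pattern once the size parameter exists. A minimal sketch of the updated call for a single page, mirroring the flush.c, highmem.c and nommu.c hunks later in this diff:

	#include <linux/mm.h>
	#include <asm/cacheflush.h>

	/* Flush the kernel mapping of one page with the renamed interface.
	 * Under CONFIG_MULTI_CACHE this dispatches through
	 * cpu_cache.flush_kern_dcache_area; otherwise __glue() binds it to
	 * the per-CPU implementation at compile time. */
	static void flush_page_alias(struct page *page)
	{
		__cpuc_flush_dcache_area(page_address(page), PAGE_SIZE);
	}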
diff --git a/arch/arm/mm/cache-fa.S b/arch/arm/mm/cache-fa.S
index b63a8f7b95cf..a89444a3c016 100644
--- a/arch/arm/mm/cache-fa.S
+++ b/arch/arm/mm/cache-fa.S
@@ -127,15 +127,16 @@ ENTRY(fa_coherent_user_range)
mov pc, lr
/*
- * flush_kern_dcache_page(kaddr)
+ * flush_kern_dcache_area(void *addr, size_t size)
*
* Ensure that the data held in the page kaddr is written back
* to the page in question.
*
- * - kaddr - kernel address (guaranteed to be page aligned)
+ * - addr - kernel address
+ * - size - size of region
*/
-ENTRY(fa_flush_kern_dcache_page)
- add r1, r0, #PAGE_SZ
+ENTRY(fa_flush_kern_dcache_area)
+ add r1, r0, r1
1: mcr p15, 0, r0, c7, c14, 1 @ clean & invalidate D line
add r0, r0, #CACHE_DLINESIZE
cmp r0, r1
@@ -213,7 +214,7 @@ ENTRY(fa_cache_fns)
.long fa_flush_user_cache_range
.long fa_coherent_kern_range
.long fa_coherent_user_range
- .long fa_flush_kern_dcache_page
+ .long fa_flush_kern_dcache_area
.long fa_dma_inv_range
.long fa_dma_clean_range
.long fa_dma_flush_range
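The two-line assembly change here — replacing "add r1, r0, #PAGE_SZ" with "add r1, r0, r1" so the loop bound comes from the size argument rather than a fixed page — recurs in almost every per-CPU file below. A rough C equivalent of the loop, with the line size and the cache operation left as labelled stand-ins:

	/* clean_inv_dcache_line() is hypothetical: it models the
	 * "mcr p15, 0, r0, c7, c14, 1" clean+invalidate instruction. */
	static void clean_inv_dcache_line(char *addr);

	/* Sketch: walk the region line by line up to addr + size
	 * (previously the bound was the fixed addr + PAGE_SZ). */
	static void flush_kern_dcache_area_sketch(char *addr, size_t size,
						  size_t dline_size)
	{
		char *end = addr + size;

		do {
			clean_inv_dcache_line(addr);
			addr += dline_size;
		} while (addr < end);
		/* the asm then invalidates the I-cache and drains the
		 * write buffer, omitted here */
	}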
diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
index 747f9a9021bb..cb8fc6573b1b 100644
--- a/arch/arm/mm/cache-l2x0.c
+++ b/arch/arm/mm/cache-l2x0.c
@@ -28,69 +28,120 @@
static void __iomem *l2x0_base;
static DEFINE_SPINLOCK(l2x0_lock);
-static inline void sync_writel(unsigned long val, unsigned long reg,
- unsigned long complete_mask)
+static inline void cache_wait(void __iomem *reg, unsigned long mask)
{
- unsigned long flags;
-
- spin_lock_irqsave(&l2x0_lock, flags);
- writel(val, l2x0_base + reg);
/* wait for the operation to complete */
- while (readl(l2x0_base + reg) & complete_mask)
+ while (readl(reg) & mask)
;
- spin_unlock_irqrestore(&l2x0_lock, flags);
}
static inline void cache_sync(void)
{
- sync_writel(0, L2X0_CACHE_SYNC, 1);
+ void __iomem *base = l2x0_base;
+ writel(0, base + L2X0_CACHE_SYNC);
+ cache_wait(base + L2X0_CACHE_SYNC, 1);
}
static inline void l2x0_inv_all(void)
{
+ unsigned long flags;
+
/* invalidate all ways */
- sync_writel(0xff, L2X0_INV_WAY, 0xff);
+ spin_lock_irqsave(&l2x0_lock, flags);
+ writel(0xff, l2x0_base + L2X0_INV_WAY);
+ cache_wait(l2x0_base + L2X0_INV_WAY, 0xff);
cache_sync();
+ spin_unlock_irqrestore(&l2x0_lock, flags);
}
static void l2x0_inv_range(unsigned long start, unsigned long end)
{
- unsigned long addr;
+ void __iomem *base = l2x0_base;
+ unsigned long flags;
+ spin_lock_irqsave(&l2x0_lock, flags);
if (start & (CACHE_LINE_SIZE - 1)) {
start &= ~(CACHE_LINE_SIZE - 1);
- sync_writel(start, L2X0_CLEAN_INV_LINE_PA, 1);
+ cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1);
+ writel(start, base + L2X0_CLEAN_INV_LINE_PA);
start += CACHE_LINE_SIZE;
}
if (end & (CACHE_LINE_SIZE - 1)) {
end &= ~(CACHE_LINE_SIZE - 1);
- sync_writel(end, L2X0_CLEAN_INV_LINE_PA, 1);
+ cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1);
+ writel(end, base + L2X0_CLEAN_INV_LINE_PA);
}
- for (addr = start; addr < end; addr += CACHE_LINE_SIZE)
- sync_writel(addr, L2X0_INV_LINE_PA, 1);
+ while (start < end) {
+ unsigned long blk_end = start + min(end - start, 4096UL);
+
+ while (start < blk_end) {
+ cache_wait(base + L2X0_INV_LINE_PA, 1);
+ writel(start, base + L2X0_INV_LINE_PA);
+ start += CACHE_LINE_SIZE;
+ }
+
+ if (blk_end < end) {
+ spin_unlock_irqrestore(&l2x0_lock, flags);
+ spin_lock_irqsave(&l2x0_lock, flags);
+ }
+ }
+ cache_wait(base + L2X0_INV_LINE_PA, 1);
cache_sync();
+ spin_unlock_irqrestore(&l2x0_lock, flags);
}
static void l2x0_clean_range(unsigned long start, unsigned long end)
{
- unsigned long addr;
+ void __iomem *base = l2x0_base;
+ unsigned long flags;
+ spin_lock_irqsave(&l2x0_lock, flags);
start &= ~(CACHE_LINE_SIZE - 1);
- for (addr = start; addr < end; addr += CACHE_LINE_SIZE)
- sync_writel(addr, L2X0_CLEAN_LINE_PA, 1);
+ while (start < end) {
+ unsigned long blk_end = start + min(end - start, 4096UL);
+
+ while (start < blk_end) {
+ cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
+ writel(start, base + L2X0_CLEAN_LINE_PA);
+ start += CACHE_LINE_SIZE;
+ }
+
+ if (blk_end < end) {
+ spin_unlock_irqrestore(&l2x0_lock, flags);
+ spin_lock_irqsave(&l2x0_lock, flags);
+ }
+ }
+ cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
cache_sync();
+ spin_unlock_irqrestore(&l2x0_lock, flags);
}
static void l2x0_flush_range(unsigned long start, unsigned long end)
{
- unsigned long addr;
+ void __iomem *base = l2x0_base;
+ unsigned long flags;
+ spin_lock_irqsave(&l2x0_lock, flags);
start &= ~(CACHE_LINE_SIZE - 1);
- for (addr = start; addr < end; addr += CACHE_LINE_SIZE)
- sync_writel(addr, L2X0_CLEAN_INV_LINE_PA, 1);
+ while (start < end) {
+ unsigned long blk_end = start + min(end - start, 4096UL);
+
+ while (start < blk_end) {
+ cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1);
+ writel(start, base + L2X0_CLEAN_INV_LINE_PA);
+ start += CACHE_LINE_SIZE;
+ }
+
+ if (blk_end < end) {
+ spin_unlock_irqrestore(&l2x0_lock, flags);
+ spin_lock_irqsave(&l2x0_lock, flags);
+ }
+ }
+ cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1);
cache_sync();
+ spin_unlock_irqrestore(&l2x0_lock, flags);
}
void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask)
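All three l2x0 range operations above now share one structure: poll the maintenance register with cache_wait() before each writel() instead of serialising whole operations through sync_writel(), and walk the range in 4096-byte blocks, dropping and retaking the spinlock between blocks so a long clean or invalidate no longer keeps interrupts disabled for the entire range. A sketch of that common pattern (the unaligned-edge handling specific to l2x0_inv_range is omitted), assuming l2x0_base, l2x0_lock, cache_wait(), cache_sync() and CACHE_LINE_SIZE as defined in the patch above:

	#include <linux/spinlock.h>
	#include <linux/io.h>
	#include <linux/kernel.h>	/* min() */

	static void l2x0_op_range(unsigned long start, unsigned long end,
				  unsigned long reg)	/* e.g. L2X0_CLEAN_LINE_PA */
	{
		void __iomem *base = l2x0_base;
		unsigned long flags;

		spin_lock_irqsave(&l2x0_lock, flags);
		start &= ~(CACHE_LINE_SIZE - 1);
		while (start < end) {
			unsigned long blk_end = start + min(end - start, 4096UL);

			/* issue one block of line operations under the lock */
			while (start < blk_end) {
				cache_wait(base + reg, 1);
				writel(start, base + reg);
				start += CACHE_LINE_SIZE;
			}

			/* let pending interrupts in before the next block */
			if (blk_end < end) {
				spin_unlock_irqrestore(&l2x0_lock, flags);
				spin_lock_irqsave(&l2x0_lock, flags);
			}
		}
		cache_wait(base + reg, 1);
		cache_sync();
		spin_unlock_irqrestore(&l2x0_lock, flags);
	}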
diff --git a/arch/arm/mm/cache-v3.S b/arch/arm/mm/cache-v3.S
index 8a4abebc478a..2a482731ea36 100644
--- a/arch/arm/mm/cache-v3.S
+++ b/arch/arm/mm/cache-v3.S
@@ -72,14 +72,15 @@ ENTRY(v3_coherent_user_range)
mov pc, lr
/*
- * flush_kern_dcache_page(void *page)
+ * flush_kern_dcache_area(void *page, size_t size)
*
* Ensure no D cache aliasing occurs, either with itself or
* the I cache
*
- * - addr - page aligned address
+ * - addr - kernel address
+ * - size - region size
*/
-ENTRY(v3_flush_kern_dcache_page)
+ENTRY(v3_flush_kern_dcache_area)
/* FALLTHROUGH */
/*
@@ -129,7 +130,7 @@ ENTRY(v3_cache_fns)
.long v3_flush_user_cache_range
.long v3_coherent_kern_range
.long v3_coherent_user_range
- .long v3_flush_kern_dcache_page
+ .long v3_flush_kern_dcache_area
.long v3_dma_inv_range
.long v3_dma_clean_range
.long v3_dma_flush_range
diff --git a/arch/arm/mm/cache-v4.S b/arch/arm/mm/cache-v4.S
index 3668611cb400..5c7da3e372e9 100644
--- a/arch/arm/mm/cache-v4.S
+++ b/arch/arm/mm/cache-v4.S
@@ -82,14 +82,15 @@ ENTRY(v4_coherent_user_range)
mov pc, lr
/*
- * flush_kern_dcache_page(void *page)
+ * flush_kern_dcache_area(void *addr, size_t size)
*
* Ensure no D cache aliasing occurs, either with itself or
* the I cache
*
- * - addr - page aligned address
+ * - addr - kernel address
+ * - size - region size
*/
-ENTRY(v4_flush_kern_dcache_page)
+ENTRY(v4_flush_kern_dcache_area)
/* FALLTHROUGH */
/*
@@ -141,7 +142,7 @@ ENTRY(v4_cache_fns)
.long v4_flush_user_cache_range
.long v4_coherent_kern_range
.long v4_coherent_user_range
- .long v4_flush_kern_dcache_page
+ .long v4_flush_kern_dcache_area
.long v4_dma_inv_range
.long v4_dma_clean_range
.long v4_dma_flush_range
diff --git a/arch/arm/mm/cache-v4wb.S b/arch/arm/mm/cache-v4wb.S
index 2ebc1b3bf856..3dbedf1ec0e7 100644
--- a/arch/arm/mm/cache-v4wb.S
+++ b/arch/arm/mm/cache-v4wb.S
@@ -114,15 +114,16 @@ ENTRY(v4wb_flush_user_cache_range)
mov pc, lr
/*
- * flush_kern_dcache_page(void *page)
+ * flush_kern_dcache_area(void *addr, size_t size)
*
* Ensure no D cache aliasing occurs, either with itself or
* the I cache
*
- * - addr - page aligned address
+ * - addr - kernel address
+ * - size - region size
*/
-ENTRY(v4wb_flush_kern_dcache_page)
- add r1, r0, #PAGE_SZ
+ENTRY(v4wb_flush_kern_dcache_area)
+ add r1, r0, r1
/* fall through */
/*
@@ -224,7 +225,7 @@ ENTRY(v4wb_cache_fns)
.long v4wb_flush_user_cache_range
.long v4wb_coherent_kern_range
.long v4wb_coherent_user_range
- .long v4wb_flush_kern_dcache_page
+ .long v4wb_flush_kern_dcache_area
.long v4wb_dma_inv_range
.long v4wb_dma_clean_range
.long v4wb_dma_flush_range
diff --git a/arch/arm/mm/cache-v4wt.S b/arch/arm/mm/cache-v4wt.S
index c54fa2cc40e6..b3b7410270b4 100644
--- a/arch/arm/mm/cache-v4wt.S
+++ b/arch/arm/mm/cache-v4wt.S
@@ -117,17 +117,18 @@ ENTRY(v4wt_coherent_user_range)
mov pc, lr
/*
- * flush_kern_dcache_page(void *page)
+ * flush_kern_dcache_area(void *addr, size_t size)
*
* Ensure no D cache aliasing occurs, either with itself or
* the I cache
*
- * - addr - page aligned address
+ * - addr - kernel address
+ * - size - region size
*/
-ENTRY(v4wt_flush_kern_dcache_page)
+ENTRY(v4wt_flush_kern_dcache_area)
mov r2, #0
mcr p15, 0, r2, c7, c5, 0 @ invalidate I cache
- add r1, r0, #PAGE_SZ
+ add r1, r0, r1
/* fallthrough */
/*
@@ -180,7 +181,7 @@ ENTRY(v4wt_cache_fns)
.long v4wt_flush_user_cache_range
.long v4wt_coherent_kern_range
.long v4wt_coherent_user_range
- .long v4wt_flush_kern_dcache_page
+ .long v4wt_flush_kern_dcache_area
.long v4wt_dma_inv_range
.long v4wt_dma_clean_range
.long v4wt_dma_flush_range
diff --git a/arch/arm/mm/cache-v6.S b/arch/arm/mm/cache-v6.S
index 295e25dd6381..4ba0a24ce6f5 100644
--- a/arch/arm/mm/cache-v6.S
+++ b/arch/arm/mm/cache-v6.S
@@ -159,15 +159,16 @@ ENDPROC(v6_coherent_user_range)
ENDPROC(v6_coherent_kern_range)
/*
- * v6_flush_kern_dcache_page(kaddr)
+ * v6_flush_kern_dcache_area(void *addr, size_t size)
*
* Ensure that the data held in the page kaddr is written back
* to the page in question.
*
- * - kaddr - kernel address (guaranteed to be page aligned)
+ * - addr - kernel address
+ * - size - region size
*/
-ENTRY(v6_flush_kern_dcache_page)
- add r1, r0, #PAGE_SZ
+ENTRY(v6_flush_kern_dcache_area)
+ add r1, r0, r1
1:
#ifdef HARVARD_CACHE
mcr p15, 0, r0, c7, c14, 1 @ clean & invalidate D line
@@ -271,7 +272,7 @@ ENTRY(v6_cache_fns)
.long v6_flush_user_cache_range
.long v6_coherent_kern_range
.long v6_coherent_user_range
- .long v6_flush_kern_dcache_page
+ .long v6_flush_kern_dcache_area
.long v6_dma_inv_range
.long v6_dma_clean_range
.long v6_dma_flush_range
diff --git a/arch/arm/mm/cache-v7.S b/arch/arm/mm/cache-v7.S
index e1bd9759617f..9073db849fb4 100644
--- a/arch/arm/mm/cache-v7.S
+++ b/arch/arm/mm/cache-v7.S
@@ -186,16 +186,17 @@ ENDPROC(v7_coherent_kern_range)
ENDPROC(v7_coherent_user_range)
/*
- * v7_flush_kern_dcache_page(kaddr)
+ * v7_flush_kern_dcache_area(void *addr, size_t size)
*
* Ensure that the data held in the page kaddr is written back
* to the page in question.
*
- * - kaddr - kernel address (guaranteed to be page aligned)
+ * - addr - kernel address
+ * - size - region size
*/
-ENTRY(v7_flush_kern_dcache_page)
+ENTRY(v7_flush_kern_dcache_area)
dcache_line_size r2, r3
- add r1, r0, #PAGE_SZ
+ add r1, r0, r1
1:
mcr p15, 0, r0, c7, c14, 1 @ clean & invalidate D line / unified line
add r0, r0, r2
@@ -203,7 +204,7 @@ ENTRY(v7_flush_kern_dcache_page)
blo 1b
dsb
mov pc, lr
-ENDPROC(v7_flush_kern_dcache_page)
+ENDPROC(v7_flush_kern_dcache_area)
/*
* v7_dma_inv_range(start,end)
@@ -279,7 +280,7 @@ ENTRY(v7_cache_fns)
.long v7_flush_user_cache_range
.long v7_coherent_kern_range
.long v7_coherent_user_range
- .long v7_flush_kern_dcache_page
+ .long v7_flush_kern_dcache_area
.long v7_dma_inv_range
.long v7_dma_clean_range
.long v7_dma_flush_range
diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c
index 329594e760cd..6f3a4b7a3b82 100644
--- a/arch/arm/mm/flush.c
+++ b/arch/arm/mm/flush.c
@@ -131,7 +131,7 @@ void __flush_dcache_page(struct address_space *mapping, struct page *page)
*/
if (addr)
#endif
- __cpuc_flush_dcache_page(addr);
+ __cpuc_flush_dcache_area(addr, PAGE_SIZE);
/*
* If this is a page cache page, and we have an aliasing VIPT cache,
@@ -258,5 +258,5 @@ void __flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned l
* in this mapping of the page. FIXME: this is overkill
* since we actually ask for a write-back and invalidate.
*/
- __cpuc_flush_dcache_page(page_address(page));
+ __cpuc_flush_dcache_area(page_address(page), PAGE_SIZE);
}
diff --git a/arch/arm/mm/highmem.c b/arch/arm/mm/highmem.c
index 30f82fb5918c..2be1ec7c1b41 100644
--- a/arch/arm/mm/highmem.c
+++ b/arch/arm/mm/highmem.c
@@ -79,7 +79,7 @@ void kunmap_atomic(void *kvaddr, enum km_type type)
unsigned int idx = type + KM_TYPE_NR * smp_processor_id();
if (kvaddr >= (void *)FIXADDR_START) {
- __cpuc_flush_dcache_page((void *)vaddr);
+ __cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE);
#ifdef CONFIG_DEBUG_HIGHMEM
BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
set_pte_ext(TOP_PTE(vaddr), __pte(0), 0);
diff --git a/arch/arm/mm/nommu.c b/arch/arm/mm/nommu.c
index 900811cc9130..374a8311bc84 100644
--- a/arch/arm/mm/nommu.c
+++ b/arch/arm/mm/nommu.c
@@ -61,7 +61,7 @@ void setup_mm_for_reboot(char mode)
void flush_dcache_page(struct page *page)
{
- __cpuc_flush_dcache_page(page_address(page));
+ __cpuc_flush_dcache_area(page_address(page), PAGE_SIZE);
}
EXPORT_SYMBOL(flush_dcache_page);
diff --git a/arch/arm/mm/proc-arm1020.S b/arch/arm/mm/proc-arm1020.S
index d9fb4b98c49f..8012e24282b2 100644
--- a/arch/arm/mm/proc-arm1020.S
+++ b/arch/arm/mm/proc-arm1020.S
@@ -231,17 +231,18 @@ ENTRY(arm1020_coherent_user_range)
mov pc, lr
/*
- * flush_kern_dcache_page(void *page)
+ * flush_kern_dcache_area(void *addr, size_t size)
*
* Ensure no D cache aliasing occurs, either with itself or
* the I cache
*
- * - page - page aligned address
+ * - addr - kernel address
+ * - size - region size
*/
-ENTRY(arm1020_flush_kern_dcache_page)
+ENTRY(arm1020_flush_kern_dcache_area)
mov ip, #0
#ifndef CONFIG_CPU_DCACHE_DISABLE
- add r1, r0, #PAGE_SZ
+ add r1, r0, r1
1: mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry
mcr p15, 0, ip, c7, c10, 4 @ drain WB
add r0, r0, #CACHE_DLINESIZE
@@ -335,7 +336,7 @@ ENTRY(arm1020_cache_fns)
.long arm1020_flush_user_cache_range
.long arm1020_coherent_kern_range
.long arm1020_coherent_user_range
- .long arm1020_flush_kern_dcache_page
+ .long arm1020_flush_kern_dcache_area
.long arm1020_dma_inv_range
.long arm1020_dma_clean_range
.long arm1020_dma_flush_range
diff --git a/arch/arm/mm/proc-arm1020e.S b/arch/arm/mm/proc-arm1020e.S
index 7453b75dcea5..41fe25d234f5 100644
--- a/arch/arm/mm/proc-arm1020e.S
+++ b/arch/arm/mm/proc-arm1020e.S
@@ -225,17 +225,18 @@ ENTRY(arm1020e_coherent_user_range)
mov pc, lr
/*
- * flush_kern_dcache_page(void *page)
+ * flush_kern_dcache_area(void *addr, size_t size)
*
* Ensure no D cache aliasing occurs, either with itself or
* the I cache
*
- * - page - page aligned address
+ * - addr - kernel address
+ * - size - region size
*/
-ENTRY(arm1020e_flush_kern_dcache_page)
+ENTRY(arm1020e_flush_kern_dcache_area)
mov ip, #0
#ifndef CONFIG_CPU_DCACHE_DISABLE
- add r1, r0, #PAGE_SZ
+ add r1, r0, r1
1: mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry
add r0, r0, #CACHE_DLINESIZE
cmp r0, r1
@@ -321,7 +322,7 @@ ENTRY(arm1020e_cache_fns)
.long arm1020e_flush_user_cache_range
.long arm1020e_coherent_kern_range
.long arm1020e_coherent_user_range
- .long arm1020e_flush_kern_dcache_page
+ .long arm1020e_flush_kern_dcache_area
.long arm1020e_dma_inv_range
.long arm1020e_dma_clean_range
.long arm1020e_dma_flush_range
diff --git a/arch/arm/mm/proc-arm1022.S b/arch/arm/mm/proc-arm1022.S
index 8eb72d75a8b6..20a5b1b31a70 100644
--- a/arch/arm/mm/proc-arm1022.S
+++ b/arch/arm/mm/proc-arm1022.S
@@ -214,17 +214,18 @@ ENTRY(arm1022_coherent_user_range)
mov pc, lr
/*
- * flush_kern_dcache_page(void *page)
+ * flush_kern_dcache_area(void *addr, size_t size)
*
* Ensure no D cache aliasing occurs, either with itself or
* the I cache
*
- * - page - page aligned address
+ * - addr - kernel address
+ * - size - region size
*/
-ENTRY(arm1022_flush_kern_dcache_page)
+ENTRY(arm1022_flush_kern_dcache_area)
mov ip, #0
#ifndef CONFIG_CPU_DCACHE_DISABLE
- add r1, r0, #PAGE_SZ
+ add r1, r0, r1
1: mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry
add r0, r0, #CACHE_DLINESIZE
cmp r0, r1
@@ -310,7 +311,7 @@ ENTRY(arm1022_cache_fns)
.long arm1022_flush_user_cache_range
.long arm1022_coherent_kern_range
.long arm1022_coherent_user_range
- .long arm1022_flush_kern_dcache_page
+ .long arm1022_flush_kern_dcache_area
.long arm1022_dma_inv_range
.long arm1022_dma_clean_range
.long arm1022_dma_flush_range
diff --git a/arch/arm/mm/proc-arm1026.S b/arch/arm/mm/proc-arm1026.S
index 3b59f0d67139..96aedb10fcc4 100644
--- a/arch/arm/mm/proc-arm1026.S
+++ b/arch/arm/mm/proc-arm1026.S
@@ -208,17 +208,18 @@ ENTRY(arm1026_coherent_user_range)
mov pc, lr
/*
- * flush_kern_dcache_page(void *page)
+ * flush_kern_dcache_area(void *addr, size_t size)
*
* Ensure no D cache aliasing occurs, either with itself or
* the I cache
*
- * - page - page aligned address
+ * - addr - kernel address
+ * - size - region size
*/
-ENTRY(arm1026_flush_kern_dcache_page)
+ENTRY(arm1026_flush_kern_dcache_area)
mov ip, #0
#ifndef CONFIG_CPU_DCACHE_DISABLE
- add r1, r0, #PAGE_SZ
+ add r1, r0, r1
1: mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry
add r0, r0, #CACHE_DLINESIZE
cmp r0, r1
@@ -304,7 +305,7 @@ ENTRY(arm1026_cache_fns)
.long arm1026_flush_user_cache_range
.long arm1026_coherent_kern_range
.long arm1026_coherent_user_range
- .long arm1026_flush_kern_dcache_page
+ .long arm1026_flush_kern_dcache_area
.long arm1026_dma_inv_range
.long arm1026_dma_clean_range
.long arm1026_dma_flush_range
diff --git a/arch/arm/mm/proc-arm920.S b/arch/arm/mm/proc-arm920.S
index 2b7c197cc58d..471669e2d7cb 100644
--- a/arch/arm/mm/proc-arm920.S
+++ b/arch/arm/mm/proc-arm920.S
@@ -207,15 +207,16 @@ ENTRY(arm920_coherent_user_range)
mov pc, lr
/*
- * flush_kern_dcache_page(void *page)
+ * flush_kern_dcache_area(void *addr, size_t size)
*
* Ensure no D cache aliasing occurs, either with itself or
* the I cache
*
- * - addr - page aligned address
+ * - addr - kernel address
+ * - size - region size
*/
-ENTRY(arm920_flush_kern_dcache_page)
- add r1, r0, #PAGE_SZ
+ENTRY(arm920_flush_kern_dcache_area)
+ add r1, r0, r1
1: mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry
add r0, r0, #CACHE_DLINESIZE
cmp r0, r1
@@ -293,7 +294,7 @@ ENTRY(arm920_cache_fns)
.long arm920_flush_user_cache_range
.long arm920_coherent_kern_range
.long arm920_coherent_user_range
- .long arm920_flush_kern_dcache_page
+ .long arm920_flush_kern_dcache_area
.long arm920_dma_inv_range
.long arm920_dma_clean_range
.long arm920_dma_flush_range
diff --git a/arch/arm/mm/proc-arm922.S b/arch/arm/mm/proc-arm922.S
index 06a1aa4e3398..ee111b00fa41 100644
--- a/arch/arm/mm/proc-arm922.S
+++ b/arch/arm/mm/proc-arm922.S
@@ -209,15 +209,16 @@ ENTRY(arm922_coherent_user_range)
mov pc, lr
/*
- * flush_kern_dcache_page(void *page)
+ * flush_kern_dcache_area(void *addr, size_t size)
*
* Ensure no D cache aliasing occurs, either with itself or
* the I cache
*
- * - addr - page aligned address
+ * - addr - kernel address
+ * - size - region size
*/
-ENTRY(arm922_flush_kern_dcache_page)
- add r1, r0, #PAGE_SZ
+ENTRY(arm922_flush_kern_dcache_area)
+ add r1, r0, r1
1: mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry
add r0, r0, #CACHE_DLINESIZE
cmp r0, r1
@@ -295,7 +296,7 @@ ENTRY(arm922_cache_fns)
.long arm922_flush_user_cache_range
.long arm922_coherent_kern_range
.long arm922_coherent_user_range
- .long arm922_flush_kern_dcache_page
+ .long arm922_flush_kern_dcache_area
.long arm922_dma_inv_range
.long arm922_dma_clean_range
.long arm922_dma_flush_range
diff --git a/arch/arm/mm/proc-arm925.S b/arch/arm/mm/proc-arm925.S
index cb53435a85ae..8deb5bde58e4 100644
--- a/arch/arm/mm/proc-arm925.S
+++ b/arch/arm/mm/proc-arm925.S
@@ -251,15 +251,16 @@ ENTRY(arm925_coherent_user_range)
mov pc, lr
/*
- * flush_kern_dcache_page(void *page)
+ * flush_kern_dcache_area(void *addr, size_t size)
*
* Ensure no D cache aliasing occurs, either with itself or
* the I cache
*
- * - addr - page aligned address
+ * - addr - kernel address
+ * - size - region size
*/
-ENTRY(arm925_flush_kern_dcache_page)
- add r1, r0, #PAGE_SZ
+ENTRY(arm925_flush_kern_dcache_area)
+ add r1, r0, r1
1: mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry
add r0, r0, #CACHE_DLINESIZE
cmp r0, r1
@@ -346,7 +347,7 @@ ENTRY(arm925_cache_fns)
.long arm925_flush_user_cache_range
.long arm925_coherent_kern_range
.long arm925_coherent_user_range
- .long arm925_flush_kern_dcache_page
+ .long arm925_flush_kern_dcache_area
.long arm925_dma_inv_range
.long arm925_dma_clean_range
.long arm925_dma_flush_range
diff --git a/arch/arm/mm/proc-arm926.S b/arch/arm/mm/proc-arm926.S
index 1c4848704bb3..64db6e275a44 100644
--- a/arch/arm/mm/proc-arm926.S
+++ b/arch/arm/mm/proc-arm926.S
@@ -214,15 +214,16 @@ ENTRY(arm926_coherent_user_range)
mov pc, lr
/*
- * flush_kern_dcache_page(void *page)
+ * flush_kern_dcache_area(void *addr, size_t size)
*
* Ensure no D cache aliasing occurs, either with itself or
* the I cache
*
- * - addr - page aligned address
+ * - addr - kernel address
+ * - size - region size
*/
-ENTRY(arm926_flush_kern_dcache_page)
- add r1, r0, #PAGE_SZ
+ENTRY(arm926_flush_kern_dcache_area)
+ add r1, r0, r1
1: mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry
add r0, r0, #CACHE_DLINESIZE
cmp r0, r1
@@ -309,7 +310,7 @@ ENTRY(arm926_cache_fns)
.long arm926_flush_user_cache_range
.long arm926_coherent_kern_range
.long arm926_coherent_user_range
- .long arm926_flush_kern_dcache_page
+ .long arm926_flush_kern_dcache_area
.long arm926_dma_inv_range
.long arm926_dma_clean_range
.long arm926_dma_flush_range
diff --git a/arch/arm/mm/proc-arm940.S b/arch/arm/mm/proc-arm940.S
index 5b0f8464c8f2..8196b9f401fb 100644
--- a/arch/arm/mm/proc-arm940.S
+++ b/arch/arm/mm/proc-arm940.S
@@ -141,14 +141,15 @@ ENTRY(arm940_coherent_user_range)
/* FALLTHROUGH */
/*
- * flush_kern_dcache_page(void *page)
+ * flush_kern_dcache_area(void *addr, size_t size)
*
* Ensure no D cache aliasing occurs, either with itself or
* the I cache
*
- * - addr - page aligned address
+ * - addr - kernel address
+ * - size - region size
*/
-ENTRY(arm940_flush_kern_dcache_page)
+ENTRY(arm940_flush_kern_dcache_area)
mov ip, #0
mov r1, #(CACHE_DSEGMENTS - 1) << 4 @ 4 segments
1: orr r3, r1, #(CACHE_DENTRIES - 1) << 26 @ 64 entries
@@ -238,7 +239,7 @@ ENTRY(arm940_cache_fns)
.long arm940_flush_user_cache_range
.long arm940_coherent_kern_range
.long arm940_coherent_user_range
- .long arm940_flush_kern_dcache_page
+ .long arm940_flush_kern_dcache_area
.long arm940_dma_inv_range
.long arm940_dma_clean_range
.long arm940_dma_flush_range
diff --git a/arch/arm/mm/proc-arm946.S b/arch/arm/mm/proc-arm946.S
index 40c0449a139b..9a951239c86c 100644
--- a/arch/arm/mm/proc-arm946.S
+++ b/arch/arm/mm/proc-arm946.S
@@ -183,16 +183,17 @@ ENTRY(arm946_coherent_user_range)
mov pc, lr
/*
- * flush_kern_dcache_page(void *page)
+ * flush_kern_dcache_area(void *addr, size_t size)
*
* Ensure no D cache aliasing occurs, either with itself or
* the I cache
*
- * - addr - page aligned address
+ * - addr - kernel address
+ * - size - region size
* (same as arm926)
*/
-ENTRY(arm946_flush_kern_dcache_page)
- add r1, r0, #PAGE_SZ
+ENTRY(arm946_flush_kern_dcache_area)
+ add r1, r0, r1
1: mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry
add r0, r0, #CACHE_DLINESIZE
cmp r0, r1
@@ -280,7 +281,7 @@ ENTRY(arm946_cache_fns)
.long arm946_flush_user_cache_range
.long arm946_coherent_kern_range
.long arm946_coherent_user_range
- .long arm946_flush_kern_dcache_page
+ .long arm946_flush_kern_dcache_area
.long arm946_dma_inv_range
.long arm946_dma_clean_range
.long arm946_dma_flush_range
diff --git a/arch/arm/mm/proc-feroceon.S b/arch/arm/mm/proc-feroceon.S
index d0d7795200fc..dbc39383e66a 100644
--- a/arch/arm/mm/proc-feroceon.S
+++ b/arch/arm/mm/proc-feroceon.S
@@ -226,16 +226,17 @@ ENTRY(feroceon_coherent_user_range)
mov pc, lr
/*
- * flush_kern_dcache_page(void *page)
+ * flush_kern_dcache_area(void *addr, size_t size)
*
* Ensure no D cache aliasing occurs, either with itself or
* the I cache
*
- * - addr - page aligned address
+ * - addr - kernel address
+ * - size - region size
*/
.align 5
-ENTRY(feroceon_flush_kern_dcache_page)
- add r1, r0, #PAGE_SZ
+ENTRY(feroceon_flush_kern_dcache_area)
+ add r1, r0, r1
1: mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry
add r0, r0, #CACHE_DLINESIZE
cmp r0, r1
@@ -246,7 +247,7 @@ ENTRY(feroceon_flush_kern_dcache_page)
mov pc, lr
.align 5
-ENTRY(feroceon_range_flush_kern_dcache_page)
+ENTRY(feroceon_range_flush_kern_dcache_area)
mrs r2, cpsr
add r1, r0, #PAGE_SZ - CACHE_DLINESIZE @ top addr is inclusive
orr r3, r2, #PSR_I_BIT
@@ -372,7 +373,7 @@ ENTRY(feroceon_cache_fns)
.long feroceon_flush_user_cache_range
.long feroceon_coherent_kern_range
.long feroceon_coherent_user_range
- .long feroceon_flush_kern_dcache_page
+ .long feroceon_flush_kern_dcache_area
.long feroceon_dma_inv_range
.long feroceon_dma_clean_range
.long feroceon_dma_flush_range
@@ -383,7 +384,7 @@ ENTRY(feroceon_range_cache_fns)
.long feroceon_flush_user_cache_range
.long feroceon_coherent_kern_range
.long feroceon_coherent_user_range
- .long feroceon_range_flush_kern_dcache_page
+ .long feroceon_range_flush_kern_dcache_area
.long feroceon_range_dma_inv_range
.long feroceon_range_dma_clean_range
.long feroceon_range_dma_flush_range
diff --git a/arch/arm/mm/proc-mohawk.S b/arch/arm/mm/proc-mohawk.S
index 52b5fd74fbb3..9674d36cc97d 100644
--- a/arch/arm/mm/proc-mohawk.S
+++ b/arch/arm/mm/proc-mohawk.S
@@ -186,15 +186,16 @@ ENTRY(mohawk_coherent_user_range)
mov pc, lr
/*
- * flush_kern_dcache_page(void *page)
+ * flush_kern_dcache_area(void *addr, size_t size)
*
* Ensure no D cache aliasing occurs, either with itself or
* the I cache
*
- * - addr - page aligned address
+ * - addr - kernel address
+ * - size - region size
*/
-ENTRY(mohawk_flush_kern_dcache_page)
- add r1, r0, #PAGE_SZ
+ENTRY(mohawk_flush_kern_dcache_area)
+ add r1, r0, r1
1: mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry
add r0, r0, #CACHE_DLINESIZE
cmp r0, r1
@@ -273,7 +274,7 @@ ENTRY(mohawk_cache_fns)
.long mohawk_flush_user_cache_range
.long mohawk_coherent_kern_range
.long mohawk_coherent_user_range
- .long mohawk_flush_kern_dcache_page
+ .long mohawk_flush_kern_dcache_area
.long mohawk_dma_inv_range
.long mohawk_dma_clean_range
.long mohawk_dma_flush_range
diff --git a/arch/arm/mm/proc-syms.c b/arch/arm/mm/proc-syms.c
index ac5c80062b70..3e6210b4d6d4 100644
--- a/arch/arm/mm/proc-syms.c
+++ b/arch/arm/mm/proc-syms.c
@@ -27,8 +27,7 @@ EXPORT_SYMBOL(__cpuc_flush_kern_all);
EXPORT_SYMBOL(__cpuc_flush_user_all);
EXPORT_SYMBOL(__cpuc_flush_user_range);
EXPORT_SYMBOL(__cpuc_coherent_kern_range);
-EXPORT_SYMBOL(__cpuc_flush_dcache_page);
-EXPORT_SYMBOL(dmac_inv_range); /* because of flush_ioremap_region() */
+EXPORT_SYMBOL(__cpuc_flush_dcache_area);
#else
EXPORT_SYMBOL(cpu_cache);
#endif
diff --git a/arch/arm/mm/proc-xsc3.S b/arch/arm/mm/proc-xsc3.S
index fab134e29826..96456f548798 100644
--- a/arch/arm/mm/proc-xsc3.S
+++ b/arch/arm/mm/proc-xsc3.S
@@ -226,15 +226,16 @@ ENTRY(xsc3_coherent_user_range)
mov pc, lr
/*
- * flush_kern_dcache_page(void *page)
+ * flush_kern_dcache_area(void *addr, size_t size)
*
* Ensure no D cache aliasing occurs, either with itself or
* the I cache.
*
- * - addr - page aligned address
+ * - addr - kernel address
+ * - size - region size
*/
-ENTRY(xsc3_flush_kern_dcache_page)
- add r1, r0, #PAGE_SZ
+ENTRY(xsc3_flush_kern_dcache_area)
+ add r1, r0, r1
1: mcr p15, 0, r0, c7, c14, 1 @ clean/invalidate L1 D line
add r0, r0, #CACHELINESIZE
cmp r0, r1
@@ -309,7 +310,7 @@ ENTRY(xsc3_cache_fns)
.long xsc3_flush_user_cache_range
.long xsc3_coherent_kern_range
.long xsc3_coherent_user_range
- .long xsc3_flush_kern_dcache_page
+ .long xsc3_flush_kern_dcache_area
.long xsc3_dma_inv_range
.long xsc3_dma_clean_range
.long xsc3_dma_flush_range
diff --git a/arch/arm/mm/proc-xscale.S b/arch/arm/mm/proc-xscale.S
index f056c283682d..93df47265f2d 100644
--- a/arch/arm/mm/proc-xscale.S
+++ b/arch/arm/mm/proc-xscale.S
@@ -284,15 +284,16 @@ ENTRY(xscale_coherent_user_range)
mov pc, lr
/*
- * flush_kern_dcache_page(void *page)
+ * flush_kern_dcache_area(void *addr, size_t size)
*
* Ensure no D cache aliasing occurs, either with itself or
* the I cache
*
- * - addr - page aligned address
+ * - addr - kernel address
+ * - size - region size
*/
-ENTRY(xscale_flush_kern_dcache_page)
- add r1, r0, #PAGE_SZ
+ENTRY(xscale_flush_kern_dcache_area)
+ add r1, r0, r1
1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
mcr p15, 0, r0, c7, c6, 1 @ invalidate D entry
add r0, r0, #CACHELINESIZE
@@ -368,7 +369,7 @@ ENTRY(xscale_cache_fns)
.long xscale_flush_user_cache_range
.long xscale_coherent_kern_range
.long xscale_coherent_user_range
- .long xscale_flush_kern_dcache_page
+ .long xscale_flush_kern_dcache_area
.long xscale_dma_inv_range
.long xscale_dma_clean_range
.long xscale_dma_flush_range
@@ -392,7 +393,7 @@ ENTRY(xscale_80200_A0_A1_cache_fns)
.long xscale_flush_user_cache_range
.long xscale_coherent_kern_range
.long xscale_coherent_user_range
- .long xscale_flush_kern_dcache_page
+ .long xscale_flush_kern_dcache_area
.long xscale_dma_flush_range
.long xscale_dma_clean_range
.long xscale_dma_flush_range
diff --git a/drivers/mtd/maps/pxa2xx-flash.c b/drivers/mtd/maps/pxa2xx-flash.c
index 74fa075c838a..b13f6417b5b2 100644
--- a/drivers/mtd/maps/pxa2xx-flash.c
+++ b/drivers/mtd/maps/pxa2xx-flash.c
@@ -20,14 +20,23 @@
#include <asm/io.h>
#include <mach/hardware.h>
-#include <asm/cacheflush.h>
#include <asm/mach/flash.h>
+#define CACHELINESIZE 32
+
static void pxa2xx_map_inval_cache(struct map_info *map, unsigned long from,
ssize_t len)
{
- flush_ioremap_region(map->phys, map->cached, from, len);
+ unsigned long start = (unsigned long)map->cached + from;
+ unsigned long end = start + len;
+
+ start &= ~(CACHELINESIZE - 1);
+ while (start < end) {
+ /* invalidate D cache line */
+ asm volatile ("mcr p15, 0, %0, c7, c6, 1" : : "r" (start));
+ start += CACHELINESIZE;
+ }
}
struct pxa2xx_flash_info {
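With flush_ioremap_region() removed from cacheflush.h (and the dmac_inv_range export it required dropped in proc-syms.c above), the driver now invalidates D-cache lines over its cached window itself, using a locally defined CACHELINESIZE and an inline coprocessor instruction. For context — assumed from the rest of pxa2xx-flash.c, which this hunk does not show — the helper is installed through the standard map_info hook so MTD reads through the cached mapping see fresh flash contents:

	#include <linux/mtd/map.h>

	/* Sketch: wire the invalidate helper into the map.  The inval_cache
	 * field is the standard hook in linux/mtd/map.h; this wrapper
	 * function itself is hypothetical. */
	static void pxa2xx_flash_wire_inval(struct map_info *map)
	{
		map->inval_cache = pxa2xx_map_inval_cache;
	}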