Diffstat (limited to 'arch/xtensa/mm')
-rw-r--r--  arch/xtensa/mm/cache.c   |  77
-rw-r--r--  arch/xtensa/mm/highmem.c |  41
-rw-r--r--  arch/xtensa/mm/misc.S    | 116
-rw-r--r--  arch/xtensa/mm/mmu.c     |  38
4 files changed, 173 insertions(+), 99 deletions(-)
diff --git a/arch/xtensa/mm/cache.c b/arch/xtensa/mm/cache.c
index 63cbb867dadd..d75aa1476da7 100644
--- a/arch/xtensa/mm/cache.c
+++ b/arch/xtensa/mm/cache.c
@@ -59,9 +59,68 @@
*
*/
-#if (DCACHE_WAY_SIZE > PAGE_SIZE) && defined(CONFIG_HIGHMEM)
-#error "HIGHMEM is not supported on cores with aliasing cache."
-#endif
+#if (DCACHE_WAY_SIZE > PAGE_SIZE)
+static inline void kmap_invalidate_coherent(struct page *page,
+ unsigned long vaddr)
+{
+ if (!DCACHE_ALIAS_EQ(page_to_phys(page), vaddr)) {
+ unsigned long kvaddr;
+
+ if (!PageHighMem(page)) {
+ kvaddr = (unsigned long)page_to_virt(page);
+
+ __invalidate_dcache_page(kvaddr);
+ } else {
+ kvaddr = TLBTEMP_BASE_1 +
+ (page_to_phys(page) & DCACHE_ALIAS_MASK);
+
+ __invalidate_dcache_page_alias(kvaddr,
+ page_to_phys(page));
+ }
+ }
+}
+
+static inline void *coherent_kvaddr(struct page *page, unsigned long base,
+ unsigned long vaddr, unsigned long *paddr)
+{
+ if (PageHighMem(page) || !DCACHE_ALIAS_EQ(page_to_phys(page), vaddr)) {
+ *paddr = page_to_phys(page);
+ return (void *)(base + (vaddr & DCACHE_ALIAS_MASK));
+ } else {
+ *paddr = 0;
+ return page_to_virt(page);
+ }
+}
+
+void clear_user_highpage(struct page *page, unsigned long vaddr)
+{
+ unsigned long paddr;
+ void *kvaddr = coherent_kvaddr(page, TLBTEMP_BASE_1, vaddr, &paddr);
+
+ pagefault_disable();
+ kmap_invalidate_coherent(page, vaddr);
+ set_bit(PG_arch_1, &page->flags);
+ clear_page_alias(kvaddr, paddr);
+ pagefault_enable();
+}
+
+void copy_user_highpage(struct page *dst, struct page *src,
+ unsigned long vaddr, struct vm_area_struct *vma)
+{
+ unsigned long dst_paddr, src_paddr;
+ void *dst_vaddr = coherent_kvaddr(dst, TLBTEMP_BASE_1, vaddr,
+ &dst_paddr);
+ void *src_vaddr = coherent_kvaddr(src, TLBTEMP_BASE_2, vaddr,
+ &src_paddr);
+
+ pagefault_disable();
+ kmap_invalidate_coherent(dst, vaddr);
+ set_bit(PG_arch_1, &dst->flags);
+ copy_page_alias(dst_vaddr, src_vaddr, dst_paddr, src_paddr);
+ pagefault_enable();
+}
+
+#endif /* DCACHE_WAY_SIZE > PAGE_SIZE */
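For context on the helpers above: DCACHE_ALIAS_EQ(), DCACHE_ALIAS() and DCACHE_ALIAS_MASK come from the xtensa cache/page headers, not from this diff. A minimal sketch of the alias arithmetic they are built on, with assumed cache geometry (illustrative values only):

/* Sketch of the xtensa dcache alias arithmetic. The real constants
 * live in <asm/page.h>; the geometry below (16 KiB way, 4 KiB page,
 * hence 4 colors) is assumed for illustration. */
#define PAGE_SHIFT            12
#define PAGE_SIZE             (1UL << PAGE_SHIFT)
#define PAGE_MASK             (~(PAGE_SIZE - 1))
#define DCACHE_WAY_SIZE       0x4000UL        /* assumed: 16 KiB per way */
#define DCACHE_ALIAS_MASK     (PAGE_MASK & (DCACHE_WAY_SIZE - 1))
#define DCACHE_ALIAS(a)       (((a) & DCACHE_ALIAS_MASK) >> PAGE_SHIFT)
#define DCACHE_ALIAS_EQ(a, b) ((((a) ^ (b)) & DCACHE_ALIAS_MASK) == 0)

Two mappings see the same dcache lines exactly when their alias bits match, so coherent_kvaddr() can return either the ordinary lowmem address (when its color already equals that of vaddr) or a TLBTEMP window at base + (vaddr & DCACHE_ALIAS_MASK), whose color matches vaddr by construction.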
#if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK
@@ -103,7 +162,8 @@ void flush_dcache_page(struct page *page)
if (!alias && !mapping)
return;
- __flush_invalidate_dcache_page((long)page_address(page));
+ virt = TLBTEMP_BASE_1 + (phys & DCACHE_ALIAS_MASK);
+ __flush_invalidate_dcache_page_alias(virt, phys);
virt = TLBTEMP_BASE_1 + (temp & DCACHE_ALIAS_MASK);
@@ -168,13 +228,12 @@ update_mmu_cache(struct vm_area_struct * vma, unsigned long addr, pte_t *ptep)
#if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK
if (!PageReserved(page) && test_bit(PG_arch_1, &page->flags)) {
-
- unsigned long paddr = (unsigned long) page_address(page);
unsigned long phys = page_to_phys(page);
- unsigned long tmp = TLBTEMP_BASE_1 + (addr & DCACHE_ALIAS_MASK);
-
- __flush_invalidate_dcache_page(paddr);
+ unsigned long tmp;
+ tmp = TLBTEMP_BASE_1 + (phys & DCACHE_ALIAS_MASK);
+ __flush_invalidate_dcache_page_alias(tmp, phys);
+ tmp = TLBTEMP_BASE_1 + (addr & DCACHE_ALIAS_MASK);
__flush_invalidate_dcache_page_alias(tmp, phys);
__invalidate_icache_page_alias(tmp, phys);
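A note on the two cache.c hunks above: the old code flushed through page_address(page), which is NULL for a highmem page with no permanent kernel mapping. Both call sites now reach the page through a TLBTEMP window colored like the physical address; restated from the diff (no new names introduced):

unsigned long phys = page_to_phys(page);
unsigned long virt = TLBTEMP_BASE_1 + (phys & DCACHE_ALIAS_MASK);

__flush_invalidate_dcache_page_alias(virt, phys);      /* kernel color */

update_mmu_cache() then repeats the flush through a second window colored like the user address (addr & DCACHE_ALIAS_MASK), so stale lines under either color are written back and invalidated.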
diff --git a/arch/xtensa/mm/highmem.c b/arch/xtensa/mm/highmem.c
index 17a8c0d6fd17..8cfb71ec0937 100644
--- a/arch/xtensa/mm/highmem.c
+++ b/arch/xtensa/mm/highmem.c
@@ -14,23 +14,45 @@
static pte_t *kmap_pte;
+#if DCACHE_WAY_SIZE > PAGE_SIZE
+unsigned int last_pkmap_nr_arr[DCACHE_N_COLORS];
+wait_queue_head_t pkmap_map_wait_arr[DCACHE_N_COLORS];
+
+static void __init kmap_waitqueues_init(void)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(pkmap_map_wait_arr); ++i)
+ init_waitqueue_head(pkmap_map_wait_arr + i);
+}
+#else
+static inline void kmap_waitqueues_init(void)
+{
+}
+#endif
+
+static inline enum fixed_addresses kmap_idx(int type, unsigned long color)
+{
+ return (type + KM_TYPE_NR * smp_processor_id()) * DCACHE_N_COLORS +
+ color;
+}
+
void *kmap_atomic(struct page *page)
{
enum fixed_addresses idx;
unsigned long vaddr;
- int type;
pagefault_disable();
if (!PageHighMem(page))
return page_address(page);
- type = kmap_atomic_idx_push();
- idx = type + KM_TYPE_NR * smp_processor_id();
+ idx = kmap_idx(kmap_atomic_idx_push(),
+ DCACHE_ALIAS(page_to_phys(page)));
vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
#ifdef CONFIG_DEBUG_HIGHMEM
- BUG_ON(!pte_none(*(kmap_pte - idx)));
+ BUG_ON(!pte_none(*(kmap_pte + idx)));
#endif
- set_pte(kmap_pte - idx, mk_pte(page, PAGE_KERNEL_EXEC));
+ set_pte(kmap_pte + idx, mk_pte(page, PAGE_KERNEL_EXEC));
return (void *)vaddr;
}
@@ -38,12 +60,10 @@ EXPORT_SYMBOL(kmap_atomic);
void __kunmap_atomic(void *kvaddr)
{
- int idx, type;
-
if (kvaddr >= (void *)FIXADDR_START &&
kvaddr < (void *)FIXADDR_TOP) {
- type = kmap_atomic_idx();
- idx = type + KM_TYPE_NR * smp_processor_id();
+ int idx = kmap_idx(kmap_atomic_idx(),
+ DCACHE_ALIAS((unsigned long)kvaddr));
/*
* Force other mappings to Oops if they'll try to access this
@@ -51,7 +71,7 @@ void __kunmap_atomic(void *kvaddr)
* is a bad idea also, in case the page changes cacheability
* attributes or becomes a protected page in a hypervisor.
*/
- pte_clear(&init_mm, kvaddr, kmap_pte - idx);
+ pte_clear(&init_mm, kvaddr, kmap_pte + idx);
local_flush_tlb_kernel_range((unsigned long)kvaddr,
(unsigned long)kvaddr + PAGE_SIZE);
@@ -69,4 +89,5 @@ void __init kmap_init(void)
/* cache the first kmap pte */
kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
kmap_pte = kmap_get_fixmap_pte(kmap_vstart);
+ kmap_waitqueues_init();
}
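The kmap_idx() scheme above gives each (type, cpu) pair one fixmap slot per dcache color rather than a single slot. A short sketch of the invariant this relies on, assuming FIX_KMAP_BEGIN is aligned on a dcache-way boundary so that fixmap slot i has color i % DCACHE_N_COLORS (sketch names are hypothetical):

/* kmap_atomic() picks the slot whose virtual color matches the page's
 * physical color, so the atomic mapping never aliases the page's
 * other mappings; DCACHE_N_COLORS is DCACHE_WAY_SIZE / PAGE_SIZE. */
static inline int kmap_idx_sketch(int type, int cpu, unsigned long color)
{
        return (type + KM_TYPE_NR * cpu) * DCACHE_N_COLORS + color;
}

Because the slot's color can be read back as DCACHE_ALIAS(kvaddr), __kunmap_atomic() recovers the same index from the virtual address alone. The last_pkmap_nr_arr[] and pkmap_map_wait_arr[] arrays split the non-atomic pkmap allocator state per color in the same spirit.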
diff --git a/arch/xtensa/mm/misc.S b/arch/xtensa/mm/misc.S
index 1f68558dbcc2..11a01c3e9cea 100644
--- a/arch/xtensa/mm/misc.S
+++ b/arch/xtensa/mm/misc.S
@@ -110,41 +110,24 @@ ENTRY(__tlbtemp_mapping_start)
#if (DCACHE_WAY_SIZE > PAGE_SIZE)
/*
- * clear_user_page (void *addr, unsigned long vaddr, struct page *page)
- * a2 a3 a4
+ * clear_page_alias(void *addr, unsigned long paddr)
+ * a2 a3
*/
-ENTRY(clear_user_page)
+ENTRY(clear_page_alias)
entry a1, 32
- /* Mark page dirty and determine alias. */
+ /* Skip setting up a temporary DTLB if not aliased low page. */
- movi a7, (1 << PG_ARCH_1)
- l32i a5, a4, PAGE_FLAGS
- xor a6, a2, a3
- extui a3, a3, PAGE_SHIFT, DCACHE_ALIAS_ORDER
- extui a6, a6, PAGE_SHIFT, DCACHE_ALIAS_ORDER
- or a5, a5, a7
- slli a3, a3, PAGE_SHIFT
- s32i a5, a4, PAGE_FLAGS
+ movi a5, PAGE_OFFSET
+ movi a6, 0
+ beqz a3, 1f
- /* Skip setting up a temporary DTLB if not aliased. */
-
- beqz a6, 1f
-
- /* Invalidate kernel page. */
-
- mov a10, a2
- call8 __invalidate_dcache_page
-
- /* Setup a temporary DTLB with the color of the VPN */
-
- movi a4, ((PAGE_KERNEL | _PAGE_HW_WRITE) - PAGE_OFFSET) & 0xffffffff
- movi a5, TLBTEMP_BASE_1 # virt
- add a6, a2, a4 # ppn
- add a2, a5, a3 # add 'color'
+ /* Setup a temporary DTLB for the addr. */
+ addi a6, a3, (PAGE_KERNEL | _PAGE_HW_WRITE)
+ mov a4, a2
wdtlb a6, a2
dsync
@@ -165,62 +148,43 @@ ENTRY(clear_user_page)
/* We need to invalidate the temporary idtlb entry, if any. */
-1: addi a2, a2, -PAGE_SIZE
- idtlb a2
+1: idtlb a4
dsync
retw
-ENDPROC(clear_user_page)
+ENDPROC(clear_page_alias)
/*
- * copy_page_user (void *to, void *from, unsigned long vaddr, struct page *page)
- * a2 a3 a4 a5
+ * copy_page_alias(void *to, void *from,
+ * a2 a3
+ * unsigned long to_paddr, unsigned long from_paddr)
+ * a4 a5
*/
-ENTRY(copy_user_page)
+ENTRY(copy_page_alias)
entry a1, 32
- /* Mark page dirty and determine alias for destination. */
-
- movi a8, (1 << PG_ARCH_1)
- l32i a9, a5, PAGE_FLAGS
- xor a6, a2, a4
- xor a7, a3, a4
- extui a4, a4, PAGE_SHIFT, DCACHE_ALIAS_ORDER
- extui a6, a6, PAGE_SHIFT, DCACHE_ALIAS_ORDER
- extui a7, a7, PAGE_SHIFT, DCACHE_ALIAS_ORDER
- or a9, a9, a8
- slli a4, a4, PAGE_SHIFT
- s32i a9, a5, PAGE_FLAGS
- movi a5, ((PAGE_KERNEL | _PAGE_HW_WRITE) - PAGE_OFFSET) & 0xffffffff
-
- beqz a6, 1f
-
- /* Invalidate dcache */
-
- mov a10, a2
- call8 __invalidate_dcache_page
+ /* Skip setting up a temporary DTLB for destination if not aliased. */
- /* Setup a temporary DTLB with a matching color. */
+ movi a6, 0
+ movi a7, 0
+ beqz a4, 1f
- movi a8, TLBTEMP_BASE_1 # base
- add a6, a2, a5 # ppn
- add a2, a8, a4 # add 'color'
+ /* Setup a temporary DTLB for destination. */
+ addi a6, a4, (PAGE_KERNEL | _PAGE_HW_WRITE)
wdtlb a6, a2
dsync
- /* Skip setting up a temporary DTLB for destination if not aliased. */
+ /* Skip setting up a temporary DTLB for source if not aliased. */
-1: beqz a7, 1f
+1: beqz a5, 1f
- /* Setup a temporary DTLB with a matching color. */
+ /* Setup a temporary DTLB for source. */
- movi a8, TLBTEMP_BASE_2 # base
- add a7, a3, a5 # ppn
- add a3, a8, a4
+ addi a7, a5, PAGE_KERNEL
addi a8, a3, 1 # way1
wdtlb a7, a8
@@ -271,7 +235,7 @@ ENTRY(copy_user_page)
retw
-ENDPROC(copy_user_page)
+ENDPROC(copy_page_alias)
#endif
@@ -300,6 +264,30 @@ ENTRY(__flush_invalidate_dcache_page_alias)
retw
ENDPROC(__flush_invalidate_dcache_page_alias)
+
+/*
+ * void __invalidate_dcache_page_alias (addr, phys)
+ * a2 a3
+ */
+
+ENTRY(__invalidate_dcache_page_alias)
+
+ entry sp, 16
+
+ movi a7, 0 # required for exception handler
+ addi a6, a3, (PAGE_KERNEL | _PAGE_HW_WRITE)
+ mov a4, a2
+ wdtlb a6, a2
+ dsync
+
+ ___invalidate_dcache_page a2 a3
+
+ idtlb a4
+ dsync
+
+ retw
+
+ENDPROC(__invalidate_dcache_page_alias)
#endif
ENTRY(__tlbtemp_mapping_itlb)
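For readers without Xtensa assembly, here is a C-level sketch of what clear_page_alias() now does. wdtlb(), idtlb() and dsync() are hypothetical wrappers standing in for the machine instructions of the same names; the real routine is the assembly above:

/* Sketch only: clear a page through a correctly colored mapping.
 * paddr == 0 signals that kvaddr is already the ordinary lowmem
 * mapping and no temporary DTLB entry is needed. */
void clear_page_alias_sketch(void *kvaddr, unsigned long paddr)
{
        if (paddr) {
                /* install a temporary DTLB entry mapping paddr at kvaddr */
                wdtlb(paddr + (PAGE_KERNEL | _PAGE_HW_WRITE),
                      (unsigned long)kvaddr);
                dsync();
        }
        memset(kvaddr, 0, PAGE_SIZE);           /* plain stores do the work */
        if (paddr) {
                idtlb((unsigned long)kvaddr);   /* drop the temporary entry */
                dsync();
        }
}

copy_page_alias() follows the same pattern with two windows (the destination via TLBTEMP_BASE_1, the source via a way-1 DTLB entry), and __invalidate_dcache_page_alias() is the invalidate-only counterpart used by kmap_invalidate_coherent() in cache.c.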
diff --git a/arch/xtensa/mm/mmu.c b/arch/xtensa/mm/mmu.c
index 3429b483d9f8..abe4513eb0dd 100644
--- a/arch/xtensa/mm/mmu.c
+++ b/arch/xtensa/mm/mmu.c
@@ -18,32 +18,38 @@
#include <asm/io.h>
#if defined(CONFIG_HIGHMEM)
-static void * __init init_pmd(unsigned long vaddr)
+static void * __init init_pmd(unsigned long vaddr, unsigned long n_pages)
{
pgd_t *pgd = pgd_offset_k(vaddr);
pmd_t *pmd = pmd_offset(pgd, vaddr);
+ pte_t *pte;
+ unsigned long i;
- if (pmd_none(*pmd)) {
- unsigned i;
- pte_t *pte = alloc_bootmem_low_pages(PAGE_SIZE);
+ n_pages = ALIGN(n_pages, PTRS_PER_PTE);
- for (i = 0; i < 1024; i++)
- pte_clear(NULL, 0, pte + i);
+ pr_debug("%s: vaddr: 0x%08lx, n_pages: %ld\n",
+ __func__, vaddr, n_pages);
- set_pmd(pmd, __pmd(((unsigned long)pte) & PAGE_MASK));
- BUG_ON(pte != pte_offset_kernel(pmd, 0));
- pr_debug("%s: vaddr: 0x%08lx, pmd: 0x%p, pte: 0x%p\n",
- __func__, vaddr, pmd, pte);
- return pte;
- } else {
- return pte_offset_kernel(pmd, 0);
+ pte = alloc_bootmem_low_pages(n_pages * sizeof(pte_t));
+
+ for (i = 0; i < n_pages; ++i)
+ pte_clear(NULL, 0, pte + i);
+
+ for (i = 0; i < n_pages; i += PTRS_PER_PTE, ++pmd) {
+ pte_t *cur_pte = pte + i;
+
+ BUG_ON(!pmd_none(*pmd));
+ set_pmd(pmd, __pmd(((unsigned long)cur_pte) & PAGE_MASK));
+ BUG_ON(cur_pte != pte_offset_kernel(pmd, 0));
+ pr_debug("%s: pmd: 0x%p, pte: 0x%p\n",
+ __func__, pmd, cur_pte);
}
+ return pte;
}
static void __init fixedrange_init(void)
{
- BUILD_BUG_ON(FIXADDR_SIZE > PMD_SIZE);
- init_pmd(__fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK);
+ init_pmd(__fix_to_virt(0), __end_of_fixed_addresses);
}
#endif
@@ -52,7 +58,7 @@ void __init paging_init(void)
memset(swapper_pg_dir, 0, PAGE_SIZE);
#ifdef CONFIG_HIGHMEM
fixedrange_init();
- pkmap_page_table = init_pmd(PKMAP_BASE);
+ pkmap_page_table = init_pmd(PKMAP_BASE, LAST_PKMAP);
kmap_init();
#endif
}
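Finally, a sizing note on the mmu.c change: init_pmd() now takes the number of pages to cover and may populate several consecutive pmd entries instead of exactly one. A sketch of the arithmetic, assuming xtensa's PTRS_PER_PTE of 1024 for 4 KiB pages:

/* Sketch: number of pte pages init_pmd(vaddr, n_pages) wires up;
 * one pmd entry (one pte page) is consumed per PTRS_PER_PTE ptes. */
unsigned long pte_pages(unsigned long n_pages)
{
        return ALIGN(n_pages, PTRS_PER_PTE) / PTRS_PER_PTE;
}

This is also why the BUILD_BUG_ON(FIXADDR_SIZE > PMD_SIZE) check could be dropped: with one kmap slot per dcache color the fixmap can outgrow a single PMD_SIZE, and fixedrange_init() now maps all __end_of_fixed_addresses slots starting at __fix_to_virt(0).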