author	Ralf Baechle <ralf@linux-mips.org>	2015-04-13 17:03:32 +0300
committer	Ralf Baechle <ralf@linux-mips.org>	2015-04-13 17:03:32 +0300
commit	3e20a26b02bd4f24945c87407df51948dd488620 (patch)
tree	f466d3b2a47a98ec2910724e17ee2f3a93c1a49e /arch/mips/mm
parent	98b0429b7abd5c05efdb23f3eba02ec3f696748e (diff)
parent	5306a5450824691e27d68f711758515debedeeac (diff)
Merge branch '4.0-fixes' into mips-for-linux-next
Diffstat (limited to 'arch/mips/mm')
 arch/mips/mm/cache.c   |  39
 arch/mips/mm/init.c    |   7
 arch/mips/mm/tlb-r4k.c |  12
 arch/mips/mm/tlbex.c   | 115
 4 files changed, 119 insertions(+), 54 deletions(-)
diff --git a/arch/mips/mm/cache.c b/arch/mips/mm/cache.c
index 7e3ea7766822..77d96db8253c 100644
--- a/arch/mips/mm/cache.c
+++ b/arch/mips/mm/cache.c
@@ -119,36 +119,37 @@ void __flush_anon_page(struct page *page, unsigned long vmaddr)
EXPORT_SYMBOL(__flush_anon_page);
-static void mips_flush_dcache_from_pte(pte_t pteval, unsigned long address)
+void __flush_icache_page(struct vm_area_struct *vma, struct page *page)
+{
+ unsigned long addr;
+
+ if (PageHighMem(page))
+ return;
+
+ addr = (unsigned long) page_address(page);
+ flush_data_cache_page(addr);
+}
+EXPORT_SYMBOL_GPL(__flush_icache_page);
+
+void __update_cache(struct vm_area_struct *vma, unsigned long address,
+ pte_t pte)
{
struct page *page;
- unsigned long pfn = pte_pfn(pteval);
+ unsigned long pfn, addr;
+ int exec = (vma->vm_flags & VM_EXEC) && !cpu_has_ic_fills_f_dc;
+ pfn = pte_pfn(pte);
if (unlikely(!pfn_valid(pfn)))
return;
-
page = pfn_to_page(pfn);
if (page_mapping(page) && Page_dcache_dirty(page)) {
- unsigned long page_addr = (unsigned long) page_address(page);
-
- if (!cpu_has_ic_fills_f_dc ||
- pages_do_alias(page_addr, address & PAGE_MASK))
- flush_data_cache_page(page_addr);
+ addr = (unsigned long) page_address(page);
+ if (exec || pages_do_alias(addr, address & PAGE_MASK))
+ flush_data_cache_page(addr);
ClearPageDcacheDirty(page);
}
}
-void set_pte_at(struct mm_struct *mm, unsigned long addr,
- pte_t *ptep, pte_t pteval)
-{
- if (cpu_has_dc_aliases || !cpu_has_ic_fills_f_dc) {
- if (pte_present(pteval))
- mips_flush_dcache_from_pte(pteval, addr);
- }
-
- set_pte(ptep, pteval);
-}
-
unsigned long _page_cachable_default;
EXPORT_SYMBOL(_page_cachable_default);
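The cache.c rework above folds the old set_pte_at() flush into __update_cache(): a dirty dcache page is written back either when the mapping is executable on a core whose icache cannot fill from the dcache, or when the kernel and user virtual addresses alias in the virtually indexed dcache. Below is a minimal stand-alone C model of that aliasing test; the mask value is hypothetical, whereas the kernel derives it from the probed cache geometry at boot.

    #include <stdbool.h>

    #define PAGE_MASK      (~0xfffUL)   /* assumes 4 KiB pages */
    #define SHM_ALIGN_MASK 0x7fffUL     /* hypothetical 32 KiB alias window */

    /* Two virtual addresses alias in a virtually indexed cache when
     * they differ in the index bits above the page offset, i.e. they
     * select different cache sets for the same physical page. */
    static bool pages_do_alias(unsigned long addr1, unsigned long addr2)
    {
            return ((addr1 ^ addr2) & SHM_ALIGN_MASK & PAGE_MASK) != 0;
    }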
diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c
index 448cde372af0..faa5c9822ecc 100644
--- a/arch/mips/mm/init.c
+++ b/arch/mips/mm/init.c
@@ -96,7 +96,7 @@ static void *__kmap_pgprot(struct page *page, unsigned long addr, pgprot_t prot)
vaddr = __fix_to_virt(FIX_CMAP_END - idx);
pte = mk_pte(page, prot);
#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
- entrylo = pte.pte_high;
+ entrylo = pte_to_entrylo(pte.pte_high);
#else
entrylo = pte_to_entrylo(pte_val(pte));
#endif
@@ -106,6 +106,11 @@ static void *__kmap_pgprot(struct page *page, unsigned long addr, pgprot_t prot)
write_c0_entryhi(vaddr & (PAGE_MASK << 1));
write_c0_entrylo0(entrylo);
write_c0_entrylo1(entrylo);
+#ifdef CONFIG_XPA
+ entrylo = (pte.pte_low & _PFNX_MASK);
+ writex_c0_entrylo0(entrylo);
+ writex_c0_entrylo1(entrylo);
+#endif
tlbidx = read_c0_wired();
write_c0_wired(tlbidx + 1);
write_c0_index(tlbidx);
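Under XPA the physical frame number no longer fits in a 32-bit EntryLo register, so the kmap path above programs each wired TLB entry in two halves: pte_high goes through pte_to_entrylo() into EntryLo0/1 as before, while the extra physical-address bits held in pte_low are written into the EntryLo extension fields via writex_c0_entrylo0/1 (the MTHC0 instruction). A rough C model of the split, with a hypothetical mask value:

    #include <stdint.h>

    #define _PFNX_MASK 0x00ffffffu  /* hypothetical: extension bits in pte_low */

    struct entrylo_pair {
            uint32_t lo;    /* written with mtc0  */
            uint32_t lo_x;  /* written with mthc0 */
    };

    static struct entrylo_pair split_xpa_pte(uint32_t pte_high, uint32_t pte_low)
    {
            struct entrylo_pair e;

            e.lo   = pte_high;              /* stands in for pte_to_entrylo() */
            e.lo_x = pte_low & _PFNX_MASK;  /* upper physical-address bits */
            return e;
    }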
diff --git a/arch/mips/mm/tlb-r4k.c b/arch/mips/mm/tlb-r4k.c
index 37ad381c3e5f..a27a088e6f9f 100644
--- a/arch/mips/mm/tlb-r4k.c
+++ b/arch/mips/mm/tlb-r4k.c
@@ -333,9 +333,17 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
ptep = pte_offset_map(pmdp, address);
#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
+#ifdef CONFIG_XPA
+ write_c0_entrylo0(pte_to_entrylo(ptep->pte_high));
+ writex_c0_entrylo0(ptep->pte_low & _PFNX_MASK);
+ ptep++;
+ write_c0_entrylo1(pte_to_entrylo(ptep->pte_high));
+ writex_c0_entrylo1(ptep->pte_low & _PFNX_MASK);
+#else
write_c0_entrylo0(ptep->pte_high);
ptep++;
write_c0_entrylo1(ptep->pte_high);
+#endif
#else
write_c0_entrylo0(pte_to_entrylo(pte_val(*ptep++)));
write_c0_entrylo1(pte_to_entrylo(pte_val(*ptep)));
@@ -355,6 +363,9 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
unsigned long entryhi, unsigned long pagemask)
{
+#ifdef CONFIG_XPA
+ panic("Broken for XPA kernels");
+#else
unsigned long flags;
unsigned long wired;
unsigned long old_pagemask;
@@ -383,6 +394,7 @@ void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
write_c0_pagemask(old_pagemask);
local_flush_tlb_all();
local_irq_restore(flags);
+#endif
}
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
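One MIPS TLB entry always maps an even/odd pair of virtual pages, which is why __update_tlb() above loads the PTE at ptep into EntryLo0, advances ptep, and loads its neighbour into EntryLo1. Schematically, ignoring the XPA extension writes (pte_val, pte_to_entrylo and the write_c0_* accessors are the kernel's own):

    /* ptep points at the even slot of the pair covering 'address';
     * the odd slot is the next page-table entry. */
    write_c0_entrylo0(pte_to_entrylo(pte_val(ptep[0])));
    write_c0_entrylo1(pte_to_entrylo(pte_val(ptep[1])));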
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index 7c7469f56ec3..97c87027c17f 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -35,6 +35,17 @@
#include <asm/uasm.h>
#include <asm/setup.h>
+static int mips_xpa_disabled;
+
+static int __init xpa_disable(char *s)
+{
+ mips_xpa_disabled = 1;
+
+ return 1;
+}
+
+__setup("noxpa", xpa_disable);
+
/*
* TLB load/store/modify handlers.
*
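The "noxpa" handler added above uses the kernel's __setup() mechanism: the function runs during early command-line parsing when the token matches, and returning 1 marks the option as consumed. Booting with noxpa therefore leaves mips_xpa_disabled set, and config_xpa_params() further down skips enabling PG_ELPA. A minimal sketch of the same pattern, with hypothetical names:

    #include <linux/init.h>

    static int example_disabled __initdata;

    /* Runs at early boot if "noexample" appears on the command line. */
    static int __init noexample_setup(char *s)
    {
            example_disabled = 1;
            return 1;       /* option consumed */
    }
    __setup("noexample", noexample_setup);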
@@ -231,14 +242,14 @@ static void output_pgtable_bits_defines(void)
pr_define("_PAGE_HUGE_SHIFT %d\n", _PAGE_HUGE_SHIFT);
pr_define("_PAGE_SPLITTING_SHIFT %d\n", _PAGE_SPLITTING_SHIFT);
#endif
+#ifdef CONFIG_CPU_MIPSR2
if (cpu_has_rixi) {
#ifdef _PAGE_NO_EXEC_SHIFT
pr_define("_PAGE_NO_EXEC_SHIFT %d\n", _PAGE_NO_EXEC_SHIFT);
-#endif
-#ifdef _PAGE_NO_READ_SHIFT
pr_define("_PAGE_NO_READ_SHIFT %d\n", _PAGE_NO_READ_SHIFT);
#endif
}
+#endif
pr_define("_PAGE_GLOBAL_SHIFT %d\n", _PAGE_GLOBAL_SHIFT);
pr_define("_PAGE_VALID_SHIFT %d\n", _PAGE_VALID_SHIFT);
pr_define("_PAGE_DIRTY_SHIFT %d\n", _PAGE_DIRTY_SHIFT);
@@ -501,26 +512,9 @@ static void build_tlb_write_entry(u32 **p, struct uasm_label **l,
case tlb_indexed: tlbw = uasm_i_tlbwi; break;
}
- if (cpu_has_mips_r2_exec_hazard) {
- /*
- * The architecture spec says an ehb is required here,
- * but a number of cores do not have the hazard and
- * using an ehb causes an expensive pipeline stall.
- */
- switch (current_cpu_type()) {
- case CPU_M14KC:
- case CPU_74K:
- case CPU_1074K:
- case CPU_PROAPTIV:
- case CPU_P5600:
- case CPU_M5150:
- case CPU_QEMU_GENERIC:
- break;
-
- default:
+ if (cpu_has_mips_r2_r6) {
+ if (cpu_has_mips_r2_exec_hazard)
uasm_i_ehb(p);
- break;
- }
tlbw(p);
return;
}
@@ -1028,12 +1022,27 @@ static void build_update_entries(u32 **p, unsigned int tmp, unsigned int ptep)
} else {
int pte_off_even = sizeof(pte_t) / 2;
int pte_off_odd = pte_off_even + sizeof(pte_t);
+#ifdef CONFIG_XPA
+ const int scratch = 1; /* Our extra working register */
- /* The pte entries are pre-shifted */
- uasm_i_lw(p, tmp, pte_off_even, ptep); /* get even pte */
- UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */
- uasm_i_lw(p, ptep, pte_off_odd, ptep); /* get odd pte */
- UASM_i_MTC0(p, ptep, C0_ENTRYLO1); /* load it */
+ uasm_i_addu(p, scratch, 0, ptep);
+#endif
+ uasm_i_lw(p, tmp, pte_off_even, ptep); /* even pte */
+ uasm_i_lw(p, ptep, pte_off_odd, ptep); /* odd pte */
+ UASM_i_ROTR(p, tmp, tmp, ilog2(_PAGE_GLOBAL));
+ UASM_i_ROTR(p, ptep, ptep, ilog2(_PAGE_GLOBAL));
+ UASM_i_MTC0(p, tmp, C0_ENTRYLO0);
+ UASM_i_MTC0(p, ptep, C0_ENTRYLO1);
+#ifdef CONFIG_XPA
+ uasm_i_lw(p, tmp, 0, scratch);
+ uasm_i_lw(p, ptep, sizeof(pte_t), scratch);
+ uasm_i_lui(p, scratch, 0xff);
+ uasm_i_ori(p, scratch, scratch, 0xffff);
+ uasm_i_and(p, tmp, scratch, tmp);
+ uasm_i_and(p, ptep, scratch, ptep);
+ uasm_i_mthc0(p, tmp, C0_ENTRYLO0);
+ uasm_i_mthc0(p, ptep, C0_ENTRYLO1);
+#endif
}
#else
UASM_i_LW(p, tmp, 0, ptep); /* get even pte */
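The rewritten 32-bit path above no longer assumes pre-shifted PTEs: each loaded word is rotated so the software _PAGE_GLOBAL bit lands in bit 0, the hardware G position, before being moved to EntryLo. Under XPA the original ptep is first parked in a scratch register so both 64-bit PTEs can be reloaded afterwards and their upper physical-address bits fed to the EntryLo extension halves with mthc0; the lui 0xff / ori 0xffff pair merely builds the 0x00ffffff mask. A plain C model of the per-register computation, with a hypothetical _PAGE_GLOBAL position:

    #include <stdint.h>

    #define _PAGE_GLOBAL (1u << 2)      /* hypothetical software bit */
    #define _PFNX_MASK   0x00ffffffu    /* lui 0xff; ori 0xffff */

    static uint32_t rotr32(uint32_t v, unsigned int n)
    {
            return (v >> n) | (v << (32 - n));  /* valid for n in 1..31 */
    }

    /* EntryLo half: rotate so _PAGE_GLOBAL becomes bit 0 (the G bit). */
    static uint32_t entrylo(uint32_t pte_high)
    {
            return rotr32(pte_high, 2); /* 2 == ilog2(_PAGE_GLOBAL) here */
    }

    /* EntryLo extension half under XPA: extra PA bits from the low word. */
    static uint32_t entrylo_x(uint32_t pte_low)
    {
            return pte_low & _PFNX_MASK;
    }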
@@ -1534,8 +1543,14 @@ iPTE_SW(u32 **p, struct uasm_reloc **r, unsigned int pte, unsigned int ptr,
{
#ifdef CONFIG_PHYS_ADDR_T_64BIT
unsigned int hwmode = mode & (_PAGE_VALID | _PAGE_DIRTY);
-#endif
+ if (!cpu_has_64bits) {
+ const int scratch = 1; /* Our extra working register */
+
+ uasm_i_lui(p, scratch, (mode >> 16));
+ uasm_i_or(p, pte, pte, scratch);
+ } else
+#endif
uasm_i_ori(p, pte, pte, mode);
#ifdef CONFIG_SMP
# ifdef CONFIG_PHYS_ADDR_T_64BIT
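MIPS ori carries only a 16-bit zero-extended immediate, so on 32-bit cores with 64-bit PTEs, where the _PAGE_VALID and _PAGE_DIRTY software bits have moved above bit 15, the hunk above synthesises the mode bits with lui and merges them with or instead. The equivalent computation in plain C:

    #include <stdint.h>

    static uint32_t set_mode_bits(uint32_t pte, uint32_t mode)
    {
            if (mode >> 16)
                    /* lui scratch, mode >> 16; or pte, pte, scratch --
                     * assumes all of mode's bits live in the top half */
                    pte |= (mode >> 16) << 16;
            else
                    /* ori pte, pte, mode (16-bit immediate) */
                    pte |= mode;
            return pte;
    }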
@@ -1599,15 +1614,17 @@ build_pte_present(u32 **p, struct uasm_reloc **r,
uasm_il_bbit0(p, r, pte, ilog2(_PAGE_PRESENT), lid);
uasm_i_nop(p);
} else {
- uasm_i_andi(p, t, pte, _PAGE_PRESENT);
+ uasm_i_srl(p, t, pte, _PAGE_PRESENT_SHIFT);
+ uasm_i_andi(p, t, t, 1);
uasm_il_beqz(p, r, t, lid);
if (pte == t)
/* You lose the SMP race :-(*/
iPTE_LW(p, pte, ptr);
}
} else {
- uasm_i_andi(p, t, pte, _PAGE_PRESENT | _PAGE_READ);
- uasm_i_xori(p, t, t, _PAGE_PRESENT | _PAGE_READ);
+ uasm_i_srl(p, t, pte, _PAGE_PRESENT_SHIFT);
+ uasm_i_andi(p, t, t, 3);
+ uasm_i_xori(p, t, t, 3);
uasm_il_bnez(p, r, t, lid);
if (pte == t)
/* You lose the SMP race :-(*/
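The new present test relies on the bit layout after the shift: _PAGE_PRESENT ends up in bit 0 and _PAGE_READ in bit 1, so a single andi/xori pair against 3 checks both at once (the writable variant below plays the same trick with mask 5, _PAGE_WRITE sitting two bits above present). A C model of the readable check, with a hypothetical shift value:

    #include <stdbool.h>
    #include <stdint.h>

    #define _PAGE_PRESENT_SHIFT 0   /* hypothetical */

    /* Mirrors the srl/andi/xori/bnez sequence above: fault unless both
     * PRESENT (bit 0) and READ (bit 1) survive the mask. */
    static bool pte_present_and_readable(uint64_t pte)
    {
            uint32_t t = (uint32_t)(pte >> _PAGE_PRESENT_SHIFT) & 3;

            return (t ^ 3) == 0;
    }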
@@ -1636,8 +1653,9 @@ build_pte_writable(u32 **p, struct uasm_reloc **r,
{
int t = scratch >= 0 ? scratch : pte;
- uasm_i_andi(p, t, pte, _PAGE_PRESENT | _PAGE_WRITE);
- uasm_i_xori(p, t, t, _PAGE_PRESENT | _PAGE_WRITE);
+ uasm_i_srl(p, t, pte, _PAGE_PRESENT_SHIFT);
+ uasm_i_andi(p, t, t, 5);
+ uasm_i_xori(p, t, t, 5);
uasm_il_bnez(p, r, t, lid);
if (pte == t)
/* You lose the SMP race :-(*/
@@ -1673,7 +1691,8 @@ build_pte_modifiable(u32 **p, struct uasm_reloc **r,
uasm_i_nop(p);
} else {
int t = scratch >= 0 ? scratch : pte;
- uasm_i_andi(p, t, pte, _PAGE_WRITE);
+ uasm_i_srl(p, t, pte, _PAGE_WRITE_SHIFT);
+ uasm_i_andi(p, t, t, 1);
uasm_il_beqz(p, r, t, lid);
if (pte == t)
/* You lose the SMP race :-(*/
@@ -2286,6 +2305,11 @@ static void config_htw_params(void)
pwsize = ilog2(PTRS_PER_PGD) << MIPS_PWSIZE_GDW_SHIFT;
pwsize |= ilog2(PTRS_PER_PTE) << MIPS_PWSIZE_PTW_SHIFT;
+
+ /* If XPA has been enabled, PTEs are 64-bit in size. */
+ if (read_c0_pagegrain() & PG_ELPA)
+ pwsize |= 1;
+
write_c0_pwsize(pwsize);
/* Make sure everything is set before we enable the HTW */
@@ -2299,6 +2323,28 @@ static void config_htw_params(void)
print_htw_config();
}
+static void config_xpa_params(void)
+{
+#ifdef CONFIG_XPA
+ unsigned int pagegrain;
+
+ if (mips_xpa_disabled) {
+ pr_info("Extended Physical Addressing (XPA) disabled\n");
+ return;
+ }
+
+ pagegrain = read_c0_pagegrain();
+ write_c0_pagegrain(pagegrain | PG_ELPA);
+ back_to_back_c0_hazard();
+ pagegrain = read_c0_pagegrain();
+
+ if (pagegrain & PG_ELPA)
+ pr_info("Extended Physical Addressing (XPA) enabled\n");
+ else
+ panic("Extended Physical Addressing (XPA) disabled");
+#endif
+}
+
void build_tlb_refill_handler(void)
{
/*
@@ -2363,8 +2409,9 @@ void build_tlb_refill_handler(void)
}
if (cpu_has_local_ebase)
build_r4000_tlb_refill_handler();
+ if (cpu_has_xpa)
+ config_xpa_params();
if (cpu_has_htw)
config_htw_params();
-
}
}