author    James Hogan <james.hogan@imgtec.com>  2016-04-19 11:25:10 +0300
committer Ralf Baechle <ralf@linux-mips.org>    2016-05-13 16:30:25 +0300
commit    4b6f99d307ed6c7a28b952bfb7b66fb26a6a4cf0 (patch)
tree      b3a82750ade7072f1c688d8232678ef51521adbb
parent    2caa89b49b21737b2b7b6fce37cb323535188b06 (diff)
download  linux-4b6f99d307ed6c7a28b952bfb7b66fb26a6a4cf0.tar.xz
MIPS: mm: Don't do MTHC0 if XPA not present
Performing an MTHC0 instruction without XPA being present will trigger a
reserved instruction exception; therefore conditionalise the use of this
instruction when building TLB handlers (build_update_entries()), and in
__update_tlb().

This allows an XPA kernel to run on non-XPA hardware without that
instruction implemented, just like it can run on XPA-capable hardware
without XPA in use (with the "noxpa" kernel argument) or with XPA not
configured in hardware.

[paul.burton@imgtec.com:
  - Rebase atop other TLB work.
  - Add "mm" to subject.
  - Handle the __kmap_pgprot case.]

Fixes: c5b367835cfc ("MIPS: Add support for XPA.")
Signed-off-by: James Hogan <james.hogan@imgtec.com>
Signed-off-by: Paul Burton <paul.burton@imgtec.com>
Cc: Paul Gortmaker <paul.gortmaker@windriver.com>
Cc: David Hildenbrand <dahi@linux.vnet.ibm.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Jerome Marchand <jmarchan@redhat.com>
Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: linux-mips@linux-mips.org
Cc: linux-kernel@vger.kernel.org
Patchwork: https://patchwork.linux-mips.org/patch/13124/
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
-rw-r--r--  arch/mips/mm/init.c     8
-rw-r--r--  arch/mips/mm/tlb-r4k.c  6
-rw-r--r--  arch/mips/mm/tlbex.c    16
3 files changed, 19 insertions, 11 deletions
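
For readers who want the guard pattern in isolation: the patch only issues the XPA high-half register writes (MTHC0, i.e. writex_c0_entrylo0/1 and uasm_i_mthc0) when the CPU actually implements XPA and it has not been disabled on the command line. Below is a minimal, standalone C sketch of that idea; it is not kernel code. The flags mirror cpu_has_xpa and mips_xpa_disabled from the patch, while update_entrylo(), writex_entrylo() and the printf logging are hypothetical stand-ins for the real coprocessor-0 register writes.

/*
 * Standalone sketch (not kernel code) of the guard used in this patch:
 * the upper half of an EntryLo register is only written when the CPU
 * implements XPA, because MTHC0 is a reserved instruction otherwise.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool cpu_has_xpa;       /* hardware implements XPA (MTHC0/MFHC0) */
static bool mips_xpa_disabled; /* "noxpa" kernel argument */

/* Stand-in for the privileged MTHC0 write; here we just log it. */
static void writex_entrylo(int idx, uint32_t val)
{
	printf("MTHC0 EntryLo%d <- 0x%08x\n", idx, val);
}

static void update_entrylo(int idx, uint64_t pfn_bits)
{
	/* Lower 32 bits are always safe to write via MTC0. */
	printf("MTC0  EntryLo%d <- 0x%08x\n", idx, (uint32_t)pfn_bits);

	/* Upper half only when the instruction exists and XPA is in use. */
	if (cpu_has_xpa && !mips_xpa_disabled)
		writex_entrylo(idx, (uint32_t)(pfn_bits >> 32));
}

int main(void)
{
	cpu_has_xpa = false;            /* e.g. running on non-XPA hardware */
	update_entrylo(0, 0x1234567890ULL);

	cpu_has_xpa = true;             /* XPA present and enabled */
	update_entrylo(1, 0x1234567890ULL);
	return 0;
}

On a non-XPA CPU the sketch skips the high-half write entirely, which is exactly what keeps the reserved instruction exception described in the commit message from firing.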
diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c
index 134c988bc61f..9b58eb5fd0d5 100644
--- a/arch/mips/mm/init.c
+++ b/arch/mips/mm/init.c
@@ -112,9 +112,11 @@ static void *__kmap_pgprot(struct page *page, unsigned long addr, pgprot_t prot)
 	write_c0_entrylo0(entrylo);
 	write_c0_entrylo1(entrylo);
 #ifdef CONFIG_XPA
-	entrylo = (pte.pte_low & _PFNX_MASK);
-	writex_c0_entrylo0(entrylo);
-	writex_c0_entrylo1(entrylo);
+	if (cpu_has_xpa) {
+		entrylo = (pte.pte_low & _PFNX_MASK);
+		writex_c0_entrylo0(entrylo);
+		writex_c0_entrylo1(entrylo);
+	}
 #endif
 	tlbidx = read_c0_wired();
 	write_c0_wired(tlbidx + 1);
diff --git a/arch/mips/mm/tlb-r4k.c b/arch/mips/mm/tlb-r4k.c
index 61f1f1ba4a17..5a5c7fec645e 100644
--- a/arch/mips/mm/tlb-r4k.c
+++ b/arch/mips/mm/tlb-r4k.c
@@ -339,10 +339,12 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
 #if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
 #ifdef CONFIG_XPA
 		write_c0_entrylo0(pte_to_entrylo(ptep->pte_high));
-		writex_c0_entrylo0(ptep->pte_low & _PFNX_MASK);
+		if (cpu_has_xpa)
+			writex_c0_entrylo0(ptep->pte_low & _PFNX_MASK);
 		ptep++;
 		write_c0_entrylo1(pte_to_entrylo(ptep->pte_high));
-		writex_c0_entrylo1(ptep->pte_low & _PFNX_MASK);
+		if (cpu_has_xpa)
+			writex_c0_entrylo1(ptep->pte_low & _PFNX_MASK);
 #else
 		write_c0_entrylo0(ptep->pte_high);
 		ptep++;
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index 588bde3bdbe7..0efa6211cc40 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -1030,17 +1030,21 @@ static void build_update_entries(u32 **p, unsigned int tmp, unsigned int ptep)
 		UASM_i_ROTR(p, tmp, tmp, ilog2(_PAGE_GLOBAL));
 		UASM_i_MTC0(p, tmp, C0_ENTRYLO0);
-		uasm_i_lw(p, tmp, 0, ptep);
-		uasm_i_ext(p, tmp, tmp, 0, 24);
-		uasm_i_mthc0(p, tmp, C0_ENTRYLO0);
+		if (cpu_has_xpa && !mips_xpa_disabled) {
+			uasm_i_lw(p, tmp, 0, ptep);
+			uasm_i_ext(p, tmp, tmp, 0, 24);
+			uasm_i_mthc0(p, tmp, C0_ENTRYLO0);
+		}
 		uasm_i_lw(p, tmp, pte_off_odd, ptep); /* odd pte */
 		UASM_i_ROTR(p, tmp, tmp, ilog2(_PAGE_GLOBAL));
 		UASM_i_MTC0(p, tmp, C0_ENTRYLO1);
-		uasm_i_lw(p, tmp, sizeof(pte_t), ptep);
-		uasm_i_ext(p, tmp, tmp, 0, 24);
-		uasm_i_mthc0(p, tmp, C0_ENTRYLO1);
+		if (cpu_has_xpa && !mips_xpa_disabled) {
+			uasm_i_lw(p, tmp, sizeof(pte_t), ptep);
+			uasm_i_ext(p, tmp, tmp, 0, 24);
+			uasm_i_mthc0(p, tmp, C0_ENTRYLO1);
+		}
 		return;
 	}