author:    Huacai Chen <chenhc@lemote.com>  2017-03-16 16:00:27 +0300
committer: Ralf Baechle <ralf@linux-mips.org>  2017-03-21 23:58:50 +0300
commit:    0115f6cbf26663c86496bc56eeea293f85b77897 (patch)
tree:      3d29261c03de9beac5ba7fa8e07e31a6dc4ec81b  /arch/mips/mm/tlbex.c
parent:    5a34133167dce36666ea054e30a561b7f4413b7f (diff)
download:  linux-0115f6cbf26663c86496bc56eeea293f85b77897.tar.xz
MIPS: Flush wrong invalid FTLB entry for huge page
On VTLB+FTLB platforms (such as Loongson-3A R2), the FTLB's pagesize is usually configured to be the same as PAGE_SIZE, so a huge page entry is not suitable to be written into the FTLB. Unfortunately, when a huge page is created, its page table entries are not created immediately. The TLB refill handler then fetches an invalid page table entry which lacks the "HUGE" bit, and this entry may be written to the FTLB. Since it is invalid, the TLB load/store handler will later use tlbwi to write the valid entry at the same place. However, the valid entry is a huge page entry, which is not suitable for the FTLB.

Our solution is to modify build_huge_handler_tail(): flush the invalid old entry (whether it is in the FTLB or the VTLB; this is done unconditionally in order to reduce branches) and use tlbwr to write the valid new entry.

Signed-off-by: Rui Wang <wangr@lemote.com>
Signed-off-by: Huacai Chen <chenhc@lemote.com>
Cc: John Crispin <john@phrozen.org>
Cc: Steven J. Hill <Steven.Hill@caviumnetworks.com>
Cc: Fuxin Zhang <zhangfx@lemote.com>
Cc: Zhangjin Wu <wuzhangjin@gmail.com>
Cc: Huacai Chen <chenhc@lemote.com>
Cc: linux-mips@linux-mips.org
Cc: stable@vger.kernel.org
Patchwork: https://patchwork.linux-mips.org/patch/15754/
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
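For reference, the flush path that this patch adds to build_huge_handler_tail() emits roughly the following instruction sequence (a sketch only; the scratch register and symbolic constant names are illustrative, the real register is whichever one uasm passed in as "ptr"):

    mfc0    k1, CP0_EntryHi     # read the current EntryHi
    ori     k1, k1, EHINV       # set the EHINV (entry invalidate) bit
    mtc0    k1, CP0_EntryHi
    tlbwi                       # invalidate the stale entry at Index,
                                #   wherever it sits (FTLB or VTLB)
    xori    k1, k1, EHINV       # clear EHINV again
    mtc0    k1, CP0_EntryHi     # restore the original EntryHi
    # ... load EntryLo0/EntryLo1 with the huge page PTEs ...
    tlbwr                       # write the huge entry to a random slot;
                                #   with a huge pagemask this lands in the VTLB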
Diffstat (limited to 'arch/mips/mm/tlbex.c')
-rw-r--r--  arch/mips/mm/tlbex.c  25
1 file changed, 21 insertions(+), 4 deletions(-)
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index 9bfee8988eaf..4f642e07c2b1 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -760,7 +760,8 @@ static void build_huge_update_entries(u32 **p, unsigned int pte,
static void build_huge_handler_tail(u32 **p, struct uasm_reloc **r,
struct uasm_label **l,
unsigned int pte,
- unsigned int ptr)
+ unsigned int ptr,
+ unsigned int flush)
{
#ifdef CONFIG_SMP
UASM_i_SC(p, pte, 0, ptr);
@@ -769,6 +770,22 @@ static void build_huge_handler_tail(u32 **p, struct uasm_reloc **r,
#else
UASM_i_SW(p, pte, 0, ptr);
#endif
+ if (cpu_has_ftlb && flush) {
+ BUG_ON(!cpu_has_tlbinv);
+
+ UASM_i_MFC0(p, ptr, C0_ENTRYHI);
+ uasm_i_ori(p, ptr, ptr, MIPS_ENTRYHI_EHINV);
+ UASM_i_MTC0(p, ptr, C0_ENTRYHI);
+ build_tlb_write_entry(p, l, r, tlb_indexed);
+
+ uasm_i_xori(p, ptr, ptr, MIPS_ENTRYHI_EHINV);
+ UASM_i_MTC0(p, ptr, C0_ENTRYHI);
+ build_huge_update_entries(p, pte, ptr);
+ build_huge_tlb_write_entry(p, l, r, pte, tlb_random, 0);
+
+ return;
+ }
+
build_huge_update_entries(p, pte, ptr);
build_huge_tlb_write_entry(p, l, r, pte, tlb_indexed, 0);
}
@@ -2199,7 +2216,7 @@ static void build_r4000_tlb_load_handler(void)
uasm_l_tlbl_goaround2(&l, p);
}
uasm_i_ori(&p, wr.r1, wr.r1, (_PAGE_ACCESSED | _PAGE_VALID));
- build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2);
+ build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2, 1);
#endif
uasm_l_nopage_tlbl(&l, p);
@@ -2254,7 +2271,7 @@ static void build_r4000_tlb_store_handler(void)
build_tlb_probe_entry(&p);
uasm_i_ori(&p, wr.r1, wr.r1,
_PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID | _PAGE_DIRTY);
- build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2);
+ build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2, 1);
#endif
uasm_l_nopage_tlbs(&l, p);
@@ -2310,7 +2327,7 @@ static void build_r4000_tlb_modify_handler(void)
build_tlb_probe_entry(&p);
uasm_i_ori(&p, wr.r1, wr.r1,
_PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID | _PAGE_DIRTY);
- build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2);
+ build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2, 0);
#endif
uasm_l_nopage_tlbm(&l, p);