Diffstat (limited to 'arch/openrisc/mm')
 arch/openrisc/mm/fault.c |  4 ++--
 arch/openrisc/mm/init.c  |  2 +-
 arch/openrisc/mm/tlb.c   | 16 ++++++++--------
 3 files changed, 11 insertions(+), 11 deletions(-)
diff --git a/arch/openrisc/mm/fault.c b/arch/openrisc/mm/fault.c
index e310ab499385..d0021dfae20a 100644
--- a/arch/openrisc/mm/fault.c
+++ b/arch/openrisc/mm/fault.c
@@ -33,7 +33,7 @@ unsigned long pte_errors;	/* updated by do_page_fault() */
 /* __PHX__ :: - check the vmalloc_fault in do_page_fault()
  *            - also look into include/asm-or32/mmu_context.h
  */
-volatile pgd_t *current_pgd;
+volatile pgd_t *current_pgd[NR_CPUS];
 
 extern void die(char *, struct pt_regs *, long);
@@ -319,7 +319,7 @@ vmalloc_fault:
 		   phx_mmu("vmalloc_fault");
 		 */
 
-		pgd = (pgd_t *)current_pgd + offset;
+		pgd = (pgd_t *)current_pgd[smp_processor_id()] + offset;
 		pgd_k = init_mm.pgd + offset;
 
 		/* Since we're two-level, we don't need to do both
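
Why a per-CPU array: with a single shared current_pgd, one CPU's switch_mm() could overwrite the pointer another CPU's fault handler is about to read. A minimal sketch of the pattern, assuming NR_CPUS and smp_processor_id() as used in the hunks above (the set_current_pgd() helper is hypothetical, for illustration only):

	volatile pgd_t *current_pgd[NR_CPUS];	/* one active pgd per CPU */

	/* Hypothetical helper: publish this CPU's active pgd.  Indexing by
	 * smp_processor_id() keeps each processor's slot private, so the
	 * vmalloc fault path above always walks the pgd of the CPU that
	 * actually took the fault. */
	static inline void set_current_pgd(pgd_t *pgd)
	{
		current_pgd[smp_processor_id()] = pgd;
	}
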
diff --git a/arch/openrisc/mm/init.c b/arch/openrisc/mm/init.c
index f67d82b9d22f..6972d5d6f23f 100644
--- a/arch/openrisc/mm/init.c
+++ b/arch/openrisc/mm/init.c
@@ -147,7 +147,7 @@ void __init paging_init(void)
 	 * (even if it is most probably not used until the next
 	 *  switch_mm)
 	 */
-	current_pgd = init_mm.pgd;
+	current_pgd[smp_processor_id()] = init_mm.pgd;
 
 	end = (unsigned long)__va(max_low_pfn * PAGE_SIZE);
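
paging_init() runs on the boot CPU only, so this assignment fills just that CPU's slot. Each secondary CPU has to publish its own entry when it is brought online; a sketch of that step, with secondary_cpu_mm_init() as a hypothetical name for wherever the port's secondary bring-up code lands:

	/* Sketch: a secondary CPU must fill its own current_pgd slot before
	 * it can service a vmalloc fault; it starts out on the kernel map. */
	static void secondary_cpu_mm_init(void)
	{
		current_pgd[smp_processor_id()] = init_mm.pgd;
	}
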
diff --git a/arch/openrisc/mm/tlb.c b/arch/openrisc/mm/tlb.c
index 683bd4d31c7c..6c253a2e86bc 100644
--- a/arch/openrisc/mm/tlb.c
+++ b/arch/openrisc/mm/tlb.c
@@ -49,7 +49,7 @@
  *
  */
-void flush_tlb_all(void)
+void local_flush_tlb_all(void)
 {
 	int i;
 	unsigned long num_tlb_sets;
@@ -86,7 +86,7 @@ void flush_tlb_all(void)
 #define flush_itlb_page_no_eir(addr) \
 	mtspr_off(SPR_ITLBMR_BASE(0), ITLB_OFFSET(addr), 0);
 
-void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
+void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
 {
 	if (have_dtlbeir)
 		flush_dtlb_page_eir(addr);
@@ -99,8 +99,8 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
 		flush_itlb_page_no_eir(addr);
 }
 
-void flush_tlb_range(struct vm_area_struct *vma,
-		     unsigned long start, unsigned long end)
+void local_flush_tlb_range(struct vm_area_struct *vma,
+			   unsigned long start, unsigned long end)
 {
 	int addr;
 	bool dtlbeir;
@@ -129,13 +129,13 @@ void flush_tlb_range(struct vm_area_struct *vma,
  * This should be changed to loop over mm and call flush_tlb_range.
  */
-void flush_tlb_mm(struct mm_struct *mm)
+void local_flush_tlb_mm(struct mm_struct *mm)
 {
 	/* Was seeing bugs with the mm struct passed to us. Scrapped most of
 	   this function. */
 	/* Several architectures do this */
-	flush_tlb_all();
+	local_flush_tlb_all();
 }
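
The FIXME above points at the intended shape: walk the mm's VMAs and flush each range instead of dropping the whole TLB. A sketch under that assumption, using the linked-list VMA walk (mm->mmap / vma->vm_next) that kernels of this vintage provide:

	/* Sketch of the suggested loop: flush only this mm's mappings,
	 * one VMA at a time, instead of invalidating every TLB set. */
	void local_flush_tlb_mm(struct mm_struct *mm)
	{
		struct vm_area_struct *vma;

		for (vma = mm->mmap; vma; vma = vma->vm_next)
			local_flush_tlb_range(vma, vma->vm_start, vma->vm_end);
	}
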
/* called in schedule() just before actually doing the switch_to */
@@ -149,14 +149,14 @@ void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 	 * might be invalid at points where we still need to dereference
 	 * the pgd.
 	 */
-	current_pgd = next->pgd;
+	current_pgd[smp_processor_id()] = next->pgd;
 
 	/* We don't have context support implemented, so flush all
 	 * entries belonging to previous map
 	 */
 
 	if (prev != next)
-		flush_tlb_mm(prev);
+		local_flush_tlb_mm(prev);
 }
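
Renaming the flush routines to local_* frees the generic flush_tlb_* names for SMP wrappers that broadcast a flush to every CPU. A sketch of one such wrapper, assuming the kernel's on_each_cpu() helper; the ipi_flush_tlb_all() trampoline is an illustrative name, not part of this diff:

	#include <linux/smp.h>

	/* Trampoline with the void (*)(void *) signature on_each_cpu()
	 * expects; it simply flushes the TLB of the CPU it runs on. */
	static void ipi_flush_tlb_all(void *ignored)
	{
		local_flush_tlb_all();
	}

	void flush_tlb_all(void)
	{
		/* Run the local flush on each online CPU; wait=1 blocks
		 * until every CPU has completed its flush. */
		on_each_cpu(ipi_flush_tlb_all, NULL, 1);
	}

flush_tlb_mm(), flush_tlb_page() and flush_tlb_range() would follow the same pattern, packing their arguments into a struct handed through the info pointer.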