Diffstat (limited to 'arch/sparc/mm')
-rw-r--r--  arch/sparc/mm/Makefile       |  2
-rw-r--r--  arch/sparc/mm/execmem.c      | 21
-rw-r--r--  arch/sparc/mm/hugetlbpage.c  | 21
-rw-r--r--  arch/sparc/mm/init_64.c      | 18
-rw-r--r--  arch/sparc/mm/leon_mm.c      |  8
-rw-r--r--  arch/sparc/mm/srmmu.c        | 40
-rw-r--r--  arch/sparc/mm/tlb.c          |  7
7 files changed, 46 insertions, 71 deletions
diff --git a/arch/sparc/mm/Makefile b/arch/sparc/mm/Makefile
index 809d993f6d88..2d1752108d77 100644
--- a/arch/sparc/mm/Makefile
+++ b/arch/sparc/mm/Makefile
@@ -14,3 +14,5 @@ obj-$(CONFIG_SPARC32) += leon_mm.o
# Only used by sparc64
obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
+
+obj-$(CONFIG_EXECMEM) += execmem.o
diff --git a/arch/sparc/mm/execmem.c b/arch/sparc/mm/execmem.c
new file mode 100644
index 000000000000..0fac97dd5728
--- /dev/null
+++ b/arch/sparc/mm/execmem.c
@@ -0,0 +1,21 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/mm.h>
+#include <linux/execmem.h>
+
+static struct execmem_info execmem_info __ro_after_init;
+
+struct execmem_info __init *execmem_arch_setup(void)
+{
+ execmem_info = (struct execmem_info){
+ .ranges = {
+ [EXECMEM_DEFAULT] = {
+ .start = MODULES_VADDR,
+ .end = MODULES_END,
+ .pgprot = PAGE_KERNEL,
+ .alignment = 1,
+ },
+ },
+ };
+
+ return &execmem_info;
+}
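
The new execmem.c hooks sparc into the generic executable-memory allocator that replaced the per-arch module_alloc() (merged around v6.10). A hedged sketch of how the generic core consumes the range registered above, simplified from memory rather than quoted from mm/execmem.c; types an arch leaves unset fall back to EXECMEM_DEFAULT, so this single entry covers module text, kprobes, ftrace and BPF allocations:

static void *execmem_alloc_sketch(struct execmem_info *info,
				  enum execmem_type type, size_t size)
{
	struct execmem_range *r = &info->ranges[type];

	/* Types the arch left unset fall back to the default range. */
	if (!r->start)
		r = &info->ranges[EXECMEM_DEFAULT];

	/* Allocate from [start, end) with the registered protection
	 * and alignment. */
	return __vmalloc_node_range(size, r->alignment, r->start, r->end,
				    GFP_KERNEL, r->pgprot, 0, NUMA_NO_NODE,
				    __builtin_return_address(0));
}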
diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
index b432500c13a5..cc91ca7a1e18 100644
--- a/arch/sparc/mm/hugetlbpage.c
+++ b/arch/sparc/mm/hugetlbpage.c
@@ -31,17 +31,15 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp,
{
struct hstate *h = hstate_file(filp);
unsigned long task_size = TASK_SIZE;
- struct vm_unmapped_area_info info;
+ struct vm_unmapped_area_info info = {};
if (test_thread_flag(TIF_32BIT))
task_size = STACK_TOP32;
- info.flags = 0;
info.length = len;
info.low_limit = TASK_UNMAPPED_BASE;
info.high_limit = min(task_size, VA_EXCLUDE_START);
info.align_mask = PAGE_MASK & ~huge_page_mask(h);
- info.align_offset = 0;
addr = vm_unmapped_area(&info);
if ((addr & ~PAGE_MASK) && task_size > VA_EXCLUDE_END) {
@@ -63,7 +61,7 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
struct hstate *h = hstate_file(filp);
struct mm_struct *mm = current->mm;
unsigned long addr = addr0;
- struct vm_unmapped_area_info info;
+ struct vm_unmapped_area_info info = {};
/* This should only ever run for 32-bit processes. */
BUG_ON(!test_thread_flag(TIF_32BIT));
@@ -73,7 +71,6 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
info.low_limit = PAGE_SIZE;
info.high_limit = mm->mmap_base;
info.align_mask = PAGE_MASK & ~huge_page_mask(h);
- info.align_offset = 0;
addr = vm_unmapped_area(&info);
/*
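
Both hunks above rely on the empty initializer: "info = {}" zero-fills every member of the struct, which is why the explicit info.flags = 0 and info.align_offset = 0 stores can simply be dropped. A standalone model (not kernel code; the struct is a stand-in for vm_unmapped_area_info):

#include <stdio.h>

struct unmapped_area_info {
	unsigned long flags;
	unsigned long length;
	unsigned long align_mask;
	unsigned long align_offset;
};

int main(void)
{
	struct unmapped_area_info info = {};	/* GNU C / C23 empty init */

	/* Every member is zero, so separate "= 0" stores are redundant. */
	printf("%lu %lu\n", info.flags, info.align_offset);	/* prints: 0 0 */
	return 0;
}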
@@ -123,7 +120,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
(!vma || addr + len <= vm_start_gap(vma)))
return addr;
}
- if (mm->get_unmapped_area == arch_get_unmapped_area)
+ if (!test_bit(MMF_TOPDOWN, &mm->flags))
return hugetlb_get_unmapped_area_bottomup(file, addr, len,
pgoff, flags);
else
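
This hunk comes from the series that moved mmap layout policy out of the mm->get_unmapped_area function pointer and into an MMF_TOPDOWN mm flag, so arch code now tests a bit instead of comparing function pointers. A hedged restatement under an illustrative helper name:

/* Illustrative only: the question the new test asks. */
static inline bool mmap_is_topdown(struct mm_struct *mm)
{
	return test_bit(MMF_TOPDOWN, &mm->flags);
}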
@@ -407,18 +404,6 @@ pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
return entry;
}
-int pmd_huge(pmd_t pmd)
-{
- return !pmd_none(pmd) &&
- (pmd_val(pmd) & (_PAGE_VALID|_PAGE_PMD_HUGE)) != _PAGE_VALID;
-}
-
-int pud_huge(pud_t pud)
-{
- return !pud_none(pud) &&
- (pud_val(pud) & (_PAGE_VALID|_PAGE_PUD_HUGE)) != _PAGE_VALID;
-}
-
static void hugetlb_free_pte_range(struct mmu_gather *tlb, pmd_t *pmd,
unsigned long addr)
{
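
The deleted pmd_huge()/pud_huge() predicates were removed tree-wide; generic code now asks pmd_leaf()/pud_leaf() instead. For reference, the removed sparc check restated under an illustrative name: an entry is a huge (leaf) mapping when it is valid and not merely a valid pointer to a lower-level table, i.e. its relevant bits are not exactly _PAGE_VALID:

/* Illustrative restatement of the removed sparc predicate. */
static inline bool sparc_pmd_is_leaf(pmd_t pmd)
{
	return !pmd_none(pmd) &&
	       (pmd_val(pmd) & (_PAGE_VALID | _PAGE_PMD_HUGE)) != _PAGE_VALID;
}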
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
index 1ca9054d9b97..21f8cbbd0581 100644
--- a/arch/sparc/mm/init_64.c
+++ b/arch/sparc/mm/init_64.c
@@ -490,7 +490,7 @@ void flush_dcache_folio(struct folio *folio)
}
set_dcache_dirty(folio, this_cpu);
} else {
- /* We could delay the flush for the !page_mapping
+ /* We could delay the flush for the !folio_mapping
* case too. But that case is for exec env/arg
* pages and those are %99 certainly going to get
* faulted into the tlb (and thus flushed) anyways.
@@ -1075,14 +1075,9 @@ static void __init allocate_node_data(int nid)
{
struct pglist_data *p;
unsigned long start_pfn, end_pfn;
-#ifdef CONFIG_NUMA
- NODE_DATA(nid) = memblock_alloc_node(sizeof(struct pglist_data),
- SMP_CACHE_BYTES, nid);
- if (!NODE_DATA(nid)) {
- prom_printf("Cannot allocate pglist_data for nid[%d]\n", nid);
- prom_halt();
- }
+#ifdef CONFIG_NUMA
+ alloc_node_data(nid);
NODE_DATA(nid)->node_id = nid;
#endif
@@ -1115,11 +1110,9 @@ static void init_node_masks_nonnuma(void)
}
#ifdef CONFIG_NUMA
-struct pglist_data *node_data[MAX_NUMNODES];
EXPORT_SYMBOL(numa_cpu_lookup_table);
EXPORT_SYMBOL(numa_cpumask_lookup_table);
-EXPORT_SYMBOL(node_data);
static int scan_pio_for_cfg_handle(struct mdesc_handle *md, u64 pio,
u32 cfg_handle)
@@ -2640,11 +2633,6 @@ int __meminit vmemmap_populate(unsigned long vstart, unsigned long vend,
return 0;
}
-
-void vmemmap_free(unsigned long start, unsigned long end,
- struct vmem_altmap *altmap)
-{
-}
#endif /* CONFIG_SPARSEMEM_VMEMMAP */
/* These are actually filled in at boot time by sun4{u,v}_pgprot_init() */
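
The allocate_node_data() hunk drops sparc's hand-rolled pglist_data allocation in favor of a generic alloc_node_data(), part of the consolidation that also removes the per-arch node_data[] array and its export above. A hedged sketch of what the generic helper does, modeled on the sparc code it replaces (the real helper lives in generic MM code and panics on failure instead of calling the sparc PROM):

void __init alloc_node_data_sketch(int nid)	/* illustrative name */
{
	NODE_DATA(nid) = memblock_alloc_node(sizeof(struct pglist_data),
					     SMP_CACHE_BYTES, nid);
	if (!NODE_DATA(nid))
		panic("Cannot allocate pglist_data for nid[%d]\n", nid);
}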
diff --git a/arch/sparc/mm/leon_mm.c b/arch/sparc/mm/leon_mm.c
index ec61ff1f96b7..1dc9b3d70eda 100644
--- a/arch/sparc/mm/leon_mm.c
+++ b/arch/sparc/mm/leon_mm.c
@@ -39,12 +39,10 @@ unsigned long leon_swprobe(unsigned long vaddr, unsigned long *paddr)
unsigned int ctxtbl;
unsigned int pgd, pmd, ped;
unsigned int ptr;
- unsigned int lvl, pte, paddrbase;
+ unsigned int lvl, pte;
unsigned int ctx;
unsigned int paddr_calc;
- paddrbase = 0;
-
if (srmmu_swprobe_trace)
printk(KERN_INFO "swprobe: trace on\n");
@@ -73,7 +71,6 @@ unsigned long leon_swprobe(unsigned long vaddr, unsigned long *paddr)
printk(KERN_INFO "swprobe: pgd is entry level 3\n");
lvl = 3;
pte = pgd;
- paddrbase = pgd & _SRMMU_PTE_PMASK_LEON;
goto ready;
}
if (((pgd & SRMMU_ET_MASK) != SRMMU_ET_PTD)) {
@@ -96,7 +93,6 @@ unsigned long leon_swprobe(unsigned long vaddr, unsigned long *paddr)
printk(KERN_INFO "swprobe: pmd is entry level 2\n");
lvl = 2;
pte = pmd;
- paddrbase = pmd & _SRMMU_PTE_PMASK_LEON;
goto ready;
}
if (((pmd & SRMMU_ET_MASK) != SRMMU_ET_PTD)) {
@@ -124,7 +120,6 @@ unsigned long leon_swprobe(unsigned long vaddr, unsigned long *paddr)
printk(KERN_INFO "swprobe: ped is entry level 1\n");
lvl = 1;
pte = ped;
- paddrbase = ped & _SRMMU_PTE_PMASK_LEON;
goto ready;
}
if (((ped & SRMMU_ET_MASK) != SRMMU_ET_PTD)) {
@@ -147,7 +142,6 @@ unsigned long leon_swprobe(unsigned long vaddr, unsigned long *paddr)
printk(KERN_INFO "swprobe: ptr is entry level 0\n");
lvl = 0;
pte = ptr;
- paddrbase = ptr & _SRMMU_PTE_PMASK_LEON;
goto ready;
}
if (srmmu_swprobe_trace)
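
All four leon_mm.c hunks remove the same dead store: paddrbase was assigned on every early-exit path but never read afterwards. A standalone model of the pattern (GCC flags it with -Wunused-but-set-variable); the mask constant is illustrative, not _SRMMU_PTE_PMASK_LEON:

unsigned int probe_model(unsigned int pgd)
{
	unsigned int pte;
	unsigned int paddrbase;		/* set but never used */

	paddrbase = pgd & 0xffffff00u;	/* dead store, like the removed lines */
	pte = pgd;
	return pte;			/* paddrbase never contributes */
}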
diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c
index 852085ada368..9df51a62333d 100644
--- a/arch/sparc/mm/srmmu.c
+++ b/arch/sparc/mm/srmmu.c
@@ -1653,13 +1653,15 @@ static void smp_flush_tlb_all(void)
local_ops->tlb_all();
}
+static bool any_other_mm_cpus(struct mm_struct *mm)
+{
+ return cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids;
+}
+
static void smp_flush_cache_mm(struct mm_struct *mm)
{
if (mm->context != NO_CONTEXT) {
- cpumask_t cpu_mask;
- cpumask_copy(&cpu_mask, mm_cpumask(mm));
- cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
- if (!cpumask_empty(&cpu_mask))
+ if (any_other_mm_cpus(mm))
xc1(local_ops->cache_mm, (unsigned long)mm);
local_ops->cache_mm(mm);
}
@@ -1668,10 +1670,7 @@ static void smp_flush_cache_mm(struct mm_struct *mm)
static void smp_flush_tlb_mm(struct mm_struct *mm)
{
if (mm->context != NO_CONTEXT) {
- cpumask_t cpu_mask;
- cpumask_copy(&cpu_mask, mm_cpumask(mm));
- cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
- if (!cpumask_empty(&cpu_mask)) {
+ if (any_other_mm_cpus(mm)) {
xc1(local_ops->tlb_mm, (unsigned long)mm);
if (atomic_read(&mm->mm_users) == 1 && current->active_mm == mm)
cpumask_copy(mm_cpumask(mm),
@@ -1688,10 +1687,7 @@ static void smp_flush_cache_range(struct vm_area_struct *vma,
struct mm_struct *mm = vma->vm_mm;
if (mm->context != NO_CONTEXT) {
- cpumask_t cpu_mask;
- cpumask_copy(&cpu_mask, mm_cpumask(mm));
- cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
- if (!cpumask_empty(&cpu_mask))
+ if (any_other_mm_cpus(mm))
xc3(local_ops->cache_range, (unsigned long)vma, start,
end);
local_ops->cache_range(vma, start, end);
@@ -1705,10 +1701,7 @@ static void smp_flush_tlb_range(struct vm_area_struct *vma,
struct mm_struct *mm = vma->vm_mm;
if (mm->context != NO_CONTEXT) {
- cpumask_t cpu_mask;
- cpumask_copy(&cpu_mask, mm_cpumask(mm));
- cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
- if (!cpumask_empty(&cpu_mask))
+ if (any_other_mm_cpus(mm))
xc3(local_ops->tlb_range, (unsigned long)vma, start,
end);
local_ops->tlb_range(vma, start, end);
@@ -1720,10 +1713,7 @@ static void smp_flush_cache_page(struct vm_area_struct *vma, unsigned long page)
struct mm_struct *mm = vma->vm_mm;
if (mm->context != NO_CONTEXT) {
- cpumask_t cpu_mask;
- cpumask_copy(&cpu_mask, mm_cpumask(mm));
- cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
- if (!cpumask_empty(&cpu_mask))
+ if (any_other_mm_cpus(mm))
xc2(local_ops->cache_page, (unsigned long)vma, page);
local_ops->cache_page(vma, page);
}
@@ -1734,10 +1724,7 @@ static void smp_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
struct mm_struct *mm = vma->vm_mm;
if (mm->context != NO_CONTEXT) {
- cpumask_t cpu_mask;
- cpumask_copy(&cpu_mask, mm_cpumask(mm));
- cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
- if (!cpumask_empty(&cpu_mask))
+ if (any_other_mm_cpus(mm))
xc2(local_ops->tlb_page, (unsigned long)vma, page);
local_ops->tlb_page(vma, page);
}
@@ -1759,10 +1746,7 @@ static void smp_flush_page_to_ram(unsigned long page)
static void smp_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr)
{
- cpumask_t cpu_mask;
- cpumask_copy(&cpu_mask, mm_cpumask(mm));
- cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
- if (!cpumask_empty(&cpu_mask))
+ if (any_other_mm_cpus(mm))
xc2(local_ops->sig_insns, (unsigned long)mm, insn_addr);
local_ops->sig_insns(mm, insn_addr);
}
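
Every srmmu.c hunk replaces the same four-line idiom: copy the mm's cpumask to the stack, clear the local CPU, test for emptiness. cpumask_any_but() answers the underlying question (is any CPU other than mine running this mm?) in a single scan, returning nr_cpu_ids or more when no other bit is set, and the new helper also avoids an on-stack cpumask_t, which grows with CONFIG_NR_CPUS. A standalone userspace model of the semantics (not the kernel API):

#include <stdint.h>
#include <stdio.h>

/* Model: is any bit other than bit 'self' set in mask? */
static int any_other_cpu(uint64_t mask, unsigned int self)
{
	return (mask & ~(UINT64_C(1) << self)) != 0;
}

int main(void)
{
	printf("%d\n", any_other_cpu(0x5, 0));	/* CPUs 0 and 2 -> 1 */
	printf("%d\n", any_other_cpu(0x1, 0));	/* only self    -> 0 */
	return 0;
}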
diff --git a/arch/sparc/mm/tlb.c b/arch/sparc/mm/tlb.c
index b44d79d778c7..8648a50afe88 100644
--- a/arch/sparc/mm/tlb.c
+++ b/arch/sparc/mm/tlb.c
@@ -183,12 +183,12 @@ static void __set_pmd_acct(struct mm_struct *mm, unsigned long addr,
* hugetlb_pte_count.
*/
if (pmd_val(pmd) & _PAGE_PMD_HUGE) {
- if (is_huge_zero_page(pmd_page(pmd)))
+ if (is_huge_zero_pmd(pmd))
mm->context.hugetlb_pte_count++;
else
mm->context.thp_pte_count++;
} else {
- if (is_huge_zero_page(pmd_page(orig)))
+ if (is_huge_zero_pmd(orig))
mm->context.hugetlb_pte_count--;
else
mm->context.thp_pte_count--;
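
is_huge_zero_pmd() tests the pmd value directly instead of going through pmd_page(), so the accounting code no longer touches a struct page for a pmd that may not map one. For reference, the generic helper is roughly the following (quoted from memory from include/linux/huge_mm.h, so treat it as a sketch):

static inline bool is_huge_zero_pmd(pmd_t pmd)
{
	return pmd_present(pmd) && READ_ONCE(huge_zero_pfn) == pmd_pfn(pmd);
}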
@@ -249,6 +249,7 @@ pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
{
pmd_t old, entry;
+ VM_WARN_ON_ONCE(!pmd_present(*pmdp));
entry = __pmd(pmd_val(*pmdp) & ~_PAGE_VALID);
old = pmdp_establish(vma, address, pmdp, entry);
flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
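
The new warning documents an invariant rather than changing behavior: pmdp_invalidate() only makes sense on a present huge pmd, since all it does is clear _PAGE_VALID and flush. The sequence above, restated with comments (illustrative only, not the full sparc function):

static pmd_t pmdp_invalidate_sketch(struct vm_area_struct *vma,
				    unsigned long addr, pmd_t *pmdp)
{
	pmd_t entry, old;

	VM_WARN_ON_ONCE(!pmd_present(*pmdp));	/* caller must pass a present pmd */
	entry = __pmd(pmd_val(*pmdp) & ~_PAGE_VALID);	  /* strip the valid bit */
	old = pmdp_establish(vma, addr, pmdp, entry);	  /* atomic exchange */
	flush_tlb_range(vma, addr, addr + HPAGE_PMD_SIZE); /* purge stale TLB entries */
	return old;
}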
@@ -259,7 +260,7 @@ pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
* Sanity check pmd before doing the actual decrement.
*/
if ((pmd_val(entry) & _PAGE_PMD_HUGE) &&
- !is_huge_zero_page(pmd_page(entry)))
+ !is_huge_zero_pmd(entry))
(vma->vm_mm)->context.thp_pte_count--;
return old;