author		Bart Van Assche <bvanassche@acm.org>	2022-08-30 23:58:42 +0300
committer	Kees Cook <keescook@chromium.org>	2022-09-30 05:21:10 +0300
commit		17006e86a7641fa3c50324cfb602f0e74dac8527
tree		eea78255cce61b84f4f96ceda8293ceed5e2e4b8 /arch/sparc/mm
parent		8c86f29bfb18465d15b05cfd26a6454ec787b793
sparc: Unbreak the build
Fix the following build errors:

arch/sparc/mm/srmmu.c: In function ‘smp_flush_page_for_dma’:
arch/sparc/mm/srmmu.c:1639:13: error: cast between incompatible function types from ‘void (*)(long unsigned int)’ to ‘void (*)(long unsigned int, long unsigned int, long unsigned int, long unsigned int, long unsigned int)’ [-Werror=cast-function-type]
 1639 |         xc1((smpfunc_t) local_ops->page_for_dma, page);
      |             ^
arch/sparc/mm/srmmu.c: In function ‘smp_flush_cache_mm’:
arch/sparc/mm/srmmu.c:1662:29: error: cast between incompatible function types from ‘void (*)(struct mm_struct *)’ to ‘void (*)(long unsigned int, long unsigned int, long unsigned int, long unsigned int, long unsigned int)’ [-Werror=cast-function-type]
 1662 |                 xc1((smpfunc_t) local_ops->cache_mm, (unsigned long) mm);
      |
[ ... ]

Compile-tested only.

Fixes: 552a23a0e5d0 ("Makefile: Enable -Wcast-function-type")
Cc: stable@vger.kernel.org
Signed-off-by: Bart Van Assche <bvanassche@acm.org>
Tested-by: Andreas Larsson <andreas@gaisler.com>
Signed-off-by: Kees Cook <keescook@chromium.org>
Link: https://lore.kernel.org/r/20220830205854.1918026-1-bvanassche@acm.org
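[Editorial note, not part of the commit message: a minimal standalone sketch of the diagnostic class this commit addresses is shown below. The names flush_page and sketch.c are illustrative stand-ins, not kernel code; flush_page plays the role of local_ops->page_for_dma. Compiling with gcc -Wcast-function-type (promoted to an error by -Werror, as in the kernel build) reports the same kind of error quoted above.]

    /* sketch.c — illustrative only, not part of the patch */
    typedef void (*smpfunc_t)(unsigned long, unsigned long, unsigned long,
                              unsigned long, unsigned long);

    /* Stand-in for a one-argument flush operation. */
    static void flush_page(unsigned long page)
    {
            (void)page;
    }

    int main(void)
    {
            /*
             * Casting a one-argument function pointer to the five-argument
             * smpfunc_t is exactly what -Wcast-function-type rejects; the
             * patch removes such casts from the xc1()/xc2()/xc3() call sites.
             */
            smpfunc_t fn = (smpfunc_t)flush_page;
            (void)fn;
            return 0;
    }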
Diffstat (limited to 'arch/sparc/mm')
-rw-r--r--	arch/sparc/mm/srmmu.c	29
1 file changed, 13 insertions(+), 16 deletions(-)
diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c
index a9aa6a92c7fe..13f027afc875 100644
--- a/arch/sparc/mm/srmmu.c
+++ b/arch/sparc/mm/srmmu.c
@@ -1636,19 +1636,19 @@ static void __init get_srmmu_type(void)
/* Local cross-calls. */
static void smp_flush_page_for_dma(unsigned long page)
{
- xc1((smpfunc_t) local_ops->page_for_dma, page);
+ xc1(local_ops->page_for_dma, page);
local_ops->page_for_dma(page);
}
static void smp_flush_cache_all(void)
{
- xc0((smpfunc_t) local_ops->cache_all);
+ xc0(local_ops->cache_all);
local_ops->cache_all();
}
static void smp_flush_tlb_all(void)
{
- xc0((smpfunc_t) local_ops->tlb_all);
+ xc0(local_ops->tlb_all);
local_ops->tlb_all();
}
@@ -1659,7 +1659,7 @@ static void smp_flush_cache_mm(struct mm_struct *mm)
cpumask_copy(&cpu_mask, mm_cpumask(mm));
cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
if (!cpumask_empty(&cpu_mask))
- xc1((smpfunc_t) local_ops->cache_mm, (unsigned long) mm);
+ xc1(local_ops->cache_mm, (unsigned long)mm);
local_ops->cache_mm(mm);
}
}
@@ -1671,7 +1671,7 @@ static void smp_flush_tlb_mm(struct mm_struct *mm)
cpumask_copy(&cpu_mask, mm_cpumask(mm));
cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
if (!cpumask_empty(&cpu_mask)) {
- xc1((smpfunc_t) local_ops->tlb_mm, (unsigned long) mm);
+ xc1(local_ops->tlb_mm, (unsigned long)mm);
if (atomic_read(&mm->mm_users) == 1 && current->active_mm == mm)
cpumask_copy(mm_cpumask(mm),
cpumask_of(smp_processor_id()));
@@ -1691,8 +1691,8 @@ static void smp_flush_cache_range(struct vm_area_struct *vma,
cpumask_copy(&cpu_mask, mm_cpumask(mm));
cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
if (!cpumask_empty(&cpu_mask))
- xc3((smpfunc_t) local_ops->cache_range,
- (unsigned long) vma, start, end);
+ xc3(local_ops->cache_range, (unsigned long)vma, start,
+ end);
local_ops->cache_range(vma, start, end);
}
}
@@ -1708,8 +1708,8 @@ static void smp_flush_tlb_range(struct vm_area_struct *vma,
cpumask_copy(&cpu_mask, mm_cpumask(mm));
cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
if (!cpumask_empty(&cpu_mask))
- xc3((smpfunc_t) local_ops->tlb_range,
- (unsigned long) vma, start, end);
+ xc3(local_ops->tlb_range, (unsigned long)vma, start,
+ end);
local_ops->tlb_range(vma, start, end);
}
}
@@ -1723,8 +1723,7 @@ static void smp_flush_cache_page(struct vm_area_struct *vma, unsigned long page)
cpumask_copy(&cpu_mask, mm_cpumask(mm));
cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
if (!cpumask_empty(&cpu_mask))
- xc2((smpfunc_t) local_ops->cache_page,
- (unsigned long) vma, page);
+ xc2(local_ops->cache_page, (unsigned long)vma, page);
local_ops->cache_page(vma, page);
}
}
@@ -1738,8 +1737,7 @@ static void smp_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
cpumask_copy(&cpu_mask, mm_cpumask(mm));
cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
if (!cpumask_empty(&cpu_mask))
- xc2((smpfunc_t) local_ops->tlb_page,
- (unsigned long) vma, page);
+ xc2(local_ops->tlb_page, (unsigned long)vma, page);
local_ops->tlb_page(vma, page);
}
}
@@ -1753,7 +1751,7 @@ static void smp_flush_page_to_ram(unsigned long page)
* XXX This experiment failed, research further... -DaveM
*/
#if 1
- xc1((smpfunc_t) local_ops->page_to_ram, page);
+ xc1(local_ops->page_to_ram, page);
#endif
local_ops->page_to_ram(page);
}
@@ -1764,8 +1762,7 @@ static void smp_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr)
cpumask_copy(&cpu_mask, mm_cpumask(mm));
cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
if (!cpumask_empty(&cpu_mask))
- xc2((smpfunc_t) local_ops->sig_insns,
- (unsigned long) mm, insn_addr);
+ xc2(local_ops->sig_insns, (unsigned long)mm, insn_addr);
local_ops->sig_insns(mm, insn_addr);
}