From a3182c91ef4e7dda90ff080a4132efd3ecb8786a Mon Sep 17 00:00:00 2001
From: Anup Patel
Date: Thu, 25 Apr 2019 08:38:41 +0000
Subject: RISC-V: Access CSRs using CSR numbers

We should prefer accessing CSRs using their CSR numbers because:
1. It compiles fine with older toolchains.
2. We can use the latest CSR names in the #define macro names of CSR
   numbers, as per the RISC-V spec.
3. We can access newly added CSRs even if the toolchain does not
   recognize them by name.

Signed-off-by: Anup Patel
Reviewed-by: Christoph Hellwig
Signed-off-by: Palmer Dabbelt
---
 arch/riscv/mm/fault.c | 6 +-----
 1 file changed, 1 insertion(+), 5 deletions(-)

(limited to 'arch/riscv/mm')

diff --git a/arch/riscv/mm/fault.c b/arch/riscv/mm/fault.c
index 88401d5125bc..26293bc053a8 100644
--- a/arch/riscv/mm/fault.c
+++ b/arch/riscv/mm/fault.c
@@ -239,13 +239,9 @@ vmalloc_fault:
 		 * Do _not_ use "tsk->active_mm->pgd" here.
 		 * We might be inside an interrupt in the middle
 		 * of a task switch.
-		 *
-		 * Note: Use the old sptbr name instead of using the current
-		 * satp name to support binutils 2.29 which doesn't know about
-		 * the privileged ISA 1.10 yet.
 		 */
 		index = pgd_index(addr);
-		pgd = (pgd_t *)pfn_to_virt(csr_read(sptbr)) + index;
+		pgd = (pgd_t *)pfn_to_virt(csr_read(CSR_SATP)) + index;
 		pgd_k = init_mm.pgd + index;
 
 		if (!pgd_present(*pgd_k))
-- cgit v1.2.3
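For concreteness, this is roughly what number-based accessors look like. The sketch below is modeled on the kernel's asm/csr.h of this era but is illustrative rather than verbatim; the point is that the CSR ends up as a plain immediate in the asm string, so the assembler accepts it even when it does not know the CSR's name:

/*
 * Expand the macro argument first, then stringify it, so that
 * csr_read(CSR_SATP) emits "csrr %0, 0x180" and not "csrr %0, CSR_SATP".
 */
#define __stringify_1(x)	#x
#define __stringify(x)		__stringify_1(x)

#define CSR_SATP		0x180	/* satp's number per the privileged spec */

#define csr_read(csr)						\
({								\
	unsigned long __v;					\
	__asm__ __volatile__ ("csrr %0, " __stringify(csr)	\
			      : "=r" (__v) : : "memory");	\
	__v;							\
})

#define csr_write(csr, val)					\
({								\
	unsigned long __wv = (unsigned long)(val);		\
	__asm__ __volatile__ ("csrw " __stringify(csr) ", %0"	\
			      : : "rK" (__wv) : "memory");	\
})

With these, csr_read(CSR_SATP) assembles even under binutils 2.29, which only knows the register by its old sptbr name.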
From 58de77545e53b94cd6c816776197dade598632c5 Mon Sep 17 00:00:00 2001
From: Gary Guo
Date: Wed, 27 Mar 2019 00:41:25 +0000
Subject: riscv: move flush_icache_{all,mm} to cacheflush.c

Currently, flush_icache_all is macro-expanded into an SBI call, yet no
asm/sbi.h is included in asm/cacheflush.h. It can be moved to
mm/cacheflush.c instead (the SBI call will dominate performance-wise,
so there is no worry about not having it inlined).

Currently, flush_icache_mm stays in kernel/smp.c, which looks like a
hack to prevent it from being compiled when CONFIG_SMP=n. It should
also be in mm/cacheflush.c.

Signed-off-by: Gary Guo
Signed-off-by: Christoph Hellwig
Signed-off-by: Palmer Dabbelt
---
 arch/riscv/include/asm/cacheflush.h |  2 +-
 arch/riscv/kernel/smp.c             | 49 -----------------------------
 arch/riscv/mm/cacheflush.c          | 61 +++++++++++++++++++++++++++++++++++
 3 files changed, 62 insertions(+), 50 deletions(-)

(limited to 'arch/riscv/mm')

diff --git a/arch/riscv/include/asm/cacheflush.h b/arch/riscv/include/asm/cacheflush.h
index 8f13074413a7..1f4ba68ab9aa 100644
--- a/arch/riscv/include/asm/cacheflush.h
+++ b/arch/riscv/include/asm/cacheflush.h
@@ -47,7 +47,7 @@ static inline void flush_dcache_page(struct page *page)
 
 #else /* CONFIG_SMP */
 
-#define flush_icache_all() sbi_remote_fence_i(NULL)
+void flush_icache_all(void);
 void flush_icache_mm(struct mm_struct *mm, bool local);
 
 #endif /* CONFIG_SMP */

diff --git a/arch/riscv/kernel/smp.c b/arch/riscv/kernel/smp.c
index 9253de5d91b6..b2537ffa855c 100644
--- a/arch/riscv/kernel/smp.c
+++ b/arch/riscv/kernel/smp.c
@@ -205,52 +205,3 @@ void smp_send_reschedule(int cpu)
 	send_ipi_message(cpumask_of(cpu), IPI_RESCHEDULE);
 }
 
-/*
- * Performs an icache flush for the given MM context. RISC-V has no direct
- * mechanism for instruction cache shoot downs, so instead we send an IPI that
- * informs the remote harts they need to flush their local instruction caches.
- * To avoid pathologically slow behavior in a common case (a bunch of
- * single-hart processes on a many-hart machine, ie 'make -j') we avoid the
- * IPIs for harts that are not currently executing a MM context and instead
- * schedule a deferred local instruction cache flush to be performed before
- * execution resumes on each hart.
- */
-void flush_icache_mm(struct mm_struct *mm, bool local)
-{
-	unsigned int cpu;
-	cpumask_t others, hmask, *mask;
-
-	preempt_disable();
-
-	/* Mark every hart's icache as needing a flush for this MM. */
-	mask = &mm->context.icache_stale_mask;
-	cpumask_setall(mask);
-	/* Flush this hart's I$ now, and mark it as flushed. */
-	cpu = smp_processor_id();
-	cpumask_clear_cpu(cpu, mask);
-	local_flush_icache_all();
-
-	/*
-	 * Flush the I$ of other harts concurrently executing, and mark them as
-	 * flushed.
-	 */
-	cpumask_andnot(&others, mm_cpumask(mm), cpumask_of(cpu));
-	local |= cpumask_empty(&others);
-	if (mm != current->active_mm || !local) {
-		cpumask_clear(&hmask);
-		riscv_cpuid_to_hartid_mask(&others, &hmask);
-		sbi_remote_fence_i(hmask.bits);
-	} else {
-		/*
-		 * It's assumed that at least one strongly ordered operation is
-		 * performed on this hart between setting a hart's cpumask bit
-		 * and scheduling this MM context on that hart. Sending an SBI
-		 * remote message will do this, but in the case where no
-		 * messages are sent we still need to order this hart's writes
-		 * with flush_icache_deferred().
-		 */
-		smp_mb();
-	}
-
-	preempt_enable();
-}

diff --git a/arch/riscv/mm/cacheflush.c b/arch/riscv/mm/cacheflush.c
index 498c0a0814fe..497b7d07af0c 100644
--- a/arch/riscv/mm/cacheflush.c
+++ b/arch/riscv/mm/cacheflush.c
@@ -14,6 +14,67 @@
 #include <asm/pgtable.h>
 #include <asm/cacheflush.h>
 
+#ifdef CONFIG_SMP
+
+#include <asm/sbi.h>
+
+void flush_icache_all(void)
+{
+	sbi_remote_fence_i(NULL);
+}
+
+/*
+ * Performs an icache flush for the given MM context. RISC-V has no direct
+ * mechanism for instruction cache shoot downs, so instead we send an IPI that
+ * informs the remote harts they need to flush their local instruction caches.
+ * To avoid pathologically slow behavior in a common case (a bunch of
+ * single-hart processes on a many-hart machine, ie 'make -j') we avoid the
+ * IPIs for harts that are not currently executing a MM context and instead
+ * schedule a deferred local instruction cache flush to be performed before
+ * execution resumes on each hart.
+ */
+void flush_icache_mm(struct mm_struct *mm, bool local)
+{
+	unsigned int cpu;
+	cpumask_t others, hmask, *mask;
+
+	preempt_disable();
+
+	/* Mark every hart's icache as needing a flush for this MM. */
+	mask = &mm->context.icache_stale_mask;
+	cpumask_setall(mask);
+	/* Flush this hart's I$ now, and mark it as flushed. */
+	cpu = smp_processor_id();
+	cpumask_clear_cpu(cpu, mask);
+	local_flush_icache_all();
+
+	/*
+	 * Flush the I$ of other harts concurrently executing, and mark them as
+	 * flushed.
+	 */
+	cpumask_andnot(&others, mm_cpumask(mm), cpumask_of(cpu));
+	local |= cpumask_empty(&others);
+	if (mm != current->active_mm || !local) {
+		cpumask_clear(&hmask);
+		riscv_cpuid_to_hartid_mask(&others, &hmask);
+		sbi_remote_fence_i(hmask.bits);
+	} else {
+		/*
+		 * It's assumed that at least one strongly ordered operation is
+		 * performed on this hart between setting a hart's cpumask bit
+		 * and scheduling this MM context on that hart. Sending an SBI
+		 * remote message will do this, but in the case where no
+		 * messages are sent we still need to order this hart's writes
+		 * with flush_icache_deferred().
+		 */
+		smp_mb();
+	}
+
+	preempt_enable();
+}
+
+#endif /* CONFIG_SMP */
+
 void flush_icache_pte(pte_t pte)
 {
 	struct page *page = pte_page(pte);
-- cgit v1.2.3
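The sbi_remote_fence_i() call both functions now rely on is a thin wrapper around the legacy (v0.1) SBI interface. A rough sketch of that wrapper, in the spirit of this era's asm/sbi.h (details are illustrative, not verbatim): the extension ID goes in a7, the hart-mask pointer in a0, and the ecall traps into firmware, which executes fence.i on every hart in the mask (all harts when the mask is NULL):

#define SBI_REMOTE_FENCE_I	5	/* legacy SBI extension ID */

static inline void sbi_remote_fence_i(const unsigned long *hart_mask)
{
	register unsigned long a0 asm("a0") = (unsigned long)hart_mask;
	register unsigned long a7 asm("a7") = SBI_REMOTE_FENCE_I;

	/* Trap into M-mode firmware; a0 carries the return value. */
	asm volatile ("ecall" : "+r" (a0) : "r" (a7) : "memory");
}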
From f6635f873a605576fa1983c605655a8721475c22 Mon Sep 17 00:00:00 2001
From: Gary Guo
Date: Wed, 27 Mar 2019 00:41:29 +0000
Subject: riscv: move switch_mm to its own file

switch_mm is an expensive operation that has two users.
flush_icache_deferred is only called within switch_mm and can be moved
together with it. The function is expected to be more complicated when
ASID support is added, so clean up eagerly.

By moving them to a separate file we also remove some excessive
dependencies on tlbflush.h and cacheflush.h.

Signed-off-by: Gary Guo
Reviewed-by: Anup Patel
Signed-off-by: Christoph Hellwig
Signed-off-by: Palmer Dabbelt
---
 arch/riscv/include/asm/mmu_context.h | 54 ++--------------------------
 arch/riscv/mm/Makefile               |  1 +
 arch/riscv/mm/context.c              | 69 ++++++++++++++++++++++++++++++++++++
 3 files changed, 72 insertions(+), 52 deletions(-)
 create mode 100644 arch/riscv/mm/context.c

(limited to 'arch/riscv/mm')

diff --git a/arch/riscv/include/asm/mmu_context.h b/arch/riscv/include/asm/mmu_context.h
index 98c76c821367..bf4f097a9051 100644
--- a/arch/riscv/include/asm/mmu_context.h
+++ b/arch/riscv/include/asm/mmu_context.h
@@ -20,8 +20,6 @@
 
 #include <linux/mm.h>
 #include <linux/sched.h>
-#include <asm/tlbflush.h>
-#include <asm/cacheflush.h>
 
 static inline void enter_lazy_tlb(struct mm_struct *mm,
 	struct task_struct *task)
@@ -39,56 +37,8 @@ static inline void destroy_context(struct mm_struct *mm)
 {
 }
 
-/*
- * When necessary, performs a deferred icache flush for the given MM context,
- * on the local CPU. RISC-V has no direct mechanism for instruction cache
- * shoot downs, so instead we send an IPI that informs the remote harts they
- * need to flush their local instruction caches. To avoid pathologically slow
- * behavior in a common case (a bunch of single-hart processes on a many-hart
- * machine, ie 'make -j') we avoid the IPIs for harts that are not currently
- * executing a MM context and instead schedule a deferred local instruction
- * cache flush to be performed before execution resumes on each hart. This
- * actually performs that local instruction cache flush, which implicitly only
- * refers to the current hart.
- */
-static inline void flush_icache_deferred(struct mm_struct *mm)
-{
-#ifdef CONFIG_SMP
-	unsigned int cpu = smp_processor_id();
-	cpumask_t *mask = &mm->context.icache_stale_mask;
-
-	if (cpumask_test_cpu(cpu, mask)) {
-		cpumask_clear_cpu(cpu, mask);
-		/*
-		 * Ensure the remote hart's writes are visible to this hart.
-		 * This pairs with a barrier in flush_icache_mm.
-		 */
-		smp_mb();
-		local_flush_icache_all();
-	}
-#endif
-}
-
-static inline void switch_mm(struct mm_struct *prev,
-	struct mm_struct *next, struct task_struct *task)
-{
-	if (likely(prev != next)) {
-		/*
-		 * Mark the current MM context as inactive, and the next as
-		 * active. This is at least used by the icache flushing
-		 * routines in order to determine who should
-		 */
-		unsigned int cpu = smp_processor_id();
-
-		cpumask_clear_cpu(cpu, mm_cpumask(prev));
-		cpumask_set_cpu(cpu, mm_cpumask(next));
-
-		csr_write(CSR_SATP, virt_to_pfn(next->pgd) | SATP_MODE);
-		local_flush_tlb_all();
-
-		flush_icache_deferred(next);
-	}
-}
+void switch_mm(struct mm_struct *prev, struct mm_struct *next,
+	struct task_struct *task);
 
 static inline void activate_mm(struct mm_struct *prev,
 	struct mm_struct *next)

diff --git a/arch/riscv/mm/Makefile b/arch/riscv/mm/Makefile
index b68aac701803..0f1295d8731f 100644
--- a/arch/riscv/mm/Makefile
+++ b/arch/riscv/mm/Makefile
@@ -9,3 +9,4 @@ obj-y += fault.o
 obj-y += extable.o
 obj-y += ioremap.o
 obj-y += cacheflush.o
+obj-y += context.o

diff --git a/arch/riscv/mm/context.c b/arch/riscv/mm/context.c
new file mode 100644
index 000000000000..89ceb3cbe218
--- /dev/null
+++ b/arch/riscv/mm/context.c
@@ -0,0 +1,69 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2012 Regents of the University of California
+ * Copyright (C) 2017 SiFive
+ */
+
+#include <linux/mm.h>
+#include <asm/tlbflush.h>
+#include <asm/cacheflush.h>
+
+/*
+ * When necessary, performs a deferred icache flush for the given MM context,
+ * on the local CPU. RISC-V has no direct mechanism for instruction cache
+ * shoot downs, so instead we send an IPI that informs the remote harts they
+ * need to flush their local instruction caches. To avoid pathologically slow
+ * behavior in a common case (a bunch of single-hart processes on a many-hart
+ * machine, ie 'make -j') we avoid the IPIs for harts that are not currently
+ * executing a MM context and instead schedule a deferred local instruction
+ * cache flush to be performed before execution resumes on each hart. This
+ * actually performs that local instruction cache flush, which implicitly only
+ * refers to the current hart.
+ */
+static inline void flush_icache_deferred(struct mm_struct *mm)
+{
+#ifdef CONFIG_SMP
+	unsigned int cpu = smp_processor_id();
+	cpumask_t *mask = &mm->context.icache_stale_mask;
+
+	if (cpumask_test_cpu(cpu, mask)) {
+		cpumask_clear_cpu(cpu, mask);
+		/*
+		 * Ensure the remote hart's writes are visible to this hart.
+		 * This pairs with a barrier in flush_icache_mm.
+		 */
+		smp_mb();
+		local_flush_icache_all();
+	}
+
+#endif
+}
+
+void switch_mm(struct mm_struct *prev, struct mm_struct *next,
+	struct task_struct *task)
+{
+	unsigned int cpu;
+
+	if (unlikely(prev == next))
+		return;
+
+	/*
+	 * Mark the current MM context as inactive, and the next as
+	 * active. This is at least used by the icache flushing
+	 * routines in order to determine who should be flushed.
+	 */
+	cpu = smp_processor_id();
+
+	cpumask_clear_cpu(cpu, mm_cpumask(prev));
+	cpumask_set_cpu(cpu, mm_cpumask(next));
+
+	/*
+	 * Use the old sptbr name instead of using the current satp
+	 * name to support binutils 2.29 which doesn't know about the
+	 * privileged ISA 1.10 yet.
+	 */
+	csr_write(sptbr, virt_to_pfn(next->pgd) | SATP_MODE);
+	local_flush_tlb_all();
+
+	flush_icache_deferred(next);
+}
-- cgit v1.2.3
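The csr_write(sptbr, virt_to_pfn(next->pgd) | SATP_MODE) above packs the root page table's page-frame number and the translation mode into a single register value. A standalone sketch of that composition for RV64/Sv39 (the field layout is from the privileged spec; make_satp() is an invented name for illustration):

#include <stdint.h>

/*
 * satp on RV64: MODE in bits 63:60 (8 selects Sv39), ASID in bits
 * 59:44, and the root page table's PPN in bits 43:0.
 */
#define SATP_MODE_SV39	(8UL << 60)

static inline uint64_t make_satp(uint64_t root_pgd_phys)
{
	uint64_t ppn = root_pgd_phys >> 12;	/* 4 KiB page frames */

	return SATP_MODE_SV39 | ppn;		/* ASID left at zero */
}

Writing that value and then calling local_flush_tlb_all() (an sfence.vma underneath) is what actually retires the old address space on this hart.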
From a967a289f16969527a8a41e261695c639a69bee4 Mon Sep 17 00:00:00 2001
From: Yash Shah
Date: Mon, 6 May 2019 16:18:40 +0530
Subject: RISC-V: sifive_l2_cache: Add L2 cache controller driver for SiFive SoCs

The driver currently supports only the SiFive FU540-C000 platform. The
initial version of the L2 cache controller driver includes:
- Initial configuration reporting at boot up.
- Support for ECC-related functionality.

Signed-off-by: Yash Shah
Signed-off-by: Palmer Dabbelt
---
 arch/riscv/include/asm/sifive_l2_cache.h |  16 +++
 arch/riscv/mm/Makefile                   |   1 +
 arch/riscv/mm/sifive_l2_cache.c          | 175 +++++++++++++++++++++++++++++++
 3 files changed, 192 insertions(+)
 create mode 100644 arch/riscv/include/asm/sifive_l2_cache.h
 create mode 100644 arch/riscv/mm/sifive_l2_cache.c

(limited to 'arch/riscv/mm')

diff --git a/arch/riscv/include/asm/sifive_l2_cache.h b/arch/riscv/include/asm/sifive_l2_cache.h
new file mode 100644
index 000000000000..04f6748fc50b
--- /dev/null
+++ b/arch/riscv/include/asm/sifive_l2_cache.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * SiFive L2 Cache Controller header file
+ *
+ */
+
+#ifndef _ASM_RISCV_SIFIVE_L2_CACHE_H
+#define _ASM_RISCV_SIFIVE_L2_CACHE_H
+
+extern int register_sifive_l2_error_notifier(struct notifier_block *nb);
+extern int unregister_sifive_l2_error_notifier(struct notifier_block *nb);
+
+#define SIFIVE_L2_ERR_TYPE_CE 0
+#define SIFIVE_L2_ERR_TYPE_UE 1
+
+#endif /* _ASM_RISCV_SIFIVE_L2_CACHE_H */
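The notifier chain declared in this header is the driver's only interface to other subsystems. A hypothetical consumer (an EDAC-style reporter, say) would register like this; the function and variable names below are invented for the example, while the event values and the msg string come from the driver itself:

#include <linux/notifier.h>
#include <linux/printk.h>
#include <asm/sifive_l2_cache.h>

static int my_l2_err_event(struct notifier_block *nb, unsigned long event,
			   void *msg)
{
	/* msg is the short description the interrupt handler passes in. */
	if (event == SIFIVE_L2_ERR_TYPE_UE)
		pr_crit("uncorrectable L2 error: %s\n", (char *)msg);
	return NOTIFY_OK;
}

static struct notifier_block my_l2_err_nb = {
	.notifier_call = my_l2_err_event,
};

/* e.g. from a probe function: */
static int my_consumer_init(void)
{
	return register_sifive_l2_error_notifier(&my_l2_err_nb);
}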
diff --git a/arch/riscv/mm/Makefile b/arch/riscv/mm/Makefile
index 0f1295d8731f..8db569141485 100644
--- a/arch/riscv/mm/Makefile
+++ b/arch/riscv/mm/Makefile
@@ -10,3 +10,4 @@ obj-y += extable.o
 obj-y += ioremap.o
 obj-y += cacheflush.o
 obj-y += context.o
+obj-y += sifive_l2_cache.o

diff --git a/arch/riscv/mm/sifive_l2_cache.c b/arch/riscv/mm/sifive_l2_cache.c
new file mode 100644
index 000000000000..4eb64619b3f4
--- /dev/null
+++ b/arch/riscv/mm/sifive_l2_cache.c
@@ -0,0 +1,175 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * SiFive L2 cache controller Driver
+ *
+ * Copyright (C) 2018-2019 SiFive, Inc.
+ *
+ */
+#include <linux/debugfs.h>
+#include <linux/interrupt.h>
+#include <linux/of_irq.h>
+#include <linux/of_address.h>
+#include <asm/sifive_l2_cache.h>
+
+#define SIFIVE_L2_DIRECCFIX_LOW 0x100
+#define SIFIVE_L2_DIRECCFIX_HIGH 0x104
+#define SIFIVE_L2_DIRECCFIX_COUNT 0x108
+
+#define SIFIVE_L2_DATECCFIX_LOW 0x140
+#define SIFIVE_L2_DATECCFIX_HIGH 0x144
+#define SIFIVE_L2_DATECCFIX_COUNT 0x148
+
+#define SIFIVE_L2_DATECCFAIL_LOW 0x160
+#define SIFIVE_L2_DATECCFAIL_HIGH 0x164
+#define SIFIVE_L2_DATECCFAIL_COUNT 0x168
+
+#define SIFIVE_L2_CONFIG 0x00
+#define SIFIVE_L2_WAYENABLE 0x08
+#define SIFIVE_L2_ECCINJECTERR 0x40
+
+#define SIFIVE_L2_MAX_ECCINTR 3
+
+static void __iomem *l2_base;
+static int g_irq[SIFIVE_L2_MAX_ECCINTR];
+
+enum {
+	DIR_CORR = 0,
+	DATA_CORR,
+	DATA_UNCORR,
+};
+
+#ifdef CONFIG_DEBUG_FS
+static struct dentry *sifive_test;
+
+static ssize_t l2_write(struct file *file, const char __user *data,
+			size_t count, loff_t *ppos)
+{
+	unsigned int val;
+
+	if (kstrtouint_from_user(data, count, 0, &val))
+		return -EINVAL;
+	if ((val >= 0 && val < 0xFF) || (val >= 0x10000 && val < 0x100FF))
+		writel(val, l2_base + SIFIVE_L2_ECCINJECTERR);
+	else
+		return -EINVAL;
+	return count;
+}
+
+static const struct file_operations l2_fops = {
+	.owner = THIS_MODULE,
+	.open = simple_open,
+	.write = l2_write
+};
+
+static void setup_sifive_debug(void)
+{
+	sifive_test = debugfs_create_dir("sifive_l2_cache", NULL);
+
+	debugfs_create_file("sifive_debug_inject_error", 0200,
+			    sifive_test, NULL, &l2_fops);
+}
+#endif
+
+static void l2_config_read(void)
+{
+	u32 regval, val;
+
+	regval = readl(l2_base + SIFIVE_L2_CONFIG);
+	val = regval & 0xFF;
+	pr_info("L2CACHE: No. of Banks in the cache: %d\n", val);
+	val = (regval & 0xFF00) >> 8;
+	pr_info("L2CACHE: No. of ways per bank: %d\n", val);
+	val = (regval & 0xFF0000) >> 16;
+	pr_info("L2CACHE: Sets per bank: %llu\n", (uint64_t)1 << val);
+	val = (regval & 0xFF000000) >> 24;
+	pr_info("L2CACHE: Bytes per cache block: %llu\n", (uint64_t)1 << val);
+
+	regval = readl(l2_base + SIFIVE_L2_WAYENABLE);
+	pr_info("L2CACHE: Index of the largest way enabled: %d\n", regval);
+}
+
+static const struct of_device_id sifive_l2_ids[] = {
+	{ .compatible = "sifive,fu540-c000-ccache" },
+	{ /* end of table */ },
+};
+
+static ATOMIC_NOTIFIER_HEAD(l2_err_chain);
+
+int register_sifive_l2_error_notifier(struct notifier_block *nb)
+{
+	return atomic_notifier_chain_register(&l2_err_chain, nb);
+}
+EXPORT_SYMBOL_GPL(register_sifive_l2_error_notifier);
+
+int unregister_sifive_l2_error_notifier(struct notifier_block *nb)
+{
+	return atomic_notifier_chain_unregister(&l2_err_chain, nb);
+}
+EXPORT_SYMBOL_GPL(unregister_sifive_l2_error_notifier);
+
+static irqreturn_t l2_int_handler(int irq, void *device)
+{
+	unsigned int regval, add_h, add_l;
+
+	if (irq == g_irq[DIR_CORR]) {
+		add_h = readl(l2_base + SIFIVE_L2_DIRECCFIX_HIGH);
+		add_l = readl(l2_base + SIFIVE_L2_DIRECCFIX_LOW);
+		pr_err("L2CACHE: DirError @ 0x%08X.%08X\n", add_h, add_l);
+		regval = readl(l2_base + SIFIVE_L2_DIRECCFIX_COUNT);
+		atomic_notifier_call_chain(&l2_err_chain, SIFIVE_L2_ERR_TYPE_CE,
+					   "DirECCFix");
+	}
+	if (irq == g_irq[DATA_CORR]) {
+		add_h = readl(l2_base + SIFIVE_L2_DATECCFIX_HIGH);
+		add_l = readl(l2_base + SIFIVE_L2_DATECCFIX_LOW);
+		pr_err("L2CACHE: DataError @ 0x%08X.%08X\n", add_h, add_l);
+		regval = readl(l2_base + SIFIVE_L2_DATECCFIX_COUNT);
+		atomic_notifier_call_chain(&l2_err_chain, SIFIVE_L2_ERR_TYPE_CE,
+					   "DatECCFix");
+	}
+	if (irq == g_irq[DATA_UNCORR]) {
+		add_h = readl(l2_base + SIFIVE_L2_DATECCFAIL_HIGH);
+		add_l = readl(l2_base + SIFIVE_L2_DATECCFAIL_LOW);
+		pr_err("L2CACHE: DataFail @ 0x%08X.%08X\n", add_h, add_l);
+		regval = readl(l2_base + SIFIVE_L2_DATECCFAIL_COUNT);
+		atomic_notifier_call_chain(&l2_err_chain, SIFIVE_L2_ERR_TYPE_UE,
+					   "DatECCFail");
+	}
+
+	return IRQ_HANDLED;
+}
+
+int __init sifive_l2_init(void)
+{
+	struct device_node *np;
+	struct resource res;
+	int i, rc;
+
+	np = of_find_matching_node(NULL, sifive_l2_ids);
+	if (!np)
+		return -ENODEV;
+
+	if (of_address_to_resource(np, 0, &res))
+		return -ENODEV;
+
+	l2_base = ioremap(res.start, resource_size(&res));
+	if (!l2_base)
+		return -ENOMEM;
+
+	for (i = 0; i < SIFIVE_L2_MAX_ECCINTR; i++) {
+		g_irq[i] = irq_of_parse_and_map(np, i);
+		rc = request_irq(g_irq[i], l2_int_handler, 0, "l2_ecc", NULL);
+		if (rc) {
+			pr_err("L2CACHE: Could not request IRQ %d\n", g_irq[i]);
+			return rc;
+		}
+	}
+
+	l2_config_read();
+
+#ifdef CONFIG_DEBUG_FS
+	setup_sifive_debug();
+#endif
+	return 0;
+}
device_initcall(sifive_l2_init);
-- cgit v1.2.3
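As a usage note for the error-injection hook: l2_write() accepts values in [0, 0xFF) and [0x10000, 0x100FF) and writes them straight into the ECCINJECTERR register; what the two ranges select is defined by the FU540-C000 manual (assumed here to pick the target RAM and the bit to corrupt). A hypothetical user-space test could exercise it through the debugfs file the driver creates:

#include <stdio.h>

int main(void)
{
	/* Path follows from setup_sifive_debug(); needs root and a
	 * mounted debugfs. */
	const char *path = "/sys/kernel/debug/sifive_l2_cache/"
			   "sifive_debug_inject_error";
	FILE *f = fopen(path, "w");

	if (!f) {
		perror("fopen");
		return 1;
	}
	fprintf(f, "0\n");	/* in the first range l2_write() accepts */
	fclose(f);
	return 0;
}

The injected error should then surface as an L2CACHE line in dmesg and a notifier callback, per the interrupt handler above.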
From 8fef9900d43feb9d5017c72840966733085e3e82 Mon Sep 17 00:00:00 2001
From: Andreas Schwab
Date: Tue, 7 May 2019 09:36:46 +0200
Subject: riscv: fix locking violation in page fault handler

When a user-mode process accesses an address in the vmalloc area,
do_page_fault tries to unlock the mmap semaphore even though it isn't
locked.

Signed-off-by: Andreas Schwab
[Palmer: Duplicated code instead of a goto]
Signed-off-by: Palmer Dabbelt
---
 arch/riscv/mm/fault.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

(limited to 'arch/riscv/mm')

diff --git a/arch/riscv/mm/fault.c b/arch/riscv/mm/fault.c
index 26293bc053a8..cec8be9e2d6a 100644
--- a/arch/riscv/mm/fault.c
+++ b/arch/riscv/mm/fault.c
@@ -229,8 +229,9 @@ vmalloc_fault:
 		pte_t *pte_k;
 		int index;
 
+		/* User mode accesses just cause a SIGSEGV */
 		if (user_mode(regs))
-			goto bad_area;
+			return do_trap(regs, SIGSEGV, code, addr, tsk);
 
 		/*
 		 * Synchronize this task's top level page-table
-- cgit v1.2.3
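The shape of the bug: the vmalloc_fault path runs before the handler takes mmap_sem, while the bad_area label it used to jump to releases that semaphore. A minimal user-space model of this bug class, with a pthread rwlock standing in for mmap_sem (every name here is invented; only the control-flow shape comes from the patch):

#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t fake_mmap_sem = PTHREAD_RWLOCK_INITIALIZER;

static void handle_fault(int vmalloc_path)
{
	if (vmalloc_path) {
		/* Took no lock, so report the fault and return directly
		 * instead of jumping to the unlocking error path. */
		fprintf(stderr, "SIGSEGV\n");
		return;
	}

	pthread_rwlock_rdlock(&fake_mmap_sem);
	/* ... the normal VMA lookup would happen here ... */
	pthread_rwlock_unlock(&fake_mmap_sem);	/* balanced: held here */
}

int main(void)
{
	handle_fault(1);	/* the previously buggy path */
	handle_fault(0);
	return 0;
}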