From e114ba20fe4f2f0a075941a06271e3f0a3539551 Mon Sep 17 00:00:00 2001 From: Paul Burton Date: Wed, 11 Jun 2014 11:00:56 +0100 Subject: MIPS: smp-cps: Convert smp_mb__after_atomic_dec() Commit 91bbefe6b0fc "arch,mips: Convert smp_mb__*()" replaced the smp_mb__after_atomic_dec function with smp_mb__after_atomic, whilst commit 1d8f1f5a780a "MIPS: smp-cps: hotplug support" introduced a new use of it. Replace that use with smp_mb__after_atomic in order to avoid a build failure: arch/mips/kernel/smp-cps.c: In function 'cps_cpu_disable': arch/mips/kernel/smp-cps.c:304:2: error: 'smp_mb__after_atomic_dec' is deprecated (declared at include/linux/atomic.h:35) [-Werror=deprecated-declarations] Signed-off-by: Paul Burton Reviewed-by: Markos Chandras Cc: linux-mips@linux-mips.org Patchwork: https://patchwork.linux-mips.org/patch/7085/ Signed-off-by: Ralf Baechle --- arch/mips/kernel/smp-cps.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/mips/kernel/smp-cps.c b/arch/mips/kernel/smp-cps.c index df0598d9bfdd..949f2c6827a0 100644 --- a/arch/mips/kernel/smp-cps.c +++ b/arch/mips/kernel/smp-cps.c @@ -301,7 +301,7 @@ static int cps_cpu_disable(void) core_cfg = &mips_cps_core_bootcfg[current_cpu_data.core]; atomic_sub(1 << cpu_vpe_id(¤t_cpu_data), &core_cfg->vpe_mask); - smp_mb__after_atomic_dec(); + smp_mb__after_atomic(); set_cpu_online(cpu, false); cpu_clear(cpu, cpu_callin_map); -- cgit v1.2.3 From 7c5491b808d8ea0781c4402792b21a103151135f Mon Sep 17 00:00:00 2001 From: Paul Burton Date: Wed, 11 Jun 2014 11:00:57 +0100 Subject: MIPS: pm-cps: convert smp_mb__*() Commit 91bbefe6b0fc "arch,mips: Convert smp_mb__*()" replaced the smp_mb__* functions with a simpler API, whilst commit 3179d37ee1ed "MIPS: pm-cps: add PM state entry code for CPS systems" introduced new uses of smp_mb__before_atomic_inc & smp_mb__after_clear_bit. 
Replace those calls with the corresponding before & after atomic functions of the new, simpler API in order to avoid a build failure: arch/mips/kernel/pm-cps.c: In function 'coupled_barrier': arch/mips/kernel/pm-cps.c:104:2: error: 'smp_mb__before_atomic_inc' is deprecated (declared at include/linux/atomic.h:11) [-Werror=deprecated-declarations] arch/mips/kernel/pm-cps.c: In function 'cps_pm_enter_state': arch/mips/kernel/pm-cps.c:161:2: error: 'smp_mb__after_clear_bit' is deprecated (declared at include/linux/bitops.h:48) [-Werror=deprecated-declarations] Signed-off-by: Paul Burton Reviewed-by: Markos Chandras Cc: linux-mips@linux-mips.org Patchwork: https://patchwork.linux-mips.org/patch/7086/ Signed-off-by: Ralf Baechle --- arch/mips/kernel/pm-cps.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/mips/kernel/pm-cps.c b/arch/mips/kernel/pm-cps.c index 5aa4c6f8cf83..c4c2069d3a20 100644 --- a/arch/mips/kernel/pm-cps.c +++ b/arch/mips/kernel/pm-cps.c @@ -101,7 +101,7 @@ static void coupled_barrier(atomic_t *a, unsigned online) if (!coupled_coherence) return; - smp_mb__before_atomic_inc(); + smp_mb__before_atomic(); atomic_inc(a); while (atomic_read(a) < online) @@ -158,7 +158,7 @@ int cps_pm_enter_state(enum cps_pm_state state) /* Indicate that this CPU might not be coherent */ cpumask_clear_cpu(cpu, &cpu_coherent_mask); - smp_mb__after_clear_bit(); + smp_mb__after_atomic(); /* Create a non-coherent mapping of the core ready_count */ core_ready_count = per_cpu(ready_count, core); -- cgit v1.2.3 From 91496ea9f86f55e87be78ecc88c8a6b7e3802601 Mon Sep 17 00:00:00 2001 From: Ralf Baechle Date: Fri, 13 Jun 2014 15:26:14 +0100 Subject: MIPS: math-emu: Work around limitations of older GCC. Older GCC doesn't get named initializations of anonymous structs right, that is members are not initializable in the containing structure through name however old style initializations are working fine. The issue exists with gcc up to 4.5.x. Signed-off-by: Ralf Baechle --- arch/mips/math-emu/ieee754.c | 18 +++++++++++++++--- 1 file changed, 15 insertions(+), 3 deletions(-) (limited to 'arch') diff --git a/arch/mips/math-emu/ieee754.c b/arch/mips/math-emu/ieee754.c index 53f1d2287084..cb9214da372f 100644 --- a/arch/mips/math-emu/ieee754.c +++ b/arch/mips/math-emu/ieee754.c @@ -34,11 +34,17 @@ * Special constants */ +/* + * Older GCC requires the inner braces for initialization of union ieee754dp's + * anonymous struct member. Without an error will result. + */ #define DPCNST(s, b, m) \ { \ - .sign = (s), \ - .bexp = (b) + DP_EBIAS, \ - .mant = (m) \ + { \ + .sign = (s), \ + .bexp = (b) + DP_EBIAS, \ + .mant = (m) \ + } \ } const union ieee754dp __ieee754dp_spcvals[] = { @@ -61,11 +67,17 @@ const union ieee754dp __ieee754dp_spcvals[] = { DPCNST(0, 63, 0x0000000000000ULL), /* + 1.0e63 */ }; +/* + * Older GCC requires the inner braces for initialization of union ieee754sp's + * anonymous struct member. Without an error will result. + */ #define SPCNST(s, b, m) \ { \ + { \ .sign = (s), \ .bexp = (b) + SP_EBIAS, \ .mant = (m) \ + } \ } const union ieee754sp __ieee754sp_spcvals[] = { -- cgit v1.2.3 From fb738f8544d8ceb3599598f3500f33bf6ff2fca4 Mon Sep 17 00:00:00 2001 From: Ralf Baechle Date: Fri, 13 Jun 2014 15:36:45 +0100 Subject: MIPS: math-emu: Reduce code duplication. The fix in the preceeding commit did do exactly the same thing in two places showing some code cleanup was due. 
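For readers unfamiliar with the compiler quirk behind the two ieee754.c patches here, the following is a minimal standalone C sketch, not taken from the kernel; the union name, field widths and FPCNST macro are simplified stand-ins for union ieee754dp and its DPCNST/SPCNST (later xPCNST) macros. GCC releases up to 4.5.x refuse designated initializers that name members of an anonymous struct directly from the containing union, while the same initializer wrapped in an inner brace pair for the anonymous member builds fine, which is exactly the shape those macros produce.

/* Simplified stand-in for union ieee754dp: an anonymous struct in a union. */
union fp {
	struct {
		unsigned int sign:1;
		unsigned int bexp:11;
		unsigned long long mant:52;
	};				/* anonymous struct member */
	unsigned long long bits;
};

/* Accepted by both old and new GCC: the inner braces initialize the
 * anonymous struct member, the outer braces the union itself.
 */
#define FPCNST(s, b, m)	{ { .sign = (s), .bexp = (b), .mant = (m) } }

/* GCC <= 4.5.x rejects the flattened form:
 *	#define FPCNST(s, b, m)	{ .sign = (s), .bexp = (b), .mant = (m) }
 */

static const union fp fp_one = FPCNST(0, 1023, 0);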
Signed-off-by: Ralf Baechle --- arch/mips/math-emu/ieee754.c | 19 ++++++------------- 1 file changed, 6 insertions(+), 13 deletions(-) (limited to 'arch') diff --git a/arch/mips/math-emu/ieee754.c b/arch/mips/math-emu/ieee754.c index cb9214da372f..8e97acbbe22c 100644 --- a/arch/mips/math-emu/ieee754.c +++ b/arch/mips/math-emu/ieee754.c @@ -38,15 +38,18 @@ * Older GCC requires the inner braces for initialization of union ieee754dp's * anonymous struct member. Without an error will result. */ -#define DPCNST(s, b, m) \ +#define xPCNST(s, b, m, ebias) \ { \ { \ .sign = (s), \ - .bexp = (b) + DP_EBIAS, \ + .bexp = (b) + ebias, \ .mant = (m) \ } \ } +#define DPCNST(s, b, m) \ + xPCNST(s, b, m, DP_EBIAS) + const union ieee754dp __ieee754dp_spcvals[] = { DPCNST(0, DP_EMIN - 1, 0x0000000000000ULL), /* + zero */ DPCNST(1, DP_EMIN - 1, 0x0000000000000ULL), /* - zero */ @@ -67,18 +70,8 @@ const union ieee754dp __ieee754dp_spcvals[] = { DPCNST(0, 63, 0x0000000000000ULL), /* + 1.0e63 */ }; -/* - * Older GCC requires the inner braces for initialization of union ieee754sp's - * anonymous struct member. Without an error will result. - */ #define SPCNST(s, b, m) \ -{ \ - { \ - .sign = (s), \ - .bexp = (b) + SP_EBIAS, \ - .mant = (m) \ - } \ -} + xPCNST(s, b, m, SP_EBIAS) const union ieee754sp __ieee754sp_spcvals[] = { SPCNST(0, SP_EMIN - 1, 0x000000), /* + zero */ -- cgit v1.2.3 From 16f77de82f2d2f628306dab9bc4799df0d28a199 Mon Sep 17 00:00:00 2001 From: Paul Burton Date: Wed, 18 Jun 2014 15:00:46 +0100 Subject: Revert "MIPS: Save/restore MSA context around signals" This reverts commit eec43a224cf1 "MIPS: Save/restore MSA context around signals" and the MSA parts of ca750649e08c "MIPS: kernel: signal: Prevent save/restore FPU context in user memory" (the restore path of which appears incorrect anyway...). The reverted patch took care not to break compatibility with userland users of struct sigcontext, but inadvertantly changed the offset of the uc_sigmask field of struct ucontext. Thus Linux v3.15 breaks the userland ABI. The MSA context will need to be saved via some other opt-in mechanism, but for now revert the change to reduce the fallout. This will have minimal impact upon use of MSA since the only supported CPU which includes it (the P5600) is 32-bit and therefore requires that the experimental CONFIG_MIPS_O32_FP64_SUPPORT Kconfig option be selected before the kernel will set FR=1 for a task, a requirement for MSA use. Thus the users of MSA are limited to known small groups of people & this patch won't be breaking any previously working MSA-using userland outside of experimental settings. [ralf@linux-mips.org: Fixed rejects.] Cc: stable@vger.kernel.org Reported-by: Joseph S. 
Myers Signed-off-by: Paul Burton Cc: linux-mips@linux-mips.org Cc: stable@vger.kernel.org Patchwork: https://patchwork.linux-mips.org/patch/7107/ Signed-off-by: Ralf Baechle --- arch/mips/include/asm/sigcontext.h | 2 - arch/mips/include/uapi/asm/sigcontext.h | 8 -- arch/mips/kernel/asm-offsets.c | 3 - arch/mips/kernel/r4k_fpu.S | 213 -------------------------------- arch/mips/kernel/signal.c | 79 ++---------- arch/mips/kernel/signal32.c | 74 ++--------- 6 files changed, 16 insertions(+), 363 deletions(-) (limited to 'arch') diff --git a/arch/mips/include/asm/sigcontext.h b/arch/mips/include/asm/sigcontext.h index f54bdbe85c0d..eeeb0f48c767 100644 --- a/arch/mips/include/asm/sigcontext.h +++ b/arch/mips/include/asm/sigcontext.h @@ -32,8 +32,6 @@ struct sigcontext32 { __u32 sc_lo2; __u32 sc_hi3; __u32 sc_lo3; - __u64 sc_msaregs[32]; /* Most significant 64 bits */ - __u32 sc_msa_csr; }; #endif /* _MIPS_SIM == _MIPS_SIM_ABI64 || _MIPS_SIM == _MIPS_SIM_NABI32 */ #endif /* _ASM_SIGCONTEXT_H */ diff --git a/arch/mips/include/uapi/asm/sigcontext.h b/arch/mips/include/uapi/asm/sigcontext.h index 681c17603a48..6c9906f59c6e 100644 --- a/arch/mips/include/uapi/asm/sigcontext.h +++ b/arch/mips/include/uapi/asm/sigcontext.h @@ -12,10 +12,6 @@ #include #include -/* Bits which may be set in sc_used_math */ -#define USEDMATH_FP (1 << 0) -#define USEDMATH_MSA (1 << 1) - #if _MIPS_SIM == _MIPS_SIM_ABI32 /* @@ -41,8 +37,6 @@ struct sigcontext { unsigned long sc_lo2; unsigned long sc_hi3; unsigned long sc_lo3; - unsigned long long sc_msaregs[32]; /* Most significant 64 bits */ - unsigned long sc_msa_csr; }; #endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */ @@ -76,8 +70,6 @@ struct sigcontext { __u32 sc_used_math; __u32 sc_dsp; __u32 sc_reserved; - __u64 sc_msaregs[32]; - __u32 sc_msa_csr; }; diff --git a/arch/mips/kernel/asm-offsets.c b/arch/mips/kernel/asm-offsets.c index 02f075df8f2e..4bb5107511e2 100644 --- a/arch/mips/kernel/asm-offsets.c +++ b/arch/mips/kernel/asm-offsets.c @@ -293,7 +293,6 @@ void output_sc_defines(void) OFFSET(SC_LO2, sigcontext, sc_lo2); OFFSET(SC_HI3, sigcontext, sc_hi3); OFFSET(SC_LO3, sigcontext, sc_lo3); - OFFSET(SC_MSAREGS, sigcontext, sc_msaregs); BLANK(); } #endif @@ -308,7 +307,6 @@ void output_sc_defines(void) OFFSET(SC_MDLO, sigcontext, sc_mdlo); OFFSET(SC_PC, sigcontext, sc_pc); OFFSET(SC_FPC_CSR, sigcontext, sc_fpc_csr); - OFFSET(SC_MSAREGS, sigcontext, sc_msaregs); BLANK(); } #endif @@ -320,7 +318,6 @@ void output_sc32_defines(void) OFFSET(SC32_FPREGS, sigcontext32, sc_fpregs); OFFSET(SC32_FPC_CSR, sigcontext32, sc_fpc_csr); OFFSET(SC32_FPC_EIR, sigcontext32, sc_fpc_eir); - OFFSET(SC32_MSAREGS, sigcontext32, sc_msaregs); BLANK(); } #endif diff --git a/arch/mips/kernel/r4k_fpu.S b/arch/mips/kernel/r4k_fpu.S index 71814272d148..8352523568e6 100644 --- a/arch/mips/kernel/r4k_fpu.S +++ b/arch/mips/kernel/r4k_fpu.S @@ -13,7 +13,6 @@ * Copyright (C) 1999, 2001 Silicon Graphics, Inc. 
*/ #include -#include #include #include #include @@ -246,218 +245,6 @@ LEAF(_restore_fp_context32) END(_restore_fp_context32) #endif -#ifdef CONFIG_CPU_HAS_MSA - - .macro save_sc_msareg wr, off, sc, tmp -#ifdef CONFIG_64BIT - copy_u_d \tmp, \wr, 1 - EX sd \tmp, (\off+(\wr*8))(\sc) -#elif defined(CONFIG_CPU_LITTLE_ENDIAN) - copy_u_w \tmp, \wr, 2 - EX sw \tmp, (\off+(\wr*8)+0)(\sc) - copy_u_w \tmp, \wr, 3 - EX sw \tmp, (\off+(\wr*8)+4)(\sc) -#else /* CONFIG_CPU_BIG_ENDIAN */ - copy_u_w \tmp, \wr, 2 - EX sw \tmp, (\off+(\wr*8)+4)(\sc) - copy_u_w \tmp, \wr, 3 - EX sw \tmp, (\off+(\wr*8)+0)(\sc) -#endif - .endm - -/* - * int _save_msa_context(struct sigcontext *sc) - * - * Save the upper 64 bits of each vector register along with the MSA_CSR - * register into sc. Returns zero on success, else non-zero. - */ -LEAF(_save_msa_context) - save_sc_msareg 0, SC_MSAREGS, a0, t0 - save_sc_msareg 1, SC_MSAREGS, a0, t0 - save_sc_msareg 2, SC_MSAREGS, a0, t0 - save_sc_msareg 3, SC_MSAREGS, a0, t0 - save_sc_msareg 4, SC_MSAREGS, a0, t0 - save_sc_msareg 5, SC_MSAREGS, a0, t0 - save_sc_msareg 6, SC_MSAREGS, a0, t0 - save_sc_msareg 7, SC_MSAREGS, a0, t0 - save_sc_msareg 8, SC_MSAREGS, a0, t0 - save_sc_msareg 9, SC_MSAREGS, a0, t0 - save_sc_msareg 10, SC_MSAREGS, a0, t0 - save_sc_msareg 11, SC_MSAREGS, a0, t0 - save_sc_msareg 12, SC_MSAREGS, a0, t0 - save_sc_msareg 13, SC_MSAREGS, a0, t0 - save_sc_msareg 14, SC_MSAREGS, a0, t0 - save_sc_msareg 15, SC_MSAREGS, a0, t0 - save_sc_msareg 16, SC_MSAREGS, a0, t0 - save_sc_msareg 17, SC_MSAREGS, a0, t0 - save_sc_msareg 18, SC_MSAREGS, a0, t0 - save_sc_msareg 19, SC_MSAREGS, a0, t0 - save_sc_msareg 20, SC_MSAREGS, a0, t0 - save_sc_msareg 21, SC_MSAREGS, a0, t0 - save_sc_msareg 22, SC_MSAREGS, a0, t0 - save_sc_msareg 23, SC_MSAREGS, a0, t0 - save_sc_msareg 24, SC_MSAREGS, a0, t0 - save_sc_msareg 25, SC_MSAREGS, a0, t0 - save_sc_msareg 26, SC_MSAREGS, a0, t0 - save_sc_msareg 27, SC_MSAREGS, a0, t0 - save_sc_msareg 28, SC_MSAREGS, a0, t0 - save_sc_msareg 29, SC_MSAREGS, a0, t0 - save_sc_msareg 30, SC_MSAREGS, a0, t0 - save_sc_msareg 31, SC_MSAREGS, a0, t0 - jr ra - li v0, 0 - END(_save_msa_context) - -#ifdef CONFIG_MIPS32_COMPAT - -/* - * int _save_msa_context32(struct sigcontext32 *sc) - * - * Save the upper 64 bits of each vector register along with the MSA_CSR - * register into sc. Returns zero on success, else non-zero. 
- */ -LEAF(_save_msa_context32) - save_sc_msareg 0, SC32_MSAREGS, a0, t0 - save_sc_msareg 1, SC32_MSAREGS, a0, t0 - save_sc_msareg 2, SC32_MSAREGS, a0, t0 - save_sc_msareg 3, SC32_MSAREGS, a0, t0 - save_sc_msareg 4, SC32_MSAREGS, a0, t0 - save_sc_msareg 5, SC32_MSAREGS, a0, t0 - save_sc_msareg 6, SC32_MSAREGS, a0, t0 - save_sc_msareg 7, SC32_MSAREGS, a0, t0 - save_sc_msareg 8, SC32_MSAREGS, a0, t0 - save_sc_msareg 9, SC32_MSAREGS, a0, t0 - save_sc_msareg 10, SC32_MSAREGS, a0, t0 - save_sc_msareg 11, SC32_MSAREGS, a0, t0 - save_sc_msareg 12, SC32_MSAREGS, a0, t0 - save_sc_msareg 13, SC32_MSAREGS, a0, t0 - save_sc_msareg 14, SC32_MSAREGS, a0, t0 - save_sc_msareg 15, SC32_MSAREGS, a0, t0 - save_sc_msareg 16, SC32_MSAREGS, a0, t0 - save_sc_msareg 17, SC32_MSAREGS, a0, t0 - save_sc_msareg 18, SC32_MSAREGS, a0, t0 - save_sc_msareg 19, SC32_MSAREGS, a0, t0 - save_sc_msareg 20, SC32_MSAREGS, a0, t0 - save_sc_msareg 21, SC32_MSAREGS, a0, t0 - save_sc_msareg 22, SC32_MSAREGS, a0, t0 - save_sc_msareg 23, SC32_MSAREGS, a0, t0 - save_sc_msareg 24, SC32_MSAREGS, a0, t0 - save_sc_msareg 25, SC32_MSAREGS, a0, t0 - save_sc_msareg 26, SC32_MSAREGS, a0, t0 - save_sc_msareg 27, SC32_MSAREGS, a0, t0 - save_sc_msareg 28, SC32_MSAREGS, a0, t0 - save_sc_msareg 29, SC32_MSAREGS, a0, t0 - save_sc_msareg 30, SC32_MSAREGS, a0, t0 - save_sc_msareg 31, SC32_MSAREGS, a0, t0 - jr ra - li v0, 0 - END(_save_msa_context32) - -#endif /* CONFIG_MIPS32_COMPAT */ - - .macro restore_sc_msareg wr, off, sc, tmp -#ifdef CONFIG_64BIT - EX ld \tmp, (\off+(\wr*8))(\sc) - insert_d \wr, 1, \tmp -#elif defined(CONFIG_CPU_LITTLE_ENDIAN) - EX lw \tmp, (\off+(\wr*8)+0)(\sc) - insert_w \wr, 2, \tmp - EX lw \tmp, (\off+(\wr*8)+4)(\sc) - insert_w \wr, 3, \tmp -#else /* CONFIG_CPU_BIG_ENDIAN */ - EX lw \tmp, (\off+(\wr*8)+4)(\sc) - insert_w \wr, 2, \tmp - EX lw \tmp, (\off+(\wr*8)+0)(\sc) - insert_w \wr, 3, \tmp -#endif - .endm - -/* - * int _restore_msa_context(struct sigcontext *sc) - */ -LEAF(_restore_msa_context) - restore_sc_msareg 0, SC_MSAREGS, a0, t0 - restore_sc_msareg 1, SC_MSAREGS, a0, t0 - restore_sc_msareg 2, SC_MSAREGS, a0, t0 - restore_sc_msareg 3, SC_MSAREGS, a0, t0 - restore_sc_msareg 4, SC_MSAREGS, a0, t0 - restore_sc_msareg 5, SC_MSAREGS, a0, t0 - restore_sc_msareg 6, SC_MSAREGS, a0, t0 - restore_sc_msareg 7, SC_MSAREGS, a0, t0 - restore_sc_msareg 8, SC_MSAREGS, a0, t0 - restore_sc_msareg 9, SC_MSAREGS, a0, t0 - restore_sc_msareg 10, SC_MSAREGS, a0, t0 - restore_sc_msareg 11, SC_MSAREGS, a0, t0 - restore_sc_msareg 12, SC_MSAREGS, a0, t0 - restore_sc_msareg 13, SC_MSAREGS, a0, t0 - restore_sc_msareg 14, SC_MSAREGS, a0, t0 - restore_sc_msareg 15, SC_MSAREGS, a0, t0 - restore_sc_msareg 16, SC_MSAREGS, a0, t0 - restore_sc_msareg 17, SC_MSAREGS, a0, t0 - restore_sc_msareg 18, SC_MSAREGS, a0, t0 - restore_sc_msareg 19, SC_MSAREGS, a0, t0 - restore_sc_msareg 20, SC_MSAREGS, a0, t0 - restore_sc_msareg 21, SC_MSAREGS, a0, t0 - restore_sc_msareg 22, SC_MSAREGS, a0, t0 - restore_sc_msareg 23, SC_MSAREGS, a0, t0 - restore_sc_msareg 24, SC_MSAREGS, a0, t0 - restore_sc_msareg 25, SC_MSAREGS, a0, t0 - restore_sc_msareg 26, SC_MSAREGS, a0, t0 - restore_sc_msareg 27, SC_MSAREGS, a0, t0 - restore_sc_msareg 28, SC_MSAREGS, a0, t0 - restore_sc_msareg 29, SC_MSAREGS, a0, t0 - restore_sc_msareg 30, SC_MSAREGS, a0, t0 - restore_sc_msareg 31, SC_MSAREGS, a0, t0 - jr ra - li v0, 0 - END(_restore_msa_context) - -#ifdef CONFIG_MIPS32_COMPAT - -/* - * int _restore_msa_context32(struct sigcontext32 *sc) - */ -LEAF(_restore_msa_context32) - 
restore_sc_msareg 0, SC32_MSAREGS, a0, t0 - restore_sc_msareg 1, SC32_MSAREGS, a0, t0 - restore_sc_msareg 2, SC32_MSAREGS, a0, t0 - restore_sc_msareg 3, SC32_MSAREGS, a0, t0 - restore_sc_msareg 4, SC32_MSAREGS, a0, t0 - restore_sc_msareg 5, SC32_MSAREGS, a0, t0 - restore_sc_msareg 6, SC32_MSAREGS, a0, t0 - restore_sc_msareg 7, SC32_MSAREGS, a0, t0 - restore_sc_msareg 8, SC32_MSAREGS, a0, t0 - restore_sc_msareg 9, SC32_MSAREGS, a0, t0 - restore_sc_msareg 10, SC32_MSAREGS, a0, t0 - restore_sc_msareg 11, SC32_MSAREGS, a0, t0 - restore_sc_msareg 12, SC32_MSAREGS, a0, t0 - restore_sc_msareg 13, SC32_MSAREGS, a0, t0 - restore_sc_msareg 14, SC32_MSAREGS, a0, t0 - restore_sc_msareg 15, SC32_MSAREGS, a0, t0 - restore_sc_msareg 16, SC32_MSAREGS, a0, t0 - restore_sc_msareg 17, SC32_MSAREGS, a0, t0 - restore_sc_msareg 18, SC32_MSAREGS, a0, t0 - restore_sc_msareg 19, SC32_MSAREGS, a0, t0 - restore_sc_msareg 20, SC32_MSAREGS, a0, t0 - restore_sc_msareg 21, SC32_MSAREGS, a0, t0 - restore_sc_msareg 22, SC32_MSAREGS, a0, t0 - restore_sc_msareg 23, SC32_MSAREGS, a0, t0 - restore_sc_msareg 24, SC32_MSAREGS, a0, t0 - restore_sc_msareg 25, SC32_MSAREGS, a0, t0 - restore_sc_msareg 26, SC32_MSAREGS, a0, t0 - restore_sc_msareg 27, SC32_MSAREGS, a0, t0 - restore_sc_msareg 28, SC32_MSAREGS, a0, t0 - restore_sc_msareg 29, SC32_MSAREGS, a0, t0 - restore_sc_msareg 30, SC32_MSAREGS, a0, t0 - restore_sc_msareg 31, SC32_MSAREGS, a0, t0 - jr ra - li v0, 0 - END(_restore_msa_context32) - -#endif /* CONFIG_MIPS32_COMPAT */ - -#endif /* CONFIG_CPU_HAS_MSA */ - .set reorder .type fault@function diff --git a/arch/mips/kernel/signal.c b/arch/mips/kernel/signal.c index 33133d3df3e5..9e60d117e41e 100644 --- a/arch/mips/kernel/signal.c +++ b/arch/mips/kernel/signal.c @@ -31,7 +31,6 @@ #include #include #include -#include #include #include #include @@ -48,9 +47,6 @@ static int (*restore_fp_context)(struct sigcontext __user *sc); extern asmlinkage int _save_fp_context(struct sigcontext __user *sc); extern asmlinkage int _restore_fp_context(struct sigcontext __user *sc); -extern asmlinkage int _save_msa_context(struct sigcontext __user *sc); -extern asmlinkage int _restore_msa_context(struct sigcontext __user *sc); - struct sigframe { u32 sf_ass[4]; /* argument save space for o32 */ u32 sf_pad[2]; /* Was: signal trampoline */ @@ -99,61 +95,21 @@ static int copy_fp_from_sigcontext(struct sigcontext __user *sc) return err; } -/* - * These functions will save only the upper 64 bits of the vector registers, - * since the lower 64 bits have already been saved as the scalar FP context. 
- */ -static int copy_msa_to_sigcontext(struct sigcontext __user *sc) -{ - int i; - int err = 0; - - for (i = 0; i < NUM_FPU_REGS; i++) { - err |= - __put_user(get_fpr64(¤t->thread.fpu.fpr[i], 1), - &sc->sc_msaregs[i]); - } - err |= __put_user(current->thread.fpu.msacsr, &sc->sc_msa_csr); - - return err; -} - -static int copy_msa_from_sigcontext(struct sigcontext __user *sc) -{ - int i; - int err = 0; - u64 val; - - for (i = 0; i < NUM_FPU_REGS; i++) { - err |= __get_user(val, &sc->sc_msaregs[i]); - set_fpr64(¤t->thread.fpu.fpr[i], 1, val); - } - err |= __get_user(current->thread.fpu.msacsr, &sc->sc_msa_csr); - - return err; -} - /* * Helper routines */ -static int protected_save_fp_context(struct sigcontext __user *sc, - unsigned used_math) +static int protected_save_fp_context(struct sigcontext __user *sc) { int err; - bool save_msa = cpu_has_msa && (used_math & USEDMATH_MSA); #ifndef CONFIG_EVA while (1) { lock_fpu_owner(); if (is_fpu_owner()) { err = save_fp_context(sc); - if (save_msa && !err) - err = _save_msa_context(sc); unlock_fpu_owner(); } else { unlock_fpu_owner(); err = copy_fp_to_sigcontext(sc); - if (save_msa && !err) - err = copy_msa_to_sigcontext(sc); } if (likely(!err)) break; @@ -169,38 +125,24 @@ static int protected_save_fp_context(struct sigcontext __user *sc, * EVA does not have FPU EVA instructions so saving fpu context directly * does not work. */ - disable_msa(); lose_fpu(1); err = save_fp_context(sc); /* this might fail */ - if (save_msa && !err) - err = copy_msa_to_sigcontext(sc); #endif return err; } -static int protected_restore_fp_context(struct sigcontext __user *sc, - unsigned used_math) +static int protected_restore_fp_context(struct sigcontext __user *sc) { int err, tmp __maybe_unused; - bool restore_msa = cpu_has_msa && (used_math & USEDMATH_MSA); #ifndef CONFIG_EVA while (1) { lock_fpu_owner(); if (is_fpu_owner()) { err = restore_fp_context(sc); - if (restore_msa && !err) { - enable_msa(); - err = _restore_msa_context(sc); - } else { - /* signal handler may have used MSA */ - disable_msa(); - } unlock_fpu_owner(); } else { unlock_fpu_owner(); err = copy_fp_from_sigcontext(sc); - if (!err && (used_math & USEDMATH_MSA)) - err = copy_msa_from_sigcontext(sc); } if (likely(!err)) break; @@ -216,11 +158,8 @@ static int protected_restore_fp_context(struct sigcontext __user *sc, * EVA does not have FPU EVA instructions so restoring fpu context * directly does not work. */ - enable_msa(); lose_fpu(0); err = restore_fp_context(sc); /* this might fail */ - if (restore_msa && !err) - err = copy_msa_from_sigcontext(sc); #endif return err; } @@ -252,8 +191,7 @@ int setup_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc) err |= __put_user(rddsp(DSP_MASK), &sc->sc_dsp); } - used_math = used_math() ? USEDMATH_FP : 0; - used_math |= thread_msa_context_live() ? USEDMATH_MSA : 0; + used_math = !!used_math(); err |= __put_user(used_math, &sc->sc_used_math); if (used_math) { @@ -261,7 +199,7 @@ int setup_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc) * Save FPU state to signal context. Signal handler * will "inherit" current FPU state. 
*/ - err |= protected_save_fp_context(sc, used_math); + err |= protected_save_fp_context(sc); } return err; } @@ -286,14 +224,14 @@ int fpcsr_pending(unsigned int __user *fpcsr) } static int -check_and_restore_fp_context(struct sigcontext __user *sc, unsigned used_math) +check_and_restore_fp_context(struct sigcontext __user *sc) { int err, sig; err = sig = fpcsr_pending(&sc->sc_fpc_csr); if (err > 0) err = 0; - err |= protected_restore_fp_context(sc, used_math); + err |= protected_restore_fp_context(sc); return err ?: sig; } @@ -333,10 +271,9 @@ int restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc) if (used_math) { /* restore fpu context if we have used it before */ if (!err) - err = check_and_restore_fp_context(sc, used_math); + err = check_and_restore_fp_context(sc); } else { - /* signal handler may have used FPU or MSA. Disable them. */ - disable_msa(); + /* signal handler may have used FPU. Give it up. */ lose_fpu(0); } diff --git a/arch/mips/kernel/signal32.c b/arch/mips/kernel/signal32.c index 299f956e4db3..bae2e6ee2109 100644 --- a/arch/mips/kernel/signal32.c +++ b/arch/mips/kernel/signal32.c @@ -30,7 +30,6 @@ #include #include #include -#include #include #include #include @@ -43,9 +42,6 @@ static int (*restore_fp_context32)(struct sigcontext32 __user *sc); extern asmlinkage int _save_fp_context32(struct sigcontext32 __user *sc); extern asmlinkage int _restore_fp_context32(struct sigcontext32 __user *sc); -extern asmlinkage int _save_msa_context32(struct sigcontext32 __user *sc); -extern asmlinkage int _restore_msa_context32(struct sigcontext32 __user *sc); - /* * Including would give use the 64-bit syscall numbers ... */ @@ -114,60 +110,20 @@ static int copy_fp_from_sigcontext32(struct sigcontext32 __user *sc) return err; } -/* - * These functions will save only the upper 64 bits of the vector registers, - * since the lower 64 bits have already been saved as the scalar FP context. 
- */ -static int copy_msa_to_sigcontext32(struct sigcontext32 __user *sc) -{ - int i; - int err = 0; - - for (i = 0; i < NUM_FPU_REGS; i++) { - err |= - __put_user(get_fpr64(¤t->thread.fpu.fpr[i], 1), - &sc->sc_msaregs[i]); - } - err |= __put_user(current->thread.fpu.msacsr, &sc->sc_msa_csr); - - return err; -} - -static int copy_msa_from_sigcontext32(struct sigcontext32 __user *sc) -{ - int i; - int err = 0; - u64 val; - - for (i = 0; i < NUM_FPU_REGS; i++) { - err |= __get_user(val, &sc->sc_msaregs[i]); - set_fpr64(¤t->thread.fpu.fpr[i], 1, val); - } - err |= __get_user(current->thread.fpu.msacsr, &sc->sc_msa_csr); - - return err; -} - /* * sigcontext handlers */ -static int protected_save_fp_context32(struct sigcontext32 __user *sc, - unsigned used_math) +static int protected_save_fp_context32(struct sigcontext32 __user *sc) { int err; - bool save_msa = cpu_has_msa && (used_math & USEDMATH_MSA); while (1) { lock_fpu_owner(); if (is_fpu_owner()) { err = save_fp_context32(sc); - if (save_msa && !err) - err = _save_msa_context32(sc); unlock_fpu_owner(); } else { unlock_fpu_owner(); err = copy_fp_to_sigcontext32(sc); - if (save_msa && !err) - err = copy_msa_to_sigcontext32(sc); } if (likely(!err)) break; @@ -181,28 +137,17 @@ static int protected_save_fp_context32(struct sigcontext32 __user *sc, return err; } -static int protected_restore_fp_context32(struct sigcontext32 __user *sc, - unsigned used_math) +static int protected_restore_fp_context32(struct sigcontext32 __user *sc) { int err, tmp __maybe_unused; - bool restore_msa = cpu_has_msa && (used_math & USEDMATH_MSA); while (1) { lock_fpu_owner(); if (is_fpu_owner()) { err = restore_fp_context32(sc); - if (restore_msa && !err) { - enable_msa(); - err = _restore_msa_context32(sc); - } else { - /* signal handler may have used MSA */ - disable_msa(); - } unlock_fpu_owner(); } else { unlock_fpu_owner(); err = copy_fp_from_sigcontext32(sc); - if (restore_msa && !err) - err = copy_msa_from_sigcontext32(sc); } if (likely(!err)) break; @@ -241,8 +186,7 @@ static int setup_sigcontext32(struct pt_regs *regs, err |= __put_user(mflo3(), &sc->sc_lo3); } - used_math = used_math() ? USEDMATH_FP : 0; - used_math |= thread_msa_context_live() ? USEDMATH_MSA : 0; + used_math = !!used_math(); err |= __put_user(used_math, &sc->sc_used_math); if (used_math) { @@ -250,21 +194,20 @@ static int setup_sigcontext32(struct pt_regs *regs, * Save FPU state to signal context. Signal handler * will "inherit" current FPU state. */ - err |= protected_save_fp_context32(sc, used_math); + err |= protected_save_fp_context32(sc); } return err; } static int -check_and_restore_fp_context32(struct sigcontext32 __user *sc, - unsigned used_math) +check_and_restore_fp_context32(struct sigcontext32 __user *sc) { int err, sig; err = sig = fpcsr_pending(&sc->sc_fpc_csr); if (err > 0) err = 0; - err |= protected_restore_fp_context32(sc, used_math); + err |= protected_restore_fp_context32(sc); return err ?: sig; } @@ -301,10 +244,9 @@ static int restore_sigcontext32(struct pt_regs *regs, if (used_math) { /* restore fpu context if we have used it before */ if (!err) - err = check_and_restore_fp_context32(sc, used_math); + err = check_and_restore_fp_context32(sc); } else { - /* signal handler may have used FPU or MSA. Disable them. */ - disable_msa(); + /* signal handler may have used FPU. Give it up. 
*/ lose_fpu(0); } -- cgit v1.2.3 From a83d081ed14e4281d2620d182c6044c0c21c551e Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Tue, 17 Jun 2014 12:16:18 +0200 Subject: MIPS: BPF JIT: Fix build error. mips: allmodconfig fails in 3.16-rc1 with lots of undefined symbols. arch/mips/net/bpf_jit.c: In function 'is_load_to_a': arch/mips/net/bpf_jit.c:559:7: error: 'BPF_S_LD_W_LEN' undeclared (first use in this function) arch/mips/net/bpf_jit.c:559:7: note: each undeclared identifier is reported only once for each function it appears in arch/mips/net/bpf_jit.c:560:7: error: 'BPF_S_LD_W_ABS' undeclared (first use in this function) [...] The reason behind this is that 3480593131e0 ("net: filter: get rid of BPF_S_* enum") was routed via net-next tree, that takes all BPF-related changes, at a time where MIPS BPF JIT was not part of net-next, while c6610de353da ("MIPS: net: Add BPF JIT") was routed via mips arch tree and went into mainline within the same merge window. Thus, fix it up by converting BPF_S_* in a similar fashion as in 3480593131e0 for MIPS. Reported-by: Guenter Roeck Signed-off-by: Daniel Borkmann Cc: Ralf Baechle Cc: Alexei Starovoitov Cc: Markos Chandras Cc: linux-kernel@vger.kernel.org Cc: Linux MIPS Mailing List Patchwork: https://patchwork.linux-mips.org/patch/7099/ Signed-off-by: Ralf Baechle --- arch/mips/net/bpf_jit.c | 143 +++++++++++++++++++++++------------------------- 1 file changed, 69 insertions(+), 74 deletions(-) (limited to 'arch') diff --git a/arch/mips/net/bpf_jit.c b/arch/mips/net/bpf_jit.c index a67b9753330b..f7c206404989 100644 --- a/arch/mips/net/bpf_jit.c +++ b/arch/mips/net/bpf_jit.c @@ -556,18 +556,10 @@ static inline void update_on_xread(struct jit_ctx *ctx) static bool is_load_to_a(u16 inst) { switch (inst) { - case BPF_S_LD_W_LEN: - case BPF_S_LD_W_ABS: - case BPF_S_LD_H_ABS: - case BPF_S_LD_B_ABS: - case BPF_S_ANC_CPU: - case BPF_S_ANC_IFINDEX: - case BPF_S_ANC_MARK: - case BPF_S_ANC_PROTOCOL: - case BPF_S_ANC_RXHASH: - case BPF_S_ANC_VLAN_TAG: - case BPF_S_ANC_VLAN_TAG_PRESENT: - case BPF_S_ANC_QUEUE: + case BPF_LD | BPF_W | BPF_LEN: + case BPF_LD | BPF_W | BPF_ABS: + case BPF_LD | BPF_H | BPF_ABS: + case BPF_LD | BPF_B | BPF_ABS: return true; default: return false; @@ -709,7 +701,7 @@ static void build_prologue(struct jit_ctx *ctx) emit_jit_reg_move(r_X, r_zero, ctx); /* Do not leak kernel data to userspace */ - if ((first_inst != BPF_S_RET_K) && !(is_load_to_a(first_inst))) + if ((first_inst != (BPF_RET | BPF_K)) && !(is_load_to_a(first_inst))) emit_jit_reg_move(r_A, r_zero, ctx); } @@ -783,41 +775,44 @@ static int build_body(struct jit_ctx *ctx) u32 k, b_off __maybe_unused; for (i = 0; i < prog->len; i++) { + u16 code; + inst = &(prog->insns[i]); pr_debug("%s: code->0x%02x, jt->0x%x, jf->0x%x, k->0x%x\n", __func__, inst->code, inst->jt, inst->jf, inst->k); k = inst->k; + code = bpf_anc_helper(inst); if (ctx->target == NULL) ctx->offsets[i] = ctx->idx * 4; - switch (inst->code) { - case BPF_S_LD_IMM: + switch (code) { + case BPF_LD | BPF_IMM: /* A <- k ==> li r_A, k */ ctx->flags |= SEEN_A; emit_load_imm(r_A, k, ctx); break; - case BPF_S_LD_W_LEN: + case BPF_LD | BPF_W | BPF_LEN: BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, len) != 4); /* A <- len ==> lw r_A, offset(skb) */ ctx->flags |= SEEN_SKB | SEEN_A; off = offsetof(struct sk_buff, len); emit_load(r_A, r_skb, off, ctx); break; - case BPF_S_LD_MEM: + case BPF_LD | BPF_MEM: /* A <- M[k] ==> lw r_A, offset(M) */ ctx->flags |= SEEN_MEM | SEEN_A; emit_load(r_A, r_M, SCRATCH_OFF(k), ctx); break; - case 
BPF_S_LD_W_ABS: + case BPF_LD | BPF_W | BPF_ABS: /* A <- P[k:4] */ load_order = 2; goto load; - case BPF_S_LD_H_ABS: + case BPF_LD | BPF_H | BPF_ABS: /* A <- P[k:2] */ load_order = 1; goto load; - case BPF_S_LD_B_ABS: + case BPF_LD | BPF_B | BPF_ABS: /* A <- P[k:1] */ load_order = 0; load: @@ -852,15 +847,15 @@ load_common: emit_b(b_imm(prog->len, ctx), ctx); emit_reg_move(r_ret, r_zero, ctx); break; - case BPF_S_LD_W_IND: + case BPF_LD | BPF_W | BPF_IND: /* A <- P[X + k:4] */ load_order = 2; goto load_ind; - case BPF_S_LD_H_IND: + case BPF_LD | BPF_H | BPF_IND: /* A <- P[X + k:2] */ load_order = 1; goto load_ind; - case BPF_S_LD_B_IND: + case BPF_LD | BPF_B | BPF_IND: /* A <- P[X + k:1] */ load_order = 0; load_ind: @@ -868,23 +863,23 @@ load_ind: ctx->flags |= SEEN_OFF | SEEN_X; emit_addiu(r_off, r_X, k, ctx); goto load_common; - case BPF_S_LDX_IMM: + case BPF_LDX | BPF_IMM: /* X <- k */ ctx->flags |= SEEN_X; emit_load_imm(r_X, k, ctx); break; - case BPF_S_LDX_MEM: + case BPF_LDX | BPF_MEM: /* X <- M[k] */ ctx->flags |= SEEN_X | SEEN_MEM; emit_load(r_X, r_M, SCRATCH_OFF(k), ctx); break; - case BPF_S_LDX_W_LEN: + case BPF_LDX | BPF_W | BPF_LEN: /* X <- len */ ctx->flags |= SEEN_X | SEEN_SKB; off = offsetof(struct sk_buff, len); emit_load(r_X, r_skb, off, ctx); break; - case BPF_S_LDX_B_MSH: + case BPF_LDX | BPF_B | BPF_MSH: /* X <- 4 * (P[k:1] & 0xf) */ ctx->flags |= SEEN_X | SEEN_CALL | SEEN_S0 | SEEN_SKB; /* Load offset to a1 */ @@ -917,50 +912,50 @@ load_ind: emit_b(b_imm(prog->len, ctx), ctx); emit_load_imm(r_ret, 0, ctx); /* delay slot */ break; - case BPF_S_ST: + case BPF_ST: /* M[k] <- A */ ctx->flags |= SEEN_MEM | SEEN_A; emit_store(r_A, r_M, SCRATCH_OFF(k), ctx); break; - case BPF_S_STX: + case BPF_STX: /* M[k] <- X */ ctx->flags |= SEEN_MEM | SEEN_X; emit_store(r_X, r_M, SCRATCH_OFF(k), ctx); break; - case BPF_S_ALU_ADD_K: + case BPF_ALU | BPF_ADD | BPF_K: /* A += K */ ctx->flags |= SEEN_A; emit_addiu(r_A, r_A, k, ctx); break; - case BPF_S_ALU_ADD_X: + case BPF_ALU | BPF_ADD | BPF_X: /* A += X */ ctx->flags |= SEEN_A | SEEN_X; emit_addu(r_A, r_A, r_X, ctx); break; - case BPF_S_ALU_SUB_K: + case BPF_ALU | BPF_SUB | BPF_K: /* A -= K */ ctx->flags |= SEEN_A; emit_addiu(r_A, r_A, -k, ctx); break; - case BPF_S_ALU_SUB_X: + case BPF_ALU | BPF_SUB | BPF_X: /* A -= X */ ctx->flags |= SEEN_A | SEEN_X; emit_subu(r_A, r_A, r_X, ctx); break; - case BPF_S_ALU_MUL_K: + case BPF_ALU | BPF_MUL | BPF_K: /* A *= K */ /* Load K to scratch register before MUL */ ctx->flags |= SEEN_A | SEEN_S0; emit_load_imm(r_s0, k, ctx); emit_mul(r_A, r_A, r_s0, ctx); break; - case BPF_S_ALU_MUL_X: + case BPF_ALU | BPF_MUL | BPF_X: /* A *= X */ update_on_xread(ctx); ctx->flags |= SEEN_A | SEEN_X; emit_mul(r_A, r_A, r_X, ctx); break; - case BPF_S_ALU_DIV_K: + case BPF_ALU | BPF_DIV | BPF_K: /* A /= k */ if (k == 1) break; @@ -973,7 +968,7 @@ load_ind: emit_load_imm(r_s0, k, ctx); emit_div(r_A, r_s0, ctx); break; - case BPF_S_ALU_MOD_K: + case BPF_ALU | BPF_MOD | BPF_K: /* A %= k */ if (k == 1 || optimize_div(&k)) { ctx->flags |= SEEN_A; @@ -984,7 +979,7 @@ load_ind: emit_mod(r_A, r_s0, ctx); } break; - case BPF_S_ALU_DIV_X: + case BPF_ALU | BPF_DIV | BPF_X: /* A /= X */ update_on_xread(ctx); ctx->flags |= SEEN_X | SEEN_A; @@ -994,7 +989,7 @@ load_ind: emit_load_imm(r_val, 0, ctx); /* delay slot */ emit_div(r_A, r_X, ctx); break; - case BPF_S_ALU_MOD_X: + case BPF_ALU | BPF_MOD | BPF_X: /* A %= X */ update_on_xread(ctx); ctx->flags |= SEEN_X | SEEN_A; @@ -1004,94 +999,94 @@ load_ind: emit_load_imm(r_val, 0, ctx); /* 
delay slot */ emit_mod(r_A, r_X, ctx); break; - case BPF_S_ALU_OR_K: + case BPF_ALU | BPF_OR | BPF_K: /* A |= K */ ctx->flags |= SEEN_A; emit_ori(r_A, r_A, k, ctx); break; - case BPF_S_ALU_OR_X: + case BPF_ALU | BPF_OR | BPF_X: /* A |= X */ update_on_xread(ctx); ctx->flags |= SEEN_A; emit_ori(r_A, r_A, r_X, ctx); break; - case BPF_S_ALU_XOR_K: + case BPF_ALU | BPF_XOR | BPF_K: /* A ^= k */ ctx->flags |= SEEN_A; emit_xori(r_A, r_A, k, ctx); break; - case BPF_S_ANC_ALU_XOR_X: - case BPF_S_ALU_XOR_X: + case BPF_ANC | SKF_AD_ALU_XOR_X: + case BPF_ALU | BPF_XOR | BPF_X: /* A ^= X */ update_on_xread(ctx); ctx->flags |= SEEN_A; emit_xor(r_A, r_A, r_X, ctx); break; - case BPF_S_ALU_AND_K: + case BPF_ALU | BPF_AND | BPF_K: /* A &= K */ ctx->flags |= SEEN_A; emit_andi(r_A, r_A, k, ctx); break; - case BPF_S_ALU_AND_X: + case BPF_ALU | BPF_AND | BPF_X: /* A &= X */ update_on_xread(ctx); ctx->flags |= SEEN_A | SEEN_X; emit_and(r_A, r_A, r_X, ctx); break; - case BPF_S_ALU_LSH_K: + case BPF_ALU | BPF_LSH | BPF_K: /* A <<= K */ ctx->flags |= SEEN_A; emit_sll(r_A, r_A, k, ctx); break; - case BPF_S_ALU_LSH_X: + case BPF_ALU | BPF_LSH | BPF_X: /* A <<= X */ ctx->flags |= SEEN_A | SEEN_X; update_on_xread(ctx); emit_sllv(r_A, r_A, r_X, ctx); break; - case BPF_S_ALU_RSH_K: + case BPF_ALU | BPF_RSH | BPF_K: /* A >>= K */ ctx->flags |= SEEN_A; emit_srl(r_A, r_A, k, ctx); break; - case BPF_S_ALU_RSH_X: + case BPF_ALU | BPF_RSH | BPF_X: ctx->flags |= SEEN_A | SEEN_X; update_on_xread(ctx); emit_srlv(r_A, r_A, r_X, ctx); break; - case BPF_S_ALU_NEG: + case BPF_ALU | BPF_NEG: /* A = -A */ ctx->flags |= SEEN_A; emit_neg(r_A, ctx); break; - case BPF_S_JMP_JA: + case BPF_JMP | BPF_JA: /* pc += K */ emit_b(b_imm(i + k + 1, ctx), ctx); emit_nop(ctx); break; - case BPF_S_JMP_JEQ_K: + case BPF_JMP | BPF_JEQ | BPF_K: /* pc += ( A == K ) ? pc->jt : pc->jf */ condt = MIPS_COND_EQ | MIPS_COND_K; goto jmp_cmp; - case BPF_S_JMP_JEQ_X: + case BPF_JMP | BPF_JEQ | BPF_X: ctx->flags |= SEEN_X; /* pc += ( A == X ) ? pc->jt : pc->jf */ condt = MIPS_COND_EQ | MIPS_COND_X; goto jmp_cmp; - case BPF_S_JMP_JGE_K: + case BPF_JMP | BPF_JGE | BPF_K: /* pc += ( A >= K ) ? pc->jt : pc->jf */ condt = MIPS_COND_GE | MIPS_COND_K; goto jmp_cmp; - case BPF_S_JMP_JGE_X: + case BPF_JMP | BPF_JGE | BPF_X: ctx->flags |= SEEN_X; /* pc += ( A >= X ) ? pc->jt : pc->jf */ condt = MIPS_COND_GE | MIPS_COND_X; goto jmp_cmp; - case BPF_S_JMP_JGT_K: + case BPF_JMP | BPF_JGT | BPF_K: /* pc += ( A > K ) ? pc->jt : pc->jf */ condt = MIPS_COND_GT | MIPS_COND_K; goto jmp_cmp; - case BPF_S_JMP_JGT_X: + case BPF_JMP | BPF_JGT | BPF_X: ctx->flags |= SEEN_X; /* pc += ( A > X ) ? pc->jt : pc->jf */ condt = MIPS_COND_GT | MIPS_COND_X; @@ -1167,7 +1162,7 @@ jmp_cmp: } } break; - case BPF_S_JMP_JSET_K: + case BPF_JMP | BPF_JSET | BPF_K: ctx->flags |= SEEN_S0 | SEEN_S1 | SEEN_A; /* pc += (A & K) ? pc -> jt : pc -> jf */ emit_load_imm(r_s1, k, ctx); @@ -1181,7 +1176,7 @@ jmp_cmp: emit_b(b_off, ctx); emit_nop(ctx); break; - case BPF_S_JMP_JSET_X: + case BPF_JMP | BPF_JSET | BPF_X: ctx->flags |= SEEN_S0 | SEEN_X | SEEN_A; /* pc += (A & X) ? 
pc -> jt : pc -> jf */ emit_and(r_s0, r_A, r_X, ctx); @@ -1194,7 +1189,7 @@ jmp_cmp: emit_b(b_off, ctx); emit_nop(ctx); break; - case BPF_S_RET_A: + case BPF_RET | BPF_A: ctx->flags |= SEEN_A; if (i != prog->len - 1) /* @@ -1204,7 +1199,7 @@ jmp_cmp: emit_b(b_imm(prog->len, ctx), ctx); emit_reg_move(r_ret, r_A, ctx); /* delay slot */ break; - case BPF_S_RET_K: + case BPF_RET | BPF_K: /* * It can emit two instructions so it does not fit on * the delay slot. @@ -1219,19 +1214,19 @@ jmp_cmp: emit_nop(ctx); } break; - case BPF_S_MISC_TAX: + case BPF_MISC | BPF_TAX: /* X = A */ ctx->flags |= SEEN_X | SEEN_A; emit_jit_reg_move(r_X, r_A, ctx); break; - case BPF_S_MISC_TXA: + case BPF_MISC | BPF_TXA: /* A = X */ ctx->flags |= SEEN_A | SEEN_X; update_on_xread(ctx); emit_jit_reg_move(r_A, r_X, ctx); break; /* AUX */ - case BPF_S_ANC_PROTOCOL: + case BPF_ANC | SKF_AD_PROTOCOL: /* A = ntohs(skb->protocol */ ctx->flags |= SEEN_SKB | SEEN_OFF | SEEN_A; BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, @@ -1256,7 +1251,7 @@ jmp_cmp: } #endif break; - case BPF_S_ANC_CPU: + case BPF_ANC | SKF_AD_CPU: ctx->flags |= SEEN_A | SEEN_OFF; /* A = current_thread_info()->cpu */ BUILD_BUG_ON(FIELD_SIZEOF(struct thread_info, @@ -1265,7 +1260,7 @@ jmp_cmp: /* $28/gp points to the thread_info struct */ emit_load(r_A, 28, off, ctx); break; - case BPF_S_ANC_IFINDEX: + case BPF_ANC | SKF_AD_IFINDEX: /* A = skb->dev->ifindex */ ctx->flags |= SEEN_SKB | SEEN_A | SEEN_S0; off = offsetof(struct sk_buff, dev); @@ -1279,31 +1274,31 @@ jmp_cmp: off = offsetof(struct net_device, ifindex); emit_load(r_A, r_s0, off, ctx); break; - case BPF_S_ANC_MARK: + case BPF_ANC | SKF_AD_MARK: ctx->flags |= SEEN_SKB | SEEN_A; BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4); off = offsetof(struct sk_buff, mark); emit_load(r_A, r_skb, off, ctx); break; - case BPF_S_ANC_RXHASH: + case BPF_ANC | SKF_AD_RXHASH: ctx->flags |= SEEN_SKB | SEEN_A; BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, hash) != 4); off = offsetof(struct sk_buff, hash); emit_load(r_A, r_skb, off, ctx); break; - case BPF_S_ANC_VLAN_TAG: - case BPF_S_ANC_VLAN_TAG_PRESENT: + case BPF_ANC | SKF_AD_VLAN_TAG: + case BPF_ANC | SKF_AD_VLAN_TAG_PRESENT: ctx->flags |= SEEN_SKB | SEEN_S0 | SEEN_A; BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_tci) != 2); off = offsetof(struct sk_buff, vlan_tci); emit_half_load(r_s0, r_skb, off, ctx); - if (inst->code == BPF_S_ANC_VLAN_TAG) + if (code == (BPF_ANC | SKF_AD_VLAN_TAG)) emit_and(r_A, r_s0, VLAN_VID_MASK, ctx); else emit_and(r_A, r_s0, VLAN_TAG_PRESENT, ctx); break; - case BPF_S_ANC_PKTTYPE: + case BPF_ANC | SKF_AD_PKTTYPE: off = pkt_type_offset(); if (off < 0) @@ -1312,7 +1307,7 @@ jmp_cmp: /* Keep only the last 3 bits */ emit_andi(r_A, r_tmp, PKT_TYPE_MAX, ctx); break; - case BPF_S_ANC_QUEUE: + case BPF_ANC | SKF_AD_QUEUE: ctx->flags |= SEEN_SKB | SEEN_A; BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, queue_mapping) != 2); -- cgit v1.2.3 From 9d9873697e8b5aaf39ef1f96e57ab4fffb8f45ae Mon Sep 17 00:00:00 2001 From: Markos Chandras Date: Mon, 23 Jun 2014 10:38:44 +0100 Subject: MIPS: uasm: Add s3s1s2 instruction builder It will be used later on by the SLT instruction. 
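As a usage illustration only, the snippet below shows how a builder generated from the new Ip_s3s1s2/I_s3s1s2 pair is meant to be called, using the uasm_i_slt helper that a following patch in this series wires up. The register numbers and the emit_example name are arbitrary choices for this sketch, not code from the patches.

#include <asm/uasm.h>

/* Emit "slt $t0, $a0, $a1": set $t0 to 1 if (s32)$a0 < (s32)$a1, else 0.
 * Operand order is destination first, then the two source registers,
 * matching the emit_slt() helper added to bpf_jit.c later in this series;
 * *buf advances past the emitted instruction.
 */
static void emit_example(u32 **buf)
{
	uasm_i_slt(buf, 8 /* $t0 */, 4 /* $a0 */, 5 /* $a1 */);
}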
Signed-off-by: Markos Chandras Cc: linux-mips@linux-mips.org Patchwork: https://patchwork.linux-mips.org/patch/7119/ Signed-off-by: Ralf Baechle --- arch/mips/include/asm/uasm.h | 3 +++ arch/mips/mm/uasm.c | 7 +++++++ 2 files changed, 10 insertions(+) (limited to 'arch') diff --git a/arch/mips/include/asm/uasm.h b/arch/mips/include/asm/uasm.h index f8d63b3b40b4..43259b3fca6d 100644 --- a/arch/mips/include/asm/uasm.h +++ b/arch/mips/include/asm/uasm.h @@ -67,6 +67,9 @@ void ISAOPC(op)(u32 **buf, unsigned int a, unsigned int b, signed int c) #define Ip_u2s3u1(op) \ void ISAOPC(op)(u32 **buf, unsigned int a, signed int b, unsigned int c) +#define Ip_s3s1s2(op) \ +void ISAOPC(op)(u32 **buf, int a, int b, int c) + #define Ip_u2u1s3(op) \ void ISAOPC(op)(u32 **buf, unsigned int a, unsigned int b, signed int c) diff --git a/arch/mips/mm/uasm.c b/arch/mips/mm/uasm.c index 00515805fe41..1e3e10919423 100644 --- a/arch/mips/mm/uasm.c +++ b/arch/mips/mm/uasm.c @@ -139,6 +139,13 @@ Ip_u1u2u3(op) \ } \ UASM_EXPORT_SYMBOL(uasm_i##op); +#define I_s3s1s2(op) \ +Ip_s3s1s2(op) \ +{ \ + build_insn(buf, insn##op, b, c, a); \ +} \ +UASM_EXPORT_SYMBOL(uasm_i##op); + #define I_u2u1u3(op) \ Ip_u2u1u3(op) \ { \ -- cgit v1.2.3 From 7682f9e81899f1f874bd565daa7fb0b20024ed80 Mon Sep 17 00:00:00 2001 From: Markos Chandras Date: Mon, 23 Jun 2014 10:38:45 +0100 Subject: MIPS: uasm: Add SLT uasm instruction It will be used later on by bpf-jit Signed-off-by: Markos Chandras Cc: linux-mips@linux-mips.org Cc: Markos Chandras Patchwork: https://patchwork.linux-mips.org/patch/7120/ Signed-off-by: Ralf Baechle --- arch/mips/include/asm/uasm.h | 1 + arch/mips/include/uapi/asm/inst.h | 1 + arch/mips/mm/uasm-micromips.c | 1 + arch/mips/mm/uasm-mips.c | 1 + arch/mips/mm/uasm.c | 3 ++- 5 files changed, 6 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/mips/include/asm/uasm.h b/arch/mips/include/asm/uasm.h index 43259b3fca6d..708c5d414905 100644 --- a/arch/mips/include/asm/uasm.h +++ b/arch/mips/include/asm/uasm.h @@ -150,6 +150,7 @@ Ip_u2s3u1(_scd); Ip_u2s3u1(_sd); Ip_u2u1u3(_sll); Ip_u3u2u1(_sllv); +Ip_s3s1s2(_slt); Ip_u2u1s3(_sltiu); Ip_u3u1u2(_sltu); Ip_u2u1u3(_sra); diff --git a/arch/mips/include/uapi/asm/inst.h b/arch/mips/include/uapi/asm/inst.h index 4b7160259292..4bfdb9d4c186 100644 --- a/arch/mips/include/uapi/asm/inst.h +++ b/arch/mips/include/uapi/asm/inst.h @@ -273,6 +273,7 @@ enum mm_32a_minor_op { mm_and_op = 0x250, mm_or32_op = 0x290, mm_xor32_op = 0x310, + mm_slt_op = 0x350, mm_sltu_op = 0x390, }; diff --git a/arch/mips/mm/uasm-micromips.c b/arch/mips/mm/uasm-micromips.c index 775c2800cba2..8399ddf03a02 100644 --- a/arch/mips/mm/uasm-micromips.c +++ b/arch/mips/mm/uasm-micromips.c @@ -102,6 +102,7 @@ static struct insn insn_table_MM[] = { { insn_sd, 0, 0 }, { insn_sll, M(mm_pool32a_op, 0, 0, 0, 0, mm_sll32_op), RT | RS | RD }, { insn_sllv, M(mm_pool32a_op, 0, 0, 0, 0, mm_sllv32_op), RT | RS | RD }, + { insn_slt, M(mm_pool32a_op, 0, 0, 0, 0, mm_slt_op), RT | RS | RD }, { insn_sltiu, M(mm_sltiu32_op, 0, 0, 0, 0, 0), RT | RS | SIMM }, { insn_sltu, M(mm_pool32a_op, 0, 0, 0, 0, mm_sltu_op), RT | RS | RD }, { insn_sra, M(mm_pool32a_op, 0, 0, 0, 0, mm_sra_op), RT | RS | RD }, diff --git a/arch/mips/mm/uasm-mips.c b/arch/mips/mm/uasm-mips.c index 38792c2364f5..4535a9d19ea5 100644 --- a/arch/mips/mm/uasm-mips.c +++ b/arch/mips/mm/uasm-mips.c @@ -110,6 +110,7 @@ static struct insn insn_table[] = { { insn_sd, M(sd_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, { insn_sll, M(spec_op, 0, 0, 0, 0, sll_op), RT | RD | RE }, 
{ insn_sllv, M(spec_op, 0, 0, 0, 0, sllv_op), RS | RT | RD }, + { insn_slt, M(spec_op, 0, 0, 0, 0, slt_op), RS | RT | RD }, { insn_sltiu, M(sltiu_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, { insn_sltu, M(spec_op, 0, 0, 0, 0, sltu_op), RS | RT | RD }, { insn_sra, M(spec_op, 0, 0, 0, 0, sra_op), RT | RD | RE }, diff --git a/arch/mips/mm/uasm.c b/arch/mips/mm/uasm.c index 1e3e10919423..a01b0d6cedd2 100644 --- a/arch/mips/mm/uasm.c +++ b/arch/mips/mm/uasm.c @@ -53,7 +53,7 @@ enum opcode { insn_ld, insn_ldx, insn_lh, insn_ll, insn_lld, insn_lui, insn_lw, insn_lwx, insn_mfc0, insn_mfhi, insn_mflo, insn_mtc0, insn_mul, insn_or, insn_ori, insn_pref, insn_rfe, insn_rotr, insn_sc, insn_scd, - insn_sd, insn_sll, insn_sllv, insn_sltiu, insn_sltu, insn_sra, + insn_sd, insn_sll, insn_sllv, insn_slt, insn_sltiu, insn_sltu, insn_sra, insn_srl, insn_srlv, insn_subu, insn_sw, insn_sync, insn_syscall, insn_tlbp, insn_tlbr, insn_tlbwi, insn_tlbwr, insn_wait, insn_wsbh, insn_xor, insn_xori, insn_yield, @@ -296,6 +296,7 @@ I_u2s3u1(_scd) I_u2s3u1(_sd) I_u2u1u3(_sll) I_u3u2u1(_sllv) +I_s3s1s2(_slt) I_u2u1s3(_sltiu) I_u3u1u2(_sltu) I_u2u1u3(_sra) -- cgit v1.2.3 From 84c68cbc667287dfd5eb793b6715c6d76a318e3f Mon Sep 17 00:00:00 2001 From: Markos Chandras Date: Mon, 23 Jun 2014 10:38:46 +0100 Subject: MIPS: mm: uasm: Fix lh micro-assembler instruction Commit d6b3314b49e12e8c349deb4ca28e7028db00728f "MIPS: uasm: Add lh uam instruction" added the 'lh' micro-assembler instruction but it used the 'lw' opcode for it. Fix it by using the correct 'lh' opcode. Signed-off-by: Markos Chandras Cc: linux-mips@linux-mips.org Patchwork: https://patchwork.linux-mips.org/patch/7121/ Signed-off-by: Ralf Baechle --- arch/mips/mm/uasm-mips.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/mips/mm/uasm-mips.c b/arch/mips/mm/uasm-mips.c index 4535a9d19ea5..6708a2dbf934 100644 --- a/arch/mips/mm/uasm-mips.c +++ b/arch/mips/mm/uasm-mips.c @@ -89,7 +89,7 @@ static struct insn insn_table[] = { { insn_lb, M(lb_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, { insn_ld, M(ld_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, { insn_ldx, M(spec3_op, 0, 0, 0, ldx_op, lx_op), RS | RT | RD }, - { insn_lh, M(lw_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, + { insn_lh, M(lh_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, { insn_lld, M(lld_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, { insn_ll, M(ll_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, { insn_lui, M(lui_op, 0, 0, 0, 0, 0), RT | SIMM }, -- cgit v1.2.3 From 35a8e16abe36d385d602997e1500a668d2b9c5cf Mon Sep 17 00:00:00 2001 From: Markos Chandras Date: Mon, 23 Jun 2014 10:38:47 +0100 Subject: MIPS: bpf: Use the LO register to get division's quotient Reading from the HI register to get the division result is wrong. The quotient is placed in the LO register. Signed-off-by: Markos Chandras Cc: "David S. 
Miller" Cc: Daniel Borkmann Cc: Alexei Starovoitov Cc: netdev@vger.kernel.org Cc: linux-mips@linux-mips.org Patchwork: https://patchwork.linux-mips.org/patch/7122/ Signed-off-by: Ralf Baechle --- arch/mips/net/bpf_jit.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/mips/net/bpf_jit.c b/arch/mips/net/bpf_jit.c index f7c206404989..5cc92c4590cb 100644 --- a/arch/mips/net/bpf_jit.c +++ b/arch/mips/net/bpf_jit.c @@ -408,7 +408,7 @@ static inline void emit_div(unsigned int dst, unsigned int src, u32 *p = &ctx->target[ctx->idx]; uasm_i_divu(&p, dst, src); p = &ctx->target[ctx->idx + 1]; - uasm_i_mfhi(&p, dst); + uasm_i_mflo(&p, dst); } ctx->idx += 2; /* 2 insts */ } -- cgit v1.2.3 From 55393ee535496f7db15f3b2e9d3cf418f772f71a Mon Sep 17 00:00:00 2001 From: Markos Chandras Date: Mon, 23 Jun 2014 10:38:48 +0100 Subject: MIPS: bpf: Return error code if the offset is a negative number Previously, the negative offset was not checked leading to failures due to trying to load data beyond the skb struct boundaries. Until we have proper asm helpers in place, it's best if we return ENOSUPP if K is negative when trying to JIT the filter or 0 during runtime if we do an indirect load where the value of X is unknown during build time. Signed-off-by: Markos Chandras Cc: David S. Miller Cc: Daniel Borkmann Cc: Alexei Starovoitov Cc: netdev@vger.kernel.org Cc: linux-mips@linux-mips.org Patchwork: https://patchwork.linux-mips.org/patch/7123/ Signed-off-by: Ralf Baechle --- arch/mips/net/bpf_jit.c | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) (limited to 'arch') diff --git a/arch/mips/net/bpf_jit.c b/arch/mips/net/bpf_jit.c index 5cc92c4590cb..95728ea6cb74 100644 --- a/arch/mips/net/bpf_jit.c +++ b/arch/mips/net/bpf_jit.c @@ -331,6 +331,12 @@ static inline void emit_srl(unsigned int dst, unsigned int src, emit_instr(ctx, srl, dst, src, sa); } +static inline void emit_slt(unsigned int dst, unsigned int src1, + unsigned int src2, struct jit_ctx *ctx) +{ + emit_instr(ctx, slt, dst, src1, src2); +} + static inline void emit_sltu(unsigned int dst, unsigned int src1, unsigned int src2, struct jit_ctx *ctx) { @@ -816,8 +822,21 @@ static int build_body(struct jit_ctx *ctx) /* A <- P[k:1] */ load_order = 0; load: + /* the interpreter will deal with the negative K */ + if ((int)k < 0) + return -ENOTSUPP; + emit_load_imm(r_off, k, ctx); load_common: + /* + * We may got here from the indirect loads so + * return if offset is negative. + */ + emit_slt(r_s0, r_off, r_zero, ctx); + emit_bcond(MIPS_COND_NE, r_s0, r_zero, + b_imm(prog->len, ctx), ctx); + emit_reg_move(r_ret, r_zero, ctx); + ctx->flags |= SEEN_CALL | SEEN_OFF | SEEN_S0 | SEEN_SKB | SEEN_A; @@ -880,6 +899,10 @@ load_ind: emit_load(r_X, r_skb, off, ctx); break; case BPF_LDX | BPF_B | BPF_MSH: + /* the interpreter will deal with the negative K */ + if ((int)k < 0) + return -ENOTSUPP; + /* X <- 4 * (P[k:1] & 0xf) */ ctx->flags |= SEEN_X | SEEN_CALL | SEEN_S0 | SEEN_SKB; /* Load offset to a1 */ -- cgit v1.2.3 From 9ee1606e8a6316287029c86ef48ddcfe4dec595d Mon Sep 17 00:00:00 2001 From: Markos Chandras Date: Mon, 23 Jun 2014 10:38:49 +0100 Subject: MIPS: bpf: Use 'andi' instead of 'and' for the VLAN cases The VLAN_VID_MASK and VLAN_TAG_PRESENT are immediates, so using 'and' which expects 3 registers will produce wrong results. Fix this by using the 'andi' instruction. Signed-off-by: Markos Chandras Cc: David S. 
Miller Cc: Daniel Borkmann Cc: Alexei Starovoitov Cc: netdev@vger.kernel.org Cc: linux-mips@linux-mips.org Patchwork: https://patchwork.linux-mips.org/patch/7124/ Signed-off-by: Ralf Baechle --- arch/mips/net/bpf_jit.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/mips/net/bpf_jit.c b/arch/mips/net/bpf_jit.c index 95728ea6cb74..fe5041bdc6fb 100644 --- a/arch/mips/net/bpf_jit.c +++ b/arch/mips/net/bpf_jit.c @@ -1317,9 +1317,9 @@ jmp_cmp: off = offsetof(struct sk_buff, vlan_tci); emit_half_load(r_s0, r_skb, off, ctx); if (code == (BPF_ANC | SKF_AD_VLAN_TAG)) - emit_and(r_A, r_s0, VLAN_VID_MASK, ctx); + emit_andi(r_A, r_s0, VLAN_VID_MASK, ctx); else - emit_and(r_A, r_s0, VLAN_TAG_PRESENT, ctx); + emit_andi(r_A, r_s0, VLAN_TAG_PRESENT, ctx); break; case BPF_ANC | SKF_AD_PKTTYPE: off = pkt_type_offset(); -- cgit v1.2.3 From 9eebfe478d1b83389c3ac1bd73a45b6665e356db Mon Sep 17 00:00:00 2001 From: Markos Chandras Date: Mon, 23 Jun 2014 10:38:50 +0100 Subject: MIPS: bpf: Add SEEN_SKB to flags when looking for the PKT_TYPE The SKF_AD_PKTTYPE uses the skb pointer so make sure it's in the flags so it will be initialized in time. Signed-off-by: Markos Chandras Cc: David S. Miller Cc: Daniel Borkmann Cc: Alexei Starovoitov Cc: netdev@vger.kernel.org Cc: linux-mips@linux-mips.org Patchwork: https://patchwork.linux-mips.org/patch/7125/ Signed-off-by: Ralf Baechle --- arch/mips/net/bpf_jit.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'arch') diff --git a/arch/mips/net/bpf_jit.c b/arch/mips/net/bpf_jit.c index fe5041bdc6fb..8cae27af03da 100644 --- a/arch/mips/net/bpf_jit.c +++ b/arch/mips/net/bpf_jit.c @@ -1322,6 +1322,8 @@ jmp_cmp: emit_andi(r_A, r_s0, VLAN_TAG_PRESENT, ctx); break; case BPF_ANC | SKF_AD_PKTTYPE: + ctx->flags |= SEEN_SKB; + off = pkt_type_offset(); if (off < 0) -- cgit v1.2.3 From 1ab24a4e3de1ec37d8ed255841d2d94d77e8a4f4 Mon Sep 17 00:00:00 2001 From: Markos Chandras Date: Mon, 23 Jun 2014 10:38:51 +0100 Subject: MIPS: bpf: Fix branch conditional for BPF_J{GT/GE} cases The sltiu and sltu instructions will set the scratch register to 1 if A <= X|K so fix the emitted branch conditional to check for scratch != zero rather than scratch >= zero which would complicate the resuling branch logic given that MIPS does not have a BGT or BGET instructions to compare general purpose registers directly. Signed-off-by: Markos Chandras Cc: David S. Miller Cc: Daniel Borkmann Cc: Alexei Starovoitov Cc: netdev@vger.kernel.org Cc: linux-mips@linux-mips.org Patchwork: https://patchwork.linux-mips.org/patch/7126/ Signed-off-by: Ralf Baechle --- arch/mips/net/bpf_jit.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/mips/net/bpf_jit.c b/arch/mips/net/bpf_jit.c index 8cae27af03da..500f97fdc0e1 100644 --- a/arch/mips/net/bpf_jit.c +++ b/arch/mips/net/bpf_jit.c @@ -1127,7 +1127,7 @@ jmp_cmp: } /* A < (K|X) ? r_scrach = 1 */ b_off = b_imm(i + inst->jf + 1, ctx); - emit_bcond(MIPS_COND_GT, r_s0, r_zero, b_off, + emit_bcond(MIPS_COND_NE, r_s0, r_zero, b_off, ctx); emit_nop(ctx); /* A > (K|X) ? scratch = 0 */ -- cgit v1.2.3 From 6e86c59d4d0d04e7ebefd05b8af245c968892f81 Mon Sep 17 00:00:00 2001 From: Markos Chandras Date: Mon, 23 Jun 2014 10:38:52 +0100 Subject: MIPS: bpf: Use correct mask for VLAN_TAG case Using VLAN_VID_MASK is not correct to get the vlan tag. Use ~VLAN_PRESENT_MASK instead and make sure it's u16 so the top 16-bits will be removed. 
This will ensure that the emit_andi() code will not treat this as a big 32-bit unsigned value. Signed-off-by: Markos Chandras Cc: David S. Miller Cc: Daniel Borkmann Cc: Alexei Starovoitov Cc: netdev@vger.kernel.org Cc: linux-mips@linux-mips.org Patchwork: https://patchwork.linux-mips.org/patch/7127/ Signed-off-by: Ralf Baechle --- arch/mips/net/bpf_jit.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/mips/net/bpf_jit.c b/arch/mips/net/bpf_jit.c index 500f97fdc0e1..a4d1b76e7373 100644 --- a/arch/mips/net/bpf_jit.c +++ b/arch/mips/net/bpf_jit.c @@ -1317,7 +1317,7 @@ jmp_cmp: off = offsetof(struct sk_buff, vlan_tci); emit_half_load(r_s0, r_skb, off, ctx); if (code == (BPF_ANC | SKF_AD_VLAN_TAG)) - emit_andi(r_A, r_s0, VLAN_VID_MASK, ctx); + emit_andi(r_A, r_s0, (u16)~VLAN_TAG_PRESENT, ctx); else emit_andi(r_A, r_s0, VLAN_TAG_PRESENT, ctx); break; -- cgit v1.2.3 From 91a41d7f972b1d78b4bcbb61ada4a33c9d7ba8a3 Mon Sep 17 00:00:00 2001 From: Markos Chandras Date: Mon, 23 Jun 2014 10:38:53 +0100 Subject: MIPS: bpf: Fix return values for VLAN_TAG_PRESENT case If VLAN_TAG_PRESENT is not zero, then return 1 as expected by classic BPF. Otherwise return 0. Signed-off-by: Markos Chandras Cc: David S. Miller Cc: Daniel Borkmann Cc: Alexei Starovoitov Cc: netdev@vger.kernel.org Cc: linux-mips@linux-mips.org Patchwork: https://patchwork.linux-mips.org/patch/7128/ Signed-off-by: Ralf Baechle --- arch/mips/net/bpf_jit.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/mips/net/bpf_jit.c b/arch/mips/net/bpf_jit.c index a4d1b76e7373..d852bb6d3fe3 100644 --- a/arch/mips/net/bpf_jit.c +++ b/arch/mips/net/bpf_jit.c @@ -1316,10 +1316,13 @@ jmp_cmp: vlan_tci) != 2); off = offsetof(struct sk_buff, vlan_tci); emit_half_load(r_s0, r_skb, off, ctx); - if (code == (BPF_ANC | SKF_AD_VLAN_TAG)) + if (code == (BPF_ANC | SKF_AD_VLAN_TAG)) { emit_andi(r_A, r_s0, (u16)~VLAN_TAG_PRESENT, ctx); - else + } else { emit_andi(r_A, r_s0, VLAN_TAG_PRESENT, ctx); + /* return 1 if present */ + emit_sltu(r_A, r_zero, r_A, ctx); + } break; case BPF_ANC | SKF_AD_PKTTYPE: ctx->flags |= SEEN_SKB; -- cgit v1.2.3 From 78b95b662c4c633206c997fe2bd25a9c680e047a Mon Sep 17 00:00:00 2001 From: Markos Chandras Date: Mon, 23 Jun 2014 10:38:54 +0100 Subject: MIPS: bpf: Use pr_debug instead of pr_warn for unhandled opcodes We should prevent spamming the logs during normal execution of bpf-jit. Signed-off-by: Markos Chandras Suggested-by: Alexei Starovoitov Cc: David S. Miller Cc: Daniel Borkmann Cc: Alexei Starovoitov Cc: netdev@vger.kernel.org Cc: linux-mips@linux-mips.org Patchwork: https://patchwork.linux-mips.org/patch/7129/ Signed-off-by: Ralf Baechle --- arch/mips/net/bpf_jit.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/mips/net/bpf_jit.c b/arch/mips/net/bpf_jit.c index d852bb6d3fe3..1d228d27d759 100644 --- a/arch/mips/net/bpf_jit.c +++ b/arch/mips/net/bpf_jit.c @@ -1345,8 +1345,8 @@ jmp_cmp: emit_half_load(r_A, r_skb, off, ctx); break; default: - pr_warn("%s: Unhandled opcode: 0x%02x\n", __FILE__, - inst->code); + pr_debug("%s: Unhandled opcode: 0x%02x\n", __FILE__, + inst->code); return -1; } } -- cgit v1.2.3 From 10c4d614d2ffcfc17add01f9648c3e530fb308d1 Mon Sep 17 00:00:00 2001 From: Markos Chandras Date: Mon, 23 Jun 2014 10:38:55 +0100 Subject: MIPS: bpf: Fix is_range() semantics is_range() was meant to check whether the number is within the s16 range or not. 
However the return values and consumers expected the exact opposite. We fix that by inverting the logic in the function to return 'true' for < s16 and 'false' for > s16. Signed-off-by: Markos Chandras Reported-by: Alexei Starovoitov Cc: David S. Miller Cc: Daniel Borkmann Cc: Alexei Starovoitov Cc: netdev@vger.kernel.org Cc: linux-mips@linux-mips.org Patchwork: https://patchwork.linux-mips.org/patch/7131/ Signed-off-by: Ralf Baechle --- arch/mips/net/bpf_jit.c | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) (limited to 'arch') diff --git a/arch/mips/net/bpf_jit.c b/arch/mips/net/bpf_jit.c index 1d228d27d759..00c4c83972bb 100644 --- a/arch/mips/net/bpf_jit.c +++ b/arch/mips/net/bpf_jit.c @@ -166,9 +166,7 @@ do { \ /* Determine if immediate is within the 16-bit signed range */ static inline bool is_range16(s32 imm) { - if (imm >= SBIT(15) || imm < -SBIT(15)) - return true; - return false; + return !(imm >= SBIT(15) || imm < -SBIT(15)); } static inline void emit_addu(unsigned int dst, unsigned int src1, @@ -187,7 +185,7 @@ static inline void emit_load_imm(unsigned int dst, u32 imm, struct jit_ctx *ctx) { if (ctx->target != NULL) { /* addiu can only handle s16 */ - if (is_range16(imm)) { + if (!is_range16(imm)) { u32 *p = &ctx->target[ctx->idx]; uasm_i_lui(&p, r_tmp_imm, (s32)imm >> 16); p = &ctx->target[ctx->idx + 1]; @@ -199,7 +197,7 @@ static inline void emit_load_imm(unsigned int dst, u32 imm, struct jit_ctx *ctx) } ctx->idx++; - if (is_range16(imm)) + if (!is_range16(imm)) ctx->idx++; } @@ -240,7 +238,7 @@ static inline void emit_daddiu(unsigned int dst, unsigned int src, static inline void emit_addiu(unsigned int dst, unsigned int src, u32 imm, struct jit_ctx *ctx) { - if (is_range16(imm)) { + if (!is_range16(imm)) { emit_load_imm(r_tmp, imm, ctx); emit_addu(dst, r_tmp, src, ctx); } else { @@ -347,7 +345,7 @@ static inline void emit_sltiu(unsigned dst, unsigned int src, unsigned int imm, struct jit_ctx *ctx) { /* 16 bit immediate */ - if (is_range16((s32)imm)) { + if (!is_range16((s32)imm)) { emit_load_imm(r_tmp, imm, ctx); emit_sltu(dst, src, r_tmp, ctx); } else { -- cgit v1.2.3 From e5bb48b0553d75918094c5a6f7b60a4359887218 Mon Sep 17 00:00:00 2001 From: Markos Chandras Date: Mon, 23 Jun 2014 10:38:56 +0100 Subject: MIPS: bpf: Drop update_on_xread and always initialize the X register Previously, update_on_xread() only set the reset flag if SEEN_X hasn't been set already. However, SEEN_X is used to indicate that X is used as destination or source register so there are some cases where X is only used as source register and we really need to make sure that it has been initialized in time. As a result of which, drop this function and always set X to zero if it's used in any of the opcodes. Signed-off-by: Markos Chandras Cc: David S. 
Miller Cc: Daniel Borkmann Cc: Alexei Starovoitov Cc: netdev@vger.kernel.org Cc: linux-mips@linux-mips.org Patchwork: https://patchwork.linux-mips.org/patch/7133/ Signed-off-by: Ralf Baechle --- arch/mips/net/bpf_jit.c | 22 +--------------------- 1 file changed, 1 insertion(+), 21 deletions(-) (limited to 'arch') diff --git a/arch/mips/net/bpf_jit.c b/arch/mips/net/bpf_jit.c index 00c4c83972bb..1bcd599d9971 100644 --- a/arch/mips/net/bpf_jit.c +++ b/arch/mips/net/bpf_jit.c @@ -119,8 +119,6 @@ /* Arguments used by JIT */ #define ARGS_USED_BY_JIT 2 /* only applicable to 64-bit */ -#define FLAG_NEED_X_RESET (1 << 0) - #define SBIT(x) (1 << (x)) /* Signed version of BIT() */ /** @@ -549,14 +547,6 @@ static inline u16 align_sp(unsigned int num) return num; } -static inline void update_on_xread(struct jit_ctx *ctx) -{ - if (!(ctx->flags & SEEN_X)) - ctx->flags |= FLAG_NEED_X_RESET; - - ctx->flags |= SEEN_X; -} - static bool is_load_to_a(u16 inst) { switch (inst) { @@ -701,7 +691,7 @@ static void build_prologue(struct jit_ctx *ctx) if (ctx->flags & SEEN_SKB) emit_reg_move(r_skb, MIPS_R_A0, ctx); - if (ctx->flags & FLAG_NEED_X_RESET) + if (ctx->flags & SEEN_X) emit_jit_reg_move(r_X, r_zero, ctx); /* Do not leak kernel data to userspace */ @@ -876,7 +866,6 @@ load_common: /* A <- P[X + k:1] */ load_order = 0; load_ind: - update_on_xread(ctx); ctx->flags |= SEEN_OFF | SEEN_X; emit_addiu(r_off, r_X, k, ctx); goto load_common; @@ -972,7 +961,6 @@ load_ind: break; case BPF_ALU | BPF_MUL | BPF_X: /* A *= X */ - update_on_xread(ctx); ctx->flags |= SEEN_A | SEEN_X; emit_mul(r_A, r_A, r_X, ctx); break; @@ -1002,7 +990,6 @@ load_ind: break; case BPF_ALU | BPF_DIV | BPF_X: /* A /= X */ - update_on_xread(ctx); ctx->flags |= SEEN_X | SEEN_A; /* Check if r_X is zero */ emit_bcond(MIPS_COND_EQ, r_X, r_zero, @@ -1012,7 +999,6 @@ load_ind: break; case BPF_ALU | BPF_MOD | BPF_X: /* A %= X */ - update_on_xread(ctx); ctx->flags |= SEEN_X | SEEN_A; /* Check if r_X is zero */ emit_bcond(MIPS_COND_EQ, r_X, r_zero, @@ -1027,7 +1013,6 @@ load_ind: break; case BPF_ALU | BPF_OR | BPF_X: /* A |= X */ - update_on_xread(ctx); ctx->flags |= SEEN_A; emit_ori(r_A, r_A, r_X, ctx); break; @@ -1039,7 +1024,6 @@ load_ind: case BPF_ANC | SKF_AD_ALU_XOR_X: case BPF_ALU | BPF_XOR | BPF_X: /* A ^= X */ - update_on_xread(ctx); ctx->flags |= SEEN_A; emit_xor(r_A, r_A, r_X, ctx); break; @@ -1050,7 +1034,6 @@ load_ind: break; case BPF_ALU | BPF_AND | BPF_X: /* A &= X */ - update_on_xread(ctx); ctx->flags |= SEEN_A | SEEN_X; emit_and(r_A, r_A, r_X, ctx); break; @@ -1062,7 +1045,6 @@ load_ind: case BPF_ALU | BPF_LSH | BPF_X: /* A <<= X */ ctx->flags |= SEEN_A | SEEN_X; - update_on_xread(ctx); emit_sllv(r_A, r_A, r_X, ctx); break; case BPF_ALU | BPF_RSH | BPF_K: @@ -1072,7 +1054,6 @@ load_ind: break; case BPF_ALU | BPF_RSH | BPF_X: ctx->flags |= SEEN_A | SEEN_X; - update_on_xread(ctx); emit_srlv(r_A, r_A, r_X, ctx); break; case BPF_ALU | BPF_NEG: @@ -1243,7 +1224,6 @@ jmp_cmp: case BPF_MISC | BPF_TXA: /* A = X */ ctx->flags |= SEEN_A | SEEN_X; - update_on_xread(ctx); emit_jit_reg_move(r_A, r_X, ctx); break; /* AUX */ -- cgit v1.2.3 From 95782bf434437b3292f5cb9ce21b53bdbc1beda1 Mon Sep 17 00:00:00 2001 From: Markos Chandras Date: Wed, 25 Jun 2014 09:37:21 +0100 Subject: MIPS: BPF: Prevent kernel fall over for >=32bit shifts Remove BUG_ON() if the shift immediate is >=32 to avoid kernel crashes due to malicious user input. 
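For context on why a guard is needed instead of a raw sll/srl: the MIPS shift-amount (sa) field is only 5 bits wide, so an immediate of 32 or more cannot be encoded and would effectively wrap modulo 32. A minimal C sketch of the semantics the JIT adopts for this case (a hypothetical helper for illustration, not kernel code):

#include <stdint.h>

/* Hypothetical illustration of what the JIT now emits for
 * BPF_ALU | BPF_LSH | BPF_K: out-of-range shift amounts yield 0
 * instead of hitting BUG_ON(). The BPF_RSH case is analogous. */
static inline uint32_t bpf_lsh_k(uint32_t a, uint32_t k)
{
	return (k >= 32) ? 0 : (a << k);
}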
If the shift immediate is >= 32, we simply load the destination register with 0 since only 32-bit instructions are used by the JIT, so this will do the correct thing even on MIPS64. Signed-off-by: Markos Chandras Cc: David S. Miller Cc: Daniel Borkmann Cc: Alexei Starovoitov Cc: netdev@vger.kernel.org Cc: linux-mips@linux-mips.org Patchwork: https://patchwork.linux-mips.org/patch/7179/ Signed-off-by: Ralf Baechle --- arch/mips/net/bpf_jit.c | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) (limited to 'arch') diff --git a/arch/mips/net/bpf_jit.c b/arch/mips/net/bpf_jit.c index 1bcd599d9971..9476e7f061a1 100644 --- a/arch/mips/net/bpf_jit.c +++ b/arch/mips/net/bpf_jit.c @@ -151,6 +151,8 @@ static inline int optimize_div(u32 *k) return 0; } +static inline void emit_jit_reg_move(ptr dst, ptr src, struct jit_ctx *ctx); + /* Simply emit the instruction if the JIT memory space has been allocated */ #define emit_instr(ctx, func, ...) \ do { \ @@ -309,8 +311,11 @@ static inline void emit_sll(unsigned int dst, unsigned int src, unsigned int sa, struct jit_ctx *ctx) { /* sa is 5-bits long */ - BUG_ON(sa >= BIT(5)); - emit_instr(ctx, sll, dst, src, sa); + if (sa >= BIT(5)) + /* Shifting >= 32 results in zero */ + emit_jit_reg_move(dst, r_zero, ctx); + else + emit_instr(ctx, sll, dst, src, sa); } static inline void emit_srlv(unsigned int dst, unsigned int src, @@ -323,8 +328,11 @@ static inline void emit_srl(unsigned int dst, unsigned int src, unsigned int sa, struct jit_ctx *ctx) { /* sa is 5-bits long */ - BUG_ON(sa >= BIT(5)); - emit_instr(ctx, srl, dst, src, sa); + if (sa >= BIT(5)) + /* Shifting >= 32 results in zero */ + emit_jit_reg_move(dst, r_zero, ctx); + else + emit_instr(ctx, srl, dst, src, sa); } static inline void emit_slt(unsigned int dst, unsigned int src1, -- cgit v1.2.3 From b4fe0ec86dae91abfa9f932cd0e2e9d50e336c8b Mon Sep 17 00:00:00 2001 From: Markos Chandras Date: Mon, 23 Jun 2014 10:38:58 +0100 Subject: MIPS: bpf: Fix PKT_TYPE case for big-endian cores The skb->pkt_type field is defined as follows: u8 pkt_type:3, fclone:2, ipvs_property:1, peeked:1, nf_trace:1 resulting in the following layout on big-endian systems: [pkt_type][fclone][ipvs_property][peeked][nf_trace], with pkt_type at the LSB end and nf_trace at the MSB end. As a result, the existing code did not work because it was trying to match pkt_type == 7 whereas in reality it is 7<<5 on big-endian systems. This has been fixed in the interpreter in 0dcceabb0c1bf2d4c12a748df9933fad303072a7 "net: filter: fix SKF_AD_PKTTYPE extension on big-endian" The fix is to look for 7<<5 on big-endian systems for the pkt_type field, and shift by 5 so the packet type will be at the lower 3 bits of the A register. Signed-off-by: Markos Chandras Cc: David S.
Miller Cc: Daniel Borkmann Cc: Alexei Starovoitov Cc: netdev@vger.kernel.org Cc: linux-mips@linux-mips.org Patchwork: https://patchwork.linux-mips.org/patch/7132/ Signed-off-by: Ralf Baechle --- arch/mips/net/bpf_jit.c | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/mips/net/bpf_jit.c b/arch/mips/net/bpf_jit.c index 9476e7f061a1..4505e2e6ab53 100644 --- a/arch/mips/net/bpf_jit.c +++ b/arch/mips/net/bpf_jit.c @@ -751,13 +751,17 @@ static u64 jit_get_skb_w(struct sk_buff *skb, unsigned offset) return (u64)err << 32 | ntohl(ret); } -#define PKT_TYPE_MAX 7 +#ifdef __BIG_ENDIAN_BITFIELD +#define PKT_TYPE_MAX (7 << 5) +#else +#define PKT_TYPE_MAX 7 +#endif static int pkt_type_offset(void) { struct sk_buff skb_probe = { .pkt_type = ~0, }; - char *ct = (char *)&skb_probe; + u8 *ct = (u8 *)&skb_probe; unsigned int off; for (off = 0; off < sizeof(struct sk_buff); off++) { @@ -1320,6 +1324,10 @@ jmp_cmp: emit_load_byte(r_tmp, r_skb, off, ctx); /* Keep only the last 3 bits */ emit_andi(r_A, r_tmp, PKT_TYPE_MAX, ctx); +#ifdef __BIG_ENDIAN_BITFIELD + /* Get the actual packet type to the lower 3 bits */ + emit_srl(r_A, r_A, 5, ctx); +#endif break; case BPF_ANC | SKF_AD_QUEUE: ctx->flags |= SEEN_SKB | SEEN_A; -- cgit v1.2.3 From b6a14a9845259eb21c9d8121330c4c3b22de182e Mon Sep 17 00:00:00 2001 From: Markos Chandras Date: Wed, 25 Jun 2014 09:39:38 +0100 Subject: MIPS: BPF: Use 32 or 64-bit load instruction to load an address to register When loading a pointer to register we need to use the appropriate 32 or 64bit instruction to preserve the pointers' top 32bits. Signed-off-by: Markos Chandras Cc: David S. Miller Cc: Daniel Borkmann Cc: Alexei Starovoitov Cc: netdev@vger.kernel.org Cc: linux-mips@linux-mips.org Patchwork: https://patchwork.linux-mips.org/patch/7180/ Signed-off-by: Ralf Baechle --- arch/mips/net/bpf_jit.c | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/mips/net/bpf_jit.c b/arch/mips/net/bpf_jit.c index 4505e2e6ab53..6e3963425b64 100644 --- a/arch/mips/net/bpf_jit.c +++ b/arch/mips/net/bpf_jit.c @@ -453,6 +453,17 @@ static inline void emit_wsbh(unsigned int dst, unsigned int src, emit_instr(ctx, wsbh, dst, src); } +/* load pointer to register */ +static inline void emit_load_ptr(unsigned int dst, unsigned int src, + int imm, struct jit_ctx *ctx) +{ + /* src contains the base addr of the 32/64-pointer */ + if (config_enabled(CONFIG_64BIT)) + emit_instr(ctx, ld, dst, imm, src); + else + emit_instr(ctx, lw, dst, imm, src); +} + /* load a function pointer to register */ static inline void emit_load_func(unsigned int reg, ptr imm, struct jit_ctx *ctx) @@ -1277,7 +1288,8 @@ jmp_cmp: /* A = skb->dev->ifindex */ ctx->flags |= SEEN_SKB | SEEN_A | SEEN_S0; off = offsetof(struct sk_buff, dev); - emit_load(r_s0, r_skb, off, ctx); + /* Load *dev pointer */ + emit_load_ptr(r_s0, r_skb, off, ctx); /* error (0) in the delay slot */ emit_bcond(MIPS_COND_EQ, r_s0, r_zero, b_imm(prog->len, ctx), ctx); -- cgit v1.2.3 From d8214ef14a1db4172c93e5694906bda9b00fac93 Mon Sep 17 00:00:00 2001 From: Markos Chandras Date: Mon, 23 Jun 2014 10:39:00 +0100 Subject: MIPS: bpf: Fix stack space allocation for BPF memwords on MIPS64 When allocating stack space for BPF memwords we need to use the appropriate 32 or 64-bit instruction to avoid losing the top 32 bits of the stack pointer. Signed-off-by: Markos Chandras Cc: "David S. 
Miller" Cc: Daniel Borkmann Cc: Alexei Starovoitov Cc: netdev@vger.kernel.org Cc: linux-mips@linux-mips.org Patchwork: https://patchwork.linux-mips.org/patch/7135/ Signed-off-by: Ralf Baechle --- arch/mips/net/bpf_jit.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/mips/net/bpf_jit.c b/arch/mips/net/bpf_jit.c index 6e3963425b64..b87390a56a2f 100644 --- a/arch/mips/net/bpf_jit.c +++ b/arch/mips/net/bpf_jit.c @@ -623,7 +623,10 @@ static void save_bpf_jit_regs(struct jit_ctx *ctx, unsigned offset) if (ctx->flags & SEEN_MEM) { if (real_off % (RSIZE * 2)) real_off += RSIZE; - emit_addiu(r_M, r_sp, real_off, ctx); + if (config_enabled(CONFIG_64BIT)) + emit_daddiu(r_M, r_sp, real_off, ctx); + else + emit_addiu(r_M, r_sp, real_off, ctx); } } -- cgit v1.2.3 From ab6c15bc6620ebe220970cc040b29bcb2757f373 Mon Sep 17 00:00:00 2001 From: Markos Chandras Date: Mon, 23 Jun 2014 09:48:51 +0100 Subject: MIPS: MSC: Prevent out-of-bounds writes to MIPS SC ioremap'd region Previously, the lower limit for the MIPS SC initialization loop was set incorrectly allowing one extra loop leading to writes beyond the MSC ioremap'd space. More precisely, the value of the 'imp' in the last loop increased beyond the msc_irqmap_t boundaries and as a result of which, the 'n' variable was loaded with an incorrect value. This value was used later on to calculate the offset in the MSC01_IC_SUP which led to random crashes like the following one: CPU 0 Unable to handle kernel paging request at virtual address e75c0200, epc == 8058dba4, ra == 8058db90 [...] Call Trace: [<8058dba4>] init_msc_irqs+0x104/0x154 [<8058b5bc>] arch_init_irq+0xd8/0x154 [<805897b0>] start_kernel+0x220/0x36c Kernel panic - not syncing: Attempted to kill the idle task! This patch fixes the problem Signed-off-by: Markos Chandras Reviewed-by: James Hogan Cc: stable@vger.kernel.org Cc: linux-mips@linux-mips.org Patchwork: https://patchwork.linux-mips.org/patch/7118/ Signed-off-by: Ralf Baechle --- arch/mips/kernel/irq-msc01.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/mips/kernel/irq-msc01.c b/arch/mips/kernel/irq-msc01.c index 4858642d543d..a734b2c2f9ea 100644 --- a/arch/mips/kernel/irq-msc01.c +++ b/arch/mips/kernel/irq-msc01.c @@ -126,7 +126,7 @@ void __init init_msc_irqs(unsigned long icubase, unsigned int irqbase, msc_irqma board_bind_eic_interrupt = &msc_bind_eic_interrupt; - for (; nirq >= 0; nirq--, imp++) { + for (; nirq > 0; nirq--, imp++) { int n = imp->im_irq; switch (imp->im_type) { -- cgit v1.2.3 From 16f0bbbc1fbe0d09cda5b5b2dbbd6716026dfa7b Mon Sep 17 00:00:00 2001 From: Ralf Baechle Date: Thu, 26 Jun 2014 14:43:01 +0100 Subject: MIPS: Lasat: Fix build error if CRC32 is not enabled. 
Kconfig doesn't select CRC32 so it's possible to build a Lasat kernel without CONFIG_CRC32 resulting in a build error: LD vmlinux arch/mips/built-in.o: In function `lasat_init_board_info': (.text+0x22c): undefined reference to `crc32_le' arch/mips/built-in.o: In function `lasat_write_eeprom_info': (.text+0x7fc): undefined reference to `crc32_le' make: *** [vmlinux] Error 1 Signed-off-by: Ralf Baechle --- arch/mips/Kconfig | 1 + 1 file changed, 1 insertion(+) (limited to 'arch') diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig index 7a469acee33c..4e238e6e661c 100644 --- a/arch/mips/Kconfig +++ b/arch/mips/Kconfig @@ -269,6 +269,7 @@ config LANTIQ config LASAT bool "LASAT Networks platforms" select CEVT_R4K + select CRC32 select CSRC_R4K select DMA_NONCOHERENT select SYS_HAS_EARLY_PRINTK -- cgit v1.2.3
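For reference, the unresolved crc32_le symbol is the lib/crc32 routine that the Lasat board code calls; a hypothetical caller along these lines (not the actual lasat_init_board_info()) is enough to create the reference and fail to link when CONFIG_CRC32 is not set:

#include <linux/crc32.h>
#include <linux/types.h>

/* Hypothetical sketch: checksum an EEPROM image the way code built on
 * lib/crc32 typically does. Without CONFIG_CRC32=y the crc32_le()
 * symbol is simply absent at link time, hence the added select in Kconfig. */
static u32 eeprom_crc(const u8 *buf, size_t len)
{
	return crc32_le(~0, buf, len);
}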