From 52bd080d5c87af556bf71e3b07bdd8586166c43b Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Sun, 23 Jan 2011 15:17:00 +0000 Subject: MIPS: Replace deprecated spinlock initialization SPIN_LOCK_UNLOCKED is deprecated. Use the lockdep-capable variant instead. Signed-off-by: Thomas Gleixner Cc: linux-mips@linux-mips.org Patchwork: https://patchwork.linux-mips.org/patch/2025/ Signed-off-by: Ralf Baechle --- arch/mips/kernel/vpe.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/mips/kernel/vpe.c b/arch/mips/kernel/vpe.c index 6a1fdfef8fde..ab52b7cf3b6b 100644 --- a/arch/mips/kernel/vpe.c +++ b/arch/mips/kernel/vpe.c @@ -148,9 +148,9 @@ struct { spinlock_t tc_list_lock; struct list_head tc_list; /* Thread contexts */ } vpecontrol = { - .vpe_list_lock = SPIN_LOCK_UNLOCKED, + .vpe_list_lock = __SPIN_LOCK_UNLOCKED(vpe_list_lock), .vpe_list = LIST_HEAD_INIT(vpecontrol.vpe_list), - .tc_list_lock = SPIN_LOCK_UNLOCKED, + .tc_list_lock = __SPIN_LOCK_UNLOCKED(tc_list_lock), .tc_list = LIST_HEAD_INIT(vpecontrol.tc_list) }; -- cgit v1.2.3 From 9a620a559be65023b5fd5d0eaf37dae884c4f404 Mon Sep 17 00:00:00 2001 From: Wu Zhangjin Date: Thu, 20 Jan 2011 03:28:27 +0800 Subject: MIPS, Tracing: Speed up function graph tracer This simply moves the "ip-=4" statement down to the end of the do { ... } while (...); loop, which removes one unneeded subtraction and the subsequent memory load and comparison. Signed-off-by: Wu Zhangjin Cc: Steven Rostedt Cc: linux-mips@linux-mips.org Patchwork: https://patchwork.linux-mips.org/patch/2006/ Signed-off-by: Ralf Baechle --- arch/mips/kernel/ftrace.c | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) (limited to 'arch') diff --git a/arch/mips/kernel/ftrace.c b/arch/mips/kernel/ftrace.c index 5a84a1f11231..635c1dcdc4fc 100644 --- a/arch/mips/kernel/ftrace.c +++ b/arch/mips/kernel/ftrace.c @@ -200,19 +200,17 @@ unsigned long ftrace_get_parent_addr(unsigned long self_addr, int faulted; /* - * For module, move the ip from calling site of mcount to the - * instruction "lui v1, hi_16bit_of_mcount"(offset is 20), but for - * kernel, move to the instruction "move ra, at"(offset is 12) + * For module, move the ip from calling site of mcount after the + * instruction "lui v1, hi_16bit_of_mcount"(offset is 24), but for + * kernel, move after the instruction "move ra, at"(offset is 16) */ - ip = self_addr - (in_module(self_addr) ? 20 : 12); + ip = self_addr - (in_module(self_addr) ? 24 : 16); /* * search the text until finding the non-store instruction or "s{d,w} * ra, offset(sp)" instruction */ do { - ip -= 4; - /* get the code at "ip": code = *(unsigned int *)ip; */ safe_load_code(code, ip, faulted); @@ -226,7 +224,9 @@ unsigned long ftrace_get_parent_addr(unsigned long self_addr, if ((code & S_R_SP) != S_R_SP) return parent_addr; - } while (((code & S_RA_SP) != S_RA_SP)); + /* Move to the next instruction */ + ip -= 4; + } while ((code & S_RA_SP) != S_RA_SP); sp = fp + (code & OFFSET_MASK); -- cgit v1.2.3 From d9cdb2f1038143c945fcb1a366aae4fa2998419e Mon Sep 17 00:00:00 2001 From: Wu Zhangjin Date: Thu, 20 Jan 2011 03:28:29 +0800 Subject: MIPS, Tracing: Substitute in_kernel_space() for in_module() The old in_module() may not work in some situations (e.g. when module & kernel are in the same address space with CONFIG_MAPPED_KERNEL=y). The in_kernel_space() check is more generic and easy to implement by cloning the existing core_kernel_text(), so replace in_module() with in_kernel_space(). 
Signed-off-by: Wu Zhangjin Cc: Steven Rostedt Cc: linux-mips@linux-mips.org Patchwork: https://patchwork.linux-mips.org/patch/2005/ Signed-off-by: Ralf Baechle --- arch/mips/kernel/ftrace.c | 54 +++++++++++++++++++++++------------------------ 1 file changed, 27 insertions(+), 27 deletions(-) (limited to 'arch') diff --git a/arch/mips/kernel/ftrace.c b/arch/mips/kernel/ftrace.c index 635c1dcdc4fc..5970286131dd 100644 --- a/arch/mips/kernel/ftrace.c +++ b/arch/mips/kernel/ftrace.c @@ -17,21 +17,7 @@ #include #include -/* - * If the Instruction Pointer is in module space (0xc0000000), return true; - * otherwise, it is in kernel space (0x80000000), return false. - * - * FIXME: This will not work when the kernel space and module space are the - * same. If they are the same, we need to modify scripts/recordmcount.pl, - * ftrace_make_nop/call() and the other related parts to ensure the - * enabling/disabling of the calling site to _mcount is right for both kernel - * and module. - */ - -static inline int in_module(unsigned long ip) -{ - return ip & 0x40000000; -} +#include #ifdef CONFIG_DYNAMIC_FTRACE @@ -69,6 +55,20 @@ static inline void ftrace_dyn_arch_init_insns(void) #endif } +/* + * Check if the address is in kernel space + * + * Clone core_kernel_text() from kernel/extable.c, but doesn't call + * init_kernel_text() for Ftrace doesn't trace functions in init sections. + */ +static inline int in_kernel_space(unsigned long ip) +{ + if (ip >= (unsigned long)_stext && + ip <= (unsigned long)_etext) + return 1; + return 0; +} + static int ftrace_modify_code(unsigned long ip, unsigned int new_code) { int faulted; @@ -91,10 +91,16 @@ int ftrace_make_nop(struct module *mod, unsigned long ip = rec->ip; /* - * We have compiled module with -mlong-calls, but compiled the kernel - * without it, we need to cope with them respectively. + * If ip is in kernel space, no long call, otherwise, long call is + * needed. */ - if (in_module(ip)) { + if (in_kernel_space(ip)) { + /* + * move at, ra + * jal _mcount --> nop + */ + new = INSN_NOP; + } else { #if defined(KBUILD_MCOUNT_RA_ADDRESS) && defined(CONFIG_32BIT) /* * lui v1, hi_16bit_of_mcount --> b 1f (0x10000005) @@ -117,12 +123,6 @@ int ftrace_make_nop(struct module *mod, */ new = INSN_B_1F_4; #endif - } else { - /* - * move at, ra - * jal _mcount --> nop - */ - new = INSN_NOP; } return ftrace_modify_code(ip, new); } @@ -132,8 +132,8 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) unsigned int new; unsigned long ip = rec->ip; - /* ip, module: 0xc0000000, kernel: 0x80000000 */ - new = in_module(ip) ? insn_lui_v1_hi16_mcount : insn_jal_ftrace_caller; + new = in_kernel_space(ip) ? insn_jal_ftrace_caller : + insn_lui_v1_hi16_mcount; return ftrace_modify_code(ip, new); } @@ -204,7 +204,7 @@ unsigned long ftrace_get_parent_addr(unsigned long self_addr, * instruction "lui v1, hi_16bit_of_mcount"(offset is 24), but for * kernel, move after the instruction "move ra, at"(offset is 16) */ - ip = self_addr - (in_module(self_addr) ? 24 : 16); + ip = self_addr - (in_kernel_space(self_addr) ? 16 : 24); /* * search the text until finding the non-store instruction or "s{d,w} -- cgit v1.2.3 From 2816e325969396af5bd1d5f70c7360074ae1d63c Mon Sep 17 00:00:00 2001 From: Wu Zhangjin Date: Thu, 20 Jan 2011 03:28:30 +0800 Subject: MIPS, Tracing: Clean up prepare_ftrace_return() The old prepare_ftrace_return() for MIPS is confused and have introduced some problem. This patch cleans up the names of the arguments, variables and related functions. 
For MIPS, the 2nd argument of prepare_ftrace_return() is not really the 'selfpc' described in ftrace-design.txt but instead the self return address. This breaks the compatibility of the generic interface but saves one unneeded calculation: to get the current function name, the parent return address and the self return address are enough, so there is no need to transform the self return address into the self address. But set_graph_function of the function graph tracer is an exception: it does need the 2nd argument of prepare_ftrace_return() as 'selfpc', because it uses 'selfpc' to match the user's configuration of function graph entries; in reality, though, it doesn't need the 'selfpc' but the recorded ip address of the mcount calling site in the __mcount_loc section. So the 2nd argument of prepare_ftrace_return() is not important; the real requirement is that the right recorded ip address be calculated and assigned to trace.func, which will be fixed in the next patches. Reported-by: Zhiping Zhong Signed-off-by: Wu Zhangjin Cc: Steven Rostedt Cc: linux-mips@linux-mips.org Patchwork: https://patchwork.linux-mips.org/patch/2007/ Signed-off-by: Ralf Baechle --- arch/mips/kernel/ftrace.c | 52 +++++++++++++++++++++++------------------------ 1 file changed, 25 insertions(+), 27 deletions(-) (limited to 'arch') diff --git a/arch/mips/kernel/ftrace.c b/arch/mips/kernel/ftrace.c index 5970286131dd..40ef34ccb76b 100644 --- a/arch/mips/kernel/ftrace.c +++ b/arch/mips/kernel/ftrace.c @@ -190,21 +190,19 @@ int ftrace_disable_ftrace_graph_caller(void) #define S_R_SP (0xafb0 << 16) /* s{d,w} R, offset(sp) */ #define OFFSET_MASK 0xffff /* stack offset range: 0 ~ PT_SIZE */ -unsigned long ftrace_get_parent_addr(unsigned long self_addr, - unsigned long parent, - unsigned long parent_addr, - unsigned long fp) +unsigned long ftrace_get_parent_ra_addr(unsigned long self_ra, unsigned long + old_parent_ra, unsigned long parent_ra_addr, unsigned long fp) { - unsigned long sp, ip, ra; + unsigned long sp, ip, tmp; unsigned int code; int faulted; /* - * For module, move the ip from calling site of mcount after the + * For module, move the ip from the return address after the * instruction "lui v1, hi_16bit_of_mcount"(offset is 24), but for * kernel, move after the instruction "move ra, at"(offset is 16) */ - ip = self_addr - (in_kernel_space(self_addr) ? 16 : 24); + ip = self_ra - (in_kernel_space(self_ra) ? 16 : 24); /* * search the text until finding the non-store instruction or "s{d,w} @@ -222,7 +220,7 @@ unsigned long ftrace_get_parent_addr(unsigned long self_addr, * store the ra on the stack */ if ((code & S_R_SP) != S_R_SP) - return parent_addr; + return parent_ra_addr; /* Move to the next instruction */ ip -= 4; @@ -230,12 +228,12 @@ unsigned long ftrace_get_parent_addr(unsigned long self_addr, sp = fp + (code & OFFSET_MASK); - /* ra = *(unsigned long *)sp; */ - safe_load_stack(ra, sp, faulted); + /* tmp = *(unsigned long *)sp; */ + safe_load_stack(tmp, sp, faulted); if (unlikely(faulted)) return 0; - if (ra == parent) + if (tmp == old_parent_ra) return sp; return 0; } @@ -246,10 +244,10 @@ unsigned long ftrace_get_parent_addr(unsigned long self_addr, * Hook the return address and push it in the stack of return addrs * in current thread info. 
*/ -void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr, +void prepare_ftrace_return(unsigned long *parent_ra_addr, unsigned long self_ra, unsigned long fp) { - unsigned long old; + unsigned long old_parent_ra; struct ftrace_graph_ent trace; unsigned long return_hooker = (unsigned long) &return_to_handler; @@ -259,8 +257,8 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr, return; /* - * "parent" is the stack address saved the return address of the caller - * of _mcount. + * "parent_ra_addr" is the stack address saved the return address of + * the caller of _mcount. * * if the gcc < 4.5, a leaf function does not save the return address * in the stack address, so, we "emulate" one in _mcount's stack space, @@ -275,37 +273,37 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr, * do it in ftrace_graph_caller of mcount.S. */ - /* old = *parent; */ - safe_load_stack(old, parent, faulted); + /* old_parent_ra = *parent_ra_addr; */ + safe_load_stack(old_parent_ra, parent_ra_addr, faulted); if (unlikely(faulted)) goto out; #ifndef KBUILD_MCOUNT_RA_ADDRESS - parent = (unsigned long *)ftrace_get_parent_addr(self_addr, old, - (unsigned long)parent, fp); + parent_ra_addr = (unsigned long *)ftrace_get_parent_ra_addr(self_ra, + old_parent_ra, (unsigned long)parent_ra_addr, fp); /* * If fails when getting the stack address of the non-leaf function's * ra, stop function graph tracer and return */ - if (parent == 0) + if (parent_ra_addr == 0) goto out; #endif - /* *parent = return_hooker; */ - safe_store_stack(return_hooker, parent, faulted); + /* *parent_ra_addr = return_hooker; */ + safe_store_stack(return_hooker, parent_ra_addr, faulted); if (unlikely(faulted)) goto out; - if (ftrace_push_return_trace(old, self_addr, &trace.depth, fp) == - -EBUSY) { - *parent = old; + if (ftrace_push_return_trace(old_parent_ra, self_ra, &trace.depth, fp) + == -EBUSY) { + *parent_ra_addr = old_parent_ra; return; } - trace.func = self_addr; + trace.func = self_ra; /* Only trace if the calling function expects to */ if (!ftrace_graph_entry(&trace)) { current->curr_ret_stack--; - *parent = old; + *parent_ra_addr = old_parent_ra; } return; out: -- cgit v1.2.3 From 7f21a60968221eabad5c53fe760db3d094994011 Mon Sep 17 00:00:00 2001 From: Wu Zhangjin Date: Thu, 20 Jan 2011 03:28:31 +0800 Subject: MIPS, Tracing: Clean up ftrace_make_nop() This moves the comments out of ftrace_make_nop() and cleans it. At the same time, a macro MCOUNT_OFFSET_INSNS is defined for sharing with the next patch. 
Signed-off-by: Wu Zhangjin Cc: Steven Rostedt Cc: linux-mips@linux-mips.org Patchwork: https://patchwork.linux-mips.org/patch/2008/ Signed-off-by: Ralf Baechle --- arch/mips/kernel/ftrace.c | 70 +++++++++++++++++++++++++---------------------- 1 file changed, 38 insertions(+), 32 deletions(-) (limited to 'arch') diff --git a/arch/mips/kernel/ftrace.c b/arch/mips/kernel/ftrace.c index 40ef34ccb76b..bc91e4aa08e9 100644 --- a/arch/mips/kernel/ftrace.c +++ b/arch/mips/kernel/ftrace.c @@ -24,8 +24,6 @@ #define JAL 0x0c000000 /* jump & link: ip --> ra, jump to target */ #define ADDR_MASK 0x03ffffff /* op_code|addr : 31...26|25 ....0 */ -#define INSN_B_1F_4 0x10000004 /* b 1f; offset = 4 */ -#define INSN_B_1F_5 0x10000005 /* b 1f; offset = 5 */ #define INSN_NOP 0x00000000 /* nop */ #define INSN_JAL(addr) \ ((unsigned int)(JAL | (((addr) >> 2) & ADDR_MASK))) @@ -84,6 +82,42 @@ static int ftrace_modify_code(unsigned long ip, unsigned int new_code) return 0; } +/* + * The details about the calling site of mcount on MIPS + * + * 1. For kernel: + * + * move at, ra + * jal _mcount --> nop + * + * 2. For modules: + * + * 2.1 For KBUILD_MCOUNT_RA_ADDRESS and CONFIG_32BIT + * + * lui v1, hi_16bit_of_mcount --> b 1f (0x10000005) + * addiu v1, v1, low_16bit_of_mcount + * move at, ra + * move $12, ra_address + * jalr v1 + * sub sp, sp, 8 + * 1: offset = 5 instructions + * 2.2 For the Other situations + * + * lui v1, hi_16bit_of_mcount --> b 1f (0x10000004) + * addiu v1, v1, low_16bit_of_mcount + * move at, ra + * jalr v1 + * nop | move $12, ra_address | sub sp, sp, 8 + * 1: offset = 4 instructions + */ + +#if defined(KBUILD_MCOUNT_RA_ADDRESS) && defined(CONFIG_32BIT) +#define MCOUNT_OFFSET_INSNS 5 +#else +#define MCOUNT_OFFSET_INSNS 4 +#endif +#define INSN_B_1F (0x10000000 | MCOUNT_OFFSET_INSNS) + int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec, unsigned long addr) { @@ -94,36 +128,8 @@ int ftrace_make_nop(struct module *mod, * If ip is in kernel space, no long call, otherwise, long call is * needed. */ - if (in_kernel_space(ip)) { - /* - * move at, ra - * jal _mcount --> nop - */ - new = INSN_NOP; - } else { -#if defined(KBUILD_MCOUNT_RA_ADDRESS) && defined(CONFIG_32BIT) - /* - * lui v1, hi_16bit_of_mcount --> b 1f (0x10000005) - * addiu v1, v1, low_16bit_of_mcount - * move at, ra - * move $12, ra_address - * jalr v1 - * sub sp, sp, 8 - * 1: offset = 5 instructions - */ - new = INSN_B_1F_5; -#else - /* - * lui v1, hi_16bit_of_mcount --> b 1f (0x10000004) - * addiu v1, v1, low_16bit_of_mcount - * move at, ra - * jalr v1 - * nop | move $12, ra_address | sub sp, sp, 8 - * 1: offset = 4 instructions - */ - new = INSN_B_1F_4; -#endif - } + new = in_kernel_space(ip) ? INSN_NOP : INSN_B_1F; + return ftrace_modify_code(ip, new); } -- cgit v1.2.3 From b9f07eb2f25a64098e2ba223c1a2fe2a8f249e01 Mon Sep 17 00:00:00 2001 From: Wu Zhangjin Date: Sat, 22 Jan 2011 02:01:53 +0800 Subject: MIPS, Tracing: Fix set_graph_function of function graph tracer trace.func should be set to the recorded ip of the mcount calling site in the __mcount_loc section to filter the function entries configured through the tracing/set_graph_function interface, but before, this is set to the self_ra(the return address of mcount), which has made set_graph_function not work as expected. This fixes it via calculating the right recorded ip in the __mcount_loc section and assign it to trace.func. 
Reported-by: Zhiping Zhong Signed-off-by: Wu Zhangjin Cc: Steven Rostedt Cc: Sergei Shtylyov Cc: linux-mips@linux-mips.org Patchwork: https://patchwork.linux-mips.org/patch/2017/ Signed-off-by: Ralf Baechle --- arch/mips/kernel/ftrace.c | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/mips/kernel/ftrace.c b/arch/mips/kernel/ftrace.c index bc91e4aa08e9..94ca2b018af7 100644 --- a/arch/mips/kernel/ftrace.c +++ b/arch/mips/kernel/ftrace.c @@ -257,7 +257,7 @@ void prepare_ftrace_return(unsigned long *parent_ra_addr, unsigned long self_ra, struct ftrace_graph_ent trace; unsigned long return_hooker = (unsigned long) &return_to_handler; - int faulted; + int faulted, insns; if (unlikely(atomic_read(¤t->tracing_graph_pause))) return; @@ -304,7 +304,14 @@ void prepare_ftrace_return(unsigned long *parent_ra_addr, unsigned long self_ra, return; } - trace.func = self_ra; + /* + * Get the recorded ip of the current mcount calling site in the + * __mcount_loc section, which will be used to filter the function + * entries configured through the tracing/set_graph_function interface. + */ + + insns = in_kernel_space(self_ra) ? 2 : MCOUNT_OFFSET_INSNS + 1; + trace.func = self_ra - (MCOUNT_INSN_SIZE * insns); /* Only trace if the calling function expects to */ if (!ftrace_graph_entry(&trace)) { -- cgit v1.2.3 From e1c87d2a5567c7940d129a6045efadc4b8c0f888 Mon Sep 17 00:00:00 2001 From: David Daney Date: Wed, 19 Jan 2011 15:24:42 -0800 Subject: MIPS: Add an unreachable return statement to satisfy buggy GCCs. It was reported that GCC-4.3.3 (with CodeSourcery extensions) fails without this. Reported-by: Jonas Gorski Signed-off-by: David Daney Cc: linux-mips@linux-mips.org Patchwork: https://patchwork.linux-mips.org/patch/2010/ Signed-off-by: Ralf Baechle --- arch/mips/mm/tlbex.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'arch') diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c index 083d3412d0bc..04f9e17db9d0 100644 --- a/arch/mips/mm/tlbex.c +++ b/arch/mips/mm/tlbex.c @@ -109,6 +109,8 @@ static bool scratchpad_available(void) static int scratchpad_offset(int i) { BUG(); + /* Really unreachable, but evidently some GCC want this. */ + return 0; } #endif /* -- cgit v1.2.3 From e56293b129607be089f2c12906d709e3c84b68c4 Mon Sep 17 00:00:00 2001 From: Stefan Oberhumer Date: Mon, 17 Jan 2011 09:19:53 +0100 Subject: MIPS: Clear the correct flag in sysmips(MIPS_FIXADE, ...). The sysmips(MIPS_FIXADE, ...) case contains an obvious copy-and-paste error in the handling of the TIF_LOGADE flag. Fix that Patchwork: https://patchwork.linux-mips.org/patch/1997/ Signed-off-by: Ralf Baechle --- arch/mips/kernel/syscall.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/mips/kernel/syscall.c b/arch/mips/kernel/syscall.c index 1dc6edff45e0..cde2a32c1043 100644 --- a/arch/mips/kernel/syscall.c +++ b/arch/mips/kernel/syscall.c @@ -405,7 +405,7 @@ _sys_sysmips(nabi_no_regargs struct pt_regs regs) if (arg1 & 2) set_thread_flag(TIF_LOGADE); else - clear_thread_flag(TIF_FIXADE); + clear_thread_flag(TIF_LOGADE); return 0; -- cgit v1.2.3 From 597c6740185c44686b5476a251eb53a13f685ae9 Mon Sep 17 00:00:00 2001 From: Robert Millan Date: Sun, 7 Nov 2010 13:38:29 +0100 Subject: MIPS: Loongson: Remove ad-hoc cmdline default Loongson builds have an ad-hoc cmdline default of "console=ttyS0,115200 root=/dev/hda1". 
These settings come from a vendor; I remember builds from Lemote branch requiring a "console=tty" override in order to get a working console. At least on Yeeloong, they're particularly useless: there's no external serial port, and the IDE drive is now recognised as /dev/sda. Signed-off-by: Robert Millan To: linux-mips@linux-mips.org Patchwork: https://patchwork.linux-mips.org/patch/1759/ Signed-off-by: Ralf Baechle --- arch/mips/loongson/common/cmdline.c | 5 ----- 1 file changed, 5 deletions(-) (limited to 'arch') diff --git a/arch/mips/loongson/common/cmdline.c b/arch/mips/loongson/common/cmdline.c index 1a06defc4f7f..353e1d2e41a5 100644 --- a/arch/mips/loongson/common/cmdline.c +++ b/arch/mips/loongson/common/cmdline.c @@ -44,10 +44,5 @@ void __init prom_init_cmdline(void) strcat(arcs_cmdline, " "); } - if ((strstr(arcs_cmdline, "console=")) == NULL) - strcat(arcs_cmdline, " console=ttyS0,115200"); - if ((strstr(arcs_cmdline, "root=")) == NULL) - strcat(arcs_cmdline, " root=/dev/hda1"); - prom_init_machtype(); } -- cgit v1.2.3 From 39d30c13767cbe9d7e77e05886c399b40c76cb1e Mon Sep 17 00:00:00 2001 From: Anoop P A Date: Thu, 18 Nov 2010 13:42:28 +0530 Subject: MIPS: Select R4K timer lib for all MSP platforms Signed-off-by: Anoop P A To: linux-mips@linux-mips.org To: linux-kernel@vger.kernel.org Patchwork: https://patchwork.linux-mips.org/patch/1803/ Tested-by: Shane McDonald Signed-off-by: Ralf Baechle --- arch/mips/Kconfig | 2 ++ arch/mips/pmc-sierra/Kconfig | 4 ---- arch/mips/pmc-sierra/msp71xx/msp_time.c | 2 +- 3 files changed, 3 insertions(+), 5 deletions(-) (limited to 'arch') diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig index f5ecc0566bc2..972695e359eb 100644 --- a/arch/mips/Kconfig +++ b/arch/mips/Kconfig @@ -333,6 +333,8 @@ config PNX8550_STB810 config PMC_MSP bool "PMC-Sierra MSP chipsets" depends on EXPERIMENTAL + select CEVT_R4K + select CSRC_R4K select DMA_NONCOHERENT select SWAP_IO_SPACE select NO_EXCEPT_FILL diff --git a/arch/mips/pmc-sierra/Kconfig b/arch/mips/pmc-sierra/Kconfig index c139988bb85d..8d798497c614 100644 --- a/arch/mips/pmc-sierra/Kconfig +++ b/arch/mips/pmc-sierra/Kconfig @@ -4,15 +4,11 @@ choice config PMC_MSP4200_EVAL bool "PMC-Sierra MSP4200 Eval Board" - select CEVT_R4K - select CSRC_R4K select IRQ_MSP_SLP select HW_HAS_PCI config PMC_MSP4200_GW bool "PMC-Sierra MSP4200 VoIP Gateway" - select CEVT_R4K - select CSRC_R4K select IRQ_MSP_SLP select HW_HAS_PCI diff --git a/arch/mips/pmc-sierra/msp71xx/msp_time.c b/arch/mips/pmc-sierra/msp71xx/msp_time.c index cca64e15f57f..01df84ce31e2 100644 --- a/arch/mips/pmc-sierra/msp71xx/msp_time.c +++ b/arch/mips/pmc-sierra/msp71xx/msp_time.c @@ -81,7 +81,7 @@ void __init plat_time_init(void) mips_hpt_frequency = cpu_rate/2; } -unsigned int __init get_c0_compare_int(void) +unsigned int __cpuinit get_c0_compare_int(void) { return MSP_INT_VPE0_TIMER; } -- cgit v1.2.3 From a18059ace1eee8c4755bcce22163d137425db607 Mon Sep 17 00:00:00 2001 From: Anoop P A Date: Thu, 18 Nov 2010 16:02:50 +0530 Subject: MIPS: MSP: Fix MSP71xx bpci interrupt handler return value Signed-off-by: Anoop P A To: Ben Hutchings To: linux-mips@linux-mips.org To: linux-kernel@vger.kernel.org Patchwork: https://patchwork.linux-mips.org/patch/1804/ Signed-off-by: Ralf Baechle --- arch/mips/pci/ops-pmcmsp.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/mips/pci/ops-pmcmsp.c b/arch/mips/pci/ops-pmcmsp.c index b7c03d80c88c..68798f869c0f 100644 --- a/arch/mips/pci/ops-pmcmsp.c +++ 
b/arch/mips/pci/ops-pmcmsp.c @@ -308,7 +308,7 @@ static struct resource pci_mem_resource = { * RETURNS: PCIBIOS_SUCCESSFUL - success * ****************************************************************************/ -static int bpci_interrupt(int irq, void *dev_id) +static irqreturn_t bpci_interrupt(int irq, void *dev_id) { struct msp_pci_regs *preg = (void *)PCI_BASE_REG; unsigned int stat = preg->if_status; @@ -326,7 +326,7 @@ static int bpci_interrupt(int irq, void *dev_id) /* write to clear all asserted interrupts */ preg->if_status = stat; - return PCIBIOS_SUCCESSFUL; + return IRQ_HANDLED; } /***************************************************************************** -- cgit v1.2.3 From c726b822131e7fdb62745a5585449e6a159395e8 Mon Sep 17 00:00:00 2001 From: David Daney Date: Mon, 24 Jan 2011 14:51:34 -0800 Subject: MIPS: Fix GCC-4.6 'set but not used' warning in signal*.c GCC-4.6 can find more unused code than previous versions could. In the case of protected_restore_fp_context{,32}, the variable tmp is really used. Its use is tricky in that we really care about the side effects of the __put_user() calls. So we must mark tmp with __maybe_unused to quiet the warning. Signed-off-by: David Daney To: linux-mips@linux-mips.org Patchwork: https://patchwork.linux-mips.org/patch/2035/ Signed-off-by: Ralf Baechle --- arch/mips/kernel/signal.c | 2 +- arch/mips/kernel/signal32.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/mips/kernel/signal.c b/arch/mips/kernel/signal.c index 5922342bca39..dbbe0ce48d89 100644 --- a/arch/mips/kernel/signal.c +++ b/arch/mips/kernel/signal.c @@ -84,7 +84,7 @@ static int protected_save_fp_context(struct sigcontext __user *sc) static int protected_restore_fp_context(struct sigcontext __user *sc) { - int err, tmp; + int err, tmp __maybe_unused; while (1) { lock_fpu_owner(); own_fpu_inatomic(0); diff --git a/arch/mips/kernel/signal32.c b/arch/mips/kernel/signal32.c index a0ed0e052b2e..aae986613795 100644 --- a/arch/mips/kernel/signal32.c +++ b/arch/mips/kernel/signal32.c @@ -115,7 +115,7 @@ static int protected_save_fp_context32(struct sigcontext32 __user *sc) static int protected_restore_fp_context32(struct sigcontext32 __user *sc) { - int err, tmp; + int err, tmp __maybe_unused; while (1) { lock_fpu_owner(); own_fpu_inatomic(0); -- cgit v1.2.3 From 7a6e4ca1eea8dc364f60ac55884f450a132cd100 Mon Sep 17 00:00:00 2001 From: David Daney Date: Mon, 24 Jan 2011 14:51:35 -0800 Subject: MIPS: Remove unused code from arch/mips/kernel/syscall.c The variable arg3 in _sys_sysmips() is unused. Remove it. 
Signed-off-by: David Daney To: linux-mips@linux-mips.org Patchwork: https://patchwork.linux-mips.org/patch/2034/ Signed-off-by: Ralf Baechle --- arch/mips/kernel/syscall.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/mips/kernel/syscall.c b/arch/mips/kernel/syscall.c index cde2a32c1043..58beabf50b3c 100644 --- a/arch/mips/kernel/syscall.c +++ b/arch/mips/kernel/syscall.c @@ -383,12 +383,11 @@ save_static_function(sys_sysmips); static int __used noinline _sys_sysmips(nabi_no_regargs struct pt_regs regs) { - long cmd, arg1, arg2, arg3; + long cmd, arg1, arg2; cmd = regs.regs[4]; arg1 = regs.regs[5]; arg2 = regs.regs[6]; - arg3 = regs.regs[7]; switch (cmd) { case MIPS_ATOMIC_SET: -- cgit v1.2.3 From 91b51f30084911754aed004bd3792f71f7bf0843 Mon Sep 17 00:00:00 2001 From: David Daney Date: Mon, 24 Jan 2011 14:51:36 -0800 Subject: MIPS: Fix GCC-4.6 'set but not used' warning in ieee754int.h GCC-4.6 can find more unused code than previous versions could. In the case of arch/mips/math-emu/ieee754int.h, the COMPXSP and COMPXDP macros are used in several places, but a couple of them leave xs unused. The easiest thing to do is mark it as __maybe_unused to quiet the warning. Signed-off-by: David Daney To: linux-mips@linux-mips.org Patchwork: https://patchwork.linux-mips.org/patch/2032/ Signed-off-by: Ralf Baechle --- arch/mips/math-emu/ieee754int.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/mips/math-emu/ieee754int.h b/arch/mips/math-emu/ieee754int.h index 2701d9500959..2a7d43f4f161 100644 --- a/arch/mips/math-emu/ieee754int.h +++ b/arch/mips/math-emu/ieee754int.h @@ -70,7 +70,7 @@ #define COMPXSP \ - unsigned xm; int xe; int xs; int xc + unsigned xm; int xe; int xs __maybe_unused; int xc #define COMPYSP \ unsigned ym; int ye; int ys; int yc @@ -104,7 +104,7 @@ #define COMPXDP \ -u64 xm; int xe; int xs; int xc +u64 xm; int xe; int xs __maybe_unused; int xc #define COMPYDP \ u64 ym; int ye; int ys; int yc -- cgit v1.2.3 From d3ce0e98b7fe17bb1dec9f6d7c50213db01e7189 Mon Sep 17 00:00:00 2001 From: David Daney Date: Mon, 24 Jan 2011 14:51:37 -0800 Subject: MIPS: Fix GCC-4.6 'set but not used' warning in arch/mips/mm/init.c Under some combinations of CONFIG_*, lastpfn in page_is_ram is 'set but not used'. Mark it as __maybe_unused to quiet the warning/error. Signed-off-by: David Daney To: linux-mips@linux-mips.org Patchwork: https://patchwork.linux-mips.org/patch/2033/ Signed-off-by: Ralf Baechle --- arch/mips/mm/init.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c index 2efcbd24c82f..279599e9a779 100644 --- a/arch/mips/mm/init.c +++ b/arch/mips/mm/init.c @@ -324,7 +324,7 @@ int page_is_ram(unsigned long pagenr) void __init paging_init(void) { unsigned long max_zone_pfns[MAX_NR_ZONES]; - unsigned long lastpfn; + unsigned long lastpfn __maybe_unused; pagetable_init(); -- cgit v1.2.3 From 994fed2dd2166f86b62ec53c7424660145f138ee Mon Sep 17 00:00:00 2001 From: Stefan Weil Date: Sun, 30 Jan 2011 21:41:44 +0100 Subject: MIPS: Loongson: Fix potentially wrong string handling This error was reported by cppcheck: arch/mips/loongson/common/machtype.c:56: error: Dangerous usage of 'str' (strncpy doesn't always 0-terminate it) If strncpy copied MACHTYPE_LEN bytes, the destination string str was not terminated. The patch adds one more byte to str and makes sure that this byte is always 0. 
Signed-off-by: Stefan Weil Cc: Wu Zhangjin Cc: Arnaud Patard Cc: linux-mips@linux-mips.org Cc: linux-kernel@vger.kernel.org Patchwork: https://patchwork.linux-mips.org/patch/2053/ Signed-off-by: Ralf Baechle --- arch/mips/loongson/common/machtype.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/mips/loongson/common/machtype.c b/arch/mips/loongson/common/machtype.c index 81fbe6b73f91..2efd5d9dee27 100644 --- a/arch/mips/loongson/common/machtype.c +++ b/arch/mips/loongson/common/machtype.c @@ -41,7 +41,7 @@ void __weak __init mach_prom_init_machtype(void) void __init prom_init_machtype(void) { - char *p, str[MACHTYPE_LEN]; + char *p, str[MACHTYPE_LEN + 1]; int machtype = MACH_LEMOTE_FL2E; mips_machtype = LOONGSON_MACHTYPE; @@ -53,6 +53,7 @@ void __init prom_init_machtype(void) } p += strlen("machtype="); strncpy(str, p, MACHTYPE_LEN); + str[MACHTYPE_LEN] = '\0'; p = strstr(str, " "); if (p) *p = '\0'; -- cgit v1.2.3 From efe8dc556cd0f22e04c453188ffbc408b492eb82 Mon Sep 17 00:00:00 2001 From: Yoichi Yuasa Date: Mon, 7 Feb 2011 11:31:36 +0900 Subject: MIPS: Fix always CONFIG_LOONGSON_UART_BASE=y Signed-off-by: Yoichi Yuasa Cc: linux-mips Patchwork: https://patchwork.linux-mips.org/patch/2055/ Signed-off-by: Ralf Baechle --- arch/mips/loongson/Kconfig | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/mips/loongson/Kconfig b/arch/mips/loongson/Kconfig index 6e1b77fec7ea..aca93eed8779 100644 --- a/arch/mips/loongson/Kconfig +++ b/arch/mips/loongson/Kconfig @@ -1,6 +1,7 @@ +if MACH_LOONGSON + choice prompt "Machine Type" - depends on MACH_LOONGSON config LEMOTE_FULOONG2E bool "Lemote Fuloong(2e) mini-PC" @@ -87,3 +88,5 @@ config LOONGSON_UART_BASE config LOONGSON_MC146818 bool default n + +endif # MACH_LOONGSON -- cgit v1.2.3 From 91f017372a48d2d128d08964bcfeafbd98b6d739 Mon Sep 17 00:00:00 2001 From: Deng-Cheng Zhu Date: Fri, 21 Jan 2011 16:19:17 +0800 Subject: MIPS, Perf-events: Work with irq_work This is the MIPS part of the following commit by Peter Zijlstra: - e360adbe29241a0194e10e20595360dd7b98a2b3 irq_work: Add generic hardirq context callbacks Provide a mechanism that allows running code in IRQ context. It is most useful for NMI code that needs to interact with the rest of the system -- like wakeup a task to drain buffers. Perf currently has such a mechanism, so extract that and provide it as a generic feature, independent of perf so that others may also benefit. The IRQ context callback is generated through self-IPIs where possible, or on architectures like powerpc the decrementer (the built-in timer facility) is set to generate an interrupt immediately. Architectures that don't have anything like this get to do with a callback from the timer tick. These architectures can call irq_work_run() at the tail of any IRQ handlers that might enqueue such work (like the perf IRQ handler) to avoid undue latencies in processing the work. For MIPSXX, we need to call irq_work_run() at the tail of the perf IRQ handler as described above. 
Reported-by: Wu Zhangjin Acked-by: Peter Zijlstra Acked-by: David Daney Signed-off-by: Deng-Cheng Zhu To: fweisbec@gmail.com To: will.deacon@arm.com Cc: linux-mips@linux-mips.org Cc: linux-kernel@vger.kernel.org Cc: paulus@samba.org Cc: mingo@elte.hu Cc: acme@redhat.com Cc: matt@console-pimps.org Cc: sshtylyov@mvista.com, Patchwork: http://patchwork.linux-mips.org/patch/2011/ Signed-off-by: Ralf Baechle --- arch/mips/Kconfig | 1 + arch/mips/include/asm/perf_event.h | 12 +----------- arch/mips/kernel/perf_event_mipsxx.c | 2 +- 3 files changed, 3 insertions(+), 12 deletions(-) (limited to 'arch') diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig index 972695e359eb..bd7b64d252dc 100644 --- a/arch/mips/Kconfig +++ b/arch/mips/Kconfig @@ -4,6 +4,7 @@ config MIPS select HAVE_GENERIC_DMA_COHERENT select HAVE_IDE select HAVE_OPROFILE + select HAVE_IRQ_WORK select HAVE_PERF_EVENTS select PERF_USE_VMALLOC select HAVE_ARCH_KGDB diff --git a/arch/mips/include/asm/perf_event.h b/arch/mips/include/asm/perf_event.h index e00007cf8162..d0c77496c728 100644 --- a/arch/mips/include/asm/perf_event.h +++ b/arch/mips/include/asm/perf_event.h @@ -11,15 +11,5 @@ #ifndef __MIPS_PERF_EVENT_H__ #define __MIPS_PERF_EVENT_H__ - -/* - * MIPS performance counters do not raise NMI upon overflow, a regular - * interrupt will be signaled. Hence we can do the pending perf event - * work at the tail of the irq handler. - */ -static inline void -set_perf_event_pending(void) -{ -} - +/* Leave it empty here. The file is required by linux/perf_event.h */ #endif /* __MIPS_PERF_EVENT_H__ */ diff --git a/arch/mips/kernel/perf_event_mipsxx.c b/arch/mips/kernel/perf_event_mipsxx.c index 183e0d226669..d45ea37062bc 100644 --- a/arch/mips/kernel/perf_event_mipsxx.c +++ b/arch/mips/kernel/perf_event_mipsxx.c @@ -696,7 +696,7 @@ static int mipsxx_pmu_handle_shared_irq(void) * interrupt, not NMI. */ if (handled == IRQ_HANDLED) - perf_event_do_pending(); + irq_work_run(); #ifdef CONFIG_MIPS_MT_SMP read_unlock(&pmuint_rwlock); -- cgit v1.2.3 From 404ff638403e9286691b9b1f86d514c1d7737e8f Mon Sep 17 00:00:00 2001 From: Deng-Cheng Zhu Date: Fri, 21 Jan 2011 16:19:18 +0800 Subject: MIPS, Perf-events: Work with the new PMU interface This is the MIPS part of the following commits by Peter Zijlstra: - a4eaf7f14675cb512d69f0c928055e73d0c6d252 perf: Rework the PMU methods Replace pmu::{enable,disable,start,stop,unthrottle} with pmu::{add,del,start,stop}, all of which take a flags argument. The new interface extends the capability to stop a counter while keeping it scheduled on the PMU. We replace the throttled state with the generic stopped state. This also allows us to efficiently stop/start counters over certain code paths (like IRQ handlers). It also allows scheduling a counter without it starting, allowing for a generic frozen state (useful for rotating stopped counters). The stopped state is implemented in two different ways, depending on how the architecture implemented the throttled state: 1) We disable the counter: a) the pmu has per-counter enable bits, we flip that b) we program a NOP event, preserving the counter state 2) We store the counter state and ignore all read/overflow events For MIPSXX, the stopped state is implemented in the way of 1.b as above. - 33696fc0d141bbbcb12f75b69608ea83282e3117 perf: Per PMU disable Changes perf_disable() into perf_pmu_disable(). - 24cd7f54a0d47e1d5b3de29e2456bfbd2d8447b7 perf: Reduce perf_disable() usage Since the current perf_disable() usage is only an optimization, remove it for now. 
This eases the removal of the __weak hw_perf_enable() interface. - b0a873ebbf87bf38bf70b5e39a7cadc96099fa13 perf: Register PMU implementations Simple registration interface for struct pmu, this provides the infrastructure for removing all the weak functions. - 51b0fe39549a04858001922919ab355dee9bdfcf perf: Deconstify struct pmu sed -ie 's/const struct pmu\>/struct pmu/g' `git grep -l "const struct pmu\>"` Reported-by: Wu Zhangjin Acked-by: David Daney Signed-off-by: Deng-Cheng Zhu To: a.p.zijlstra@chello.nl To: fweisbec@gmail.com To: will.deacon@arm.com Cc: linux-mips@linux-mips.org Cc: linux-kernel@vger.kernel.org Cc: wuzhangjin@gmail.com Cc: paulus@samba.org Cc: mingo@elte.hu Cc: acme@redhat.com Cc: dengcheng.zhu@gmail.com Cc: matt@console-pimps.org Cc: sshtylyov@mvista.com Cc: ddaney@caviumnetworks.com Patchwork: http://patchwork.linux-mips.org/patch/2012/ Signed-off-by: Ralf Baechle --- arch/mips/kernel/perf_event.c | 275 ++++++++++++++++++++--------------- arch/mips/kernel/perf_event_mipsxx.c | 2 + 2 files changed, 158 insertions(+), 119 deletions(-) (limited to 'arch') diff --git a/arch/mips/kernel/perf_event.c b/arch/mips/kernel/perf_event.c index 2b7f3f703b83..1ee44a3f9718 100644 --- a/arch/mips/kernel/perf_event.c +++ b/arch/mips/kernel/perf_event.c @@ -161,41 +161,6 @@ mipspmu_event_set_period(struct perf_event *event, return ret; } -static int mipspmu_enable(struct perf_event *event) -{ - struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); - struct hw_perf_event *hwc = &event->hw; - int idx; - int err = 0; - - /* To look for a free counter for this event. */ - idx = mipspmu->alloc_counter(cpuc, hwc); - if (idx < 0) { - err = idx; - goto out; - } - - /* - * If there is an event in the counter we are going to use then - * make sure it is disabled. - */ - event->hw.idx = idx; - mipspmu->disable_event(idx); - cpuc->events[idx] = event; - - /* Set the period for the event. */ - mipspmu_event_set_period(event, hwc, idx); - - /* Enable the event. */ - mipspmu->enable_event(hwc, idx); - - /* Propagate our changes to the userspace mapping. */ - perf_event_update_userpage(event); - -out: - return err; -} - static void mipspmu_event_update(struct perf_event *event, struct hw_perf_event *hwc, int idx) @@ -231,32 +196,90 @@ again: return; } -static void mipspmu_disable(struct perf_event *event) +static void mipspmu_start(struct perf_event *event, int flags) +{ + struct hw_perf_event *hwc = &event->hw; + + if (!mipspmu) + return; + + if (flags & PERF_EF_RELOAD) + WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE)); + + hwc->state = 0; + + /* Set the period for the event. */ + mipspmu_event_set_period(event, hwc, hwc->idx); + + /* Enable the event. */ + mipspmu->enable_event(hwc, hwc->idx); +} + +static void mipspmu_stop(struct perf_event *event, int flags) +{ + struct hw_perf_event *hwc = &event->hw; + + if (!mipspmu) + return; + + if (!(hwc->state & PERF_HES_STOPPED)) { + /* We are working on a local event. */ + mipspmu->disable_event(hwc->idx); + barrier(); + mipspmu_event_update(event, hwc, hwc->idx); + hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE; + } +} + +static int mipspmu_add(struct perf_event *event, int flags) { struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); struct hw_perf_event *hwc = &event->hw; - int idx = hwc->idx; + int idx; + int err = 0; + perf_pmu_disable(event->pmu); - WARN_ON(idx < 0 || idx >= mipspmu->num_counters); + /* To look for a free counter for this event. 
*/ + idx = mipspmu->alloc_counter(cpuc, hwc); + if (idx < 0) { + err = idx; + goto out; + } - /* We are working on a local event. */ + /* + * If there is an event in the counter we are going to use then + * make sure it is disabled. + */ + event->hw.idx = idx; mipspmu->disable_event(idx); + cpuc->events[idx] = event; - barrier(); - - mipspmu_event_update(event, hwc, idx); - cpuc->events[idx] = NULL; - clear_bit(idx, cpuc->used_mask); + hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE; + if (flags & PERF_EF_START) + mipspmu_start(event, PERF_EF_RELOAD); + /* Propagate our changes to the userspace mapping. */ perf_event_update_userpage(event); + +out: + perf_pmu_enable(event->pmu); + return err; } -static void mipspmu_unthrottle(struct perf_event *event) +static void mipspmu_del(struct perf_event *event, int flags) { + struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); struct hw_perf_event *hwc = &event->hw; + int idx = hwc->idx; - mipspmu->enable_event(hwc, hwc->idx); + WARN_ON(idx < 0 || idx >= mipspmu->num_counters); + + mipspmu_stop(event, PERF_EF_UPDATE); + cpuc->events[idx] = NULL; + clear_bit(idx, cpuc->used_mask); + + perf_event_update_userpage(event); } static void mipspmu_read(struct perf_event *event) @@ -270,12 +293,17 @@ static void mipspmu_read(struct perf_event *event) mipspmu_event_update(event, hwc, hwc->idx); } -static struct pmu pmu = { - .enable = mipspmu_enable, - .disable = mipspmu_disable, - .unthrottle = mipspmu_unthrottle, - .read = mipspmu_read, -}; +static void mipspmu_enable(struct pmu *pmu) +{ + if (mipspmu) + mipspmu->start(); +} + +static void mipspmu_disable(struct pmu *pmu) +{ + if (mipspmu) + mipspmu->stop(); +} static atomic_t active_events = ATOMIC_INIT(0); static DEFINE_MUTEX(pmu_reserve_mutex); @@ -318,6 +346,82 @@ static void mipspmu_free_irq(void) perf_irq = save_perf_irq; } +/* + * mipsxx/rm9000/loongson2 have different performance counters, they have + * specific low-level init routines. + */ +static void reset_counters(void *arg); +static int __hw_perf_event_init(struct perf_event *event); + +static void hw_perf_event_destroy(struct perf_event *event) +{ + if (atomic_dec_and_mutex_lock(&active_events, + &pmu_reserve_mutex)) { + /* + * We must not call the destroy function with interrupts + * disabled. 
+ */ + on_each_cpu(reset_counters, + (void *)(long)mipspmu->num_counters, 1); + mipspmu_free_irq(); + mutex_unlock(&pmu_reserve_mutex); + } +} + +static int mipspmu_event_init(struct perf_event *event) +{ + int err = 0; + + switch (event->attr.type) { + case PERF_TYPE_RAW: + case PERF_TYPE_HARDWARE: + case PERF_TYPE_HW_CACHE: + break; + + default: + return -ENOENT; + } + + if (!mipspmu || event->cpu >= nr_cpumask_bits || + (event->cpu >= 0 && !cpu_online(event->cpu))) + return -ENODEV; + + if (!atomic_inc_not_zero(&active_events)) { + if (atomic_read(&active_events) > MIPS_MAX_HWEVENTS) { + atomic_dec(&active_events); + return -ENOSPC; + } + + mutex_lock(&pmu_reserve_mutex); + if (atomic_read(&active_events) == 0) + err = mipspmu_get_irq(); + + if (!err) + atomic_inc(&active_events); + mutex_unlock(&pmu_reserve_mutex); + } + + if (err) + return err; + + err = __hw_perf_event_init(event); + if (err) + hw_perf_event_destroy(event); + + return err; +} + +static struct pmu pmu = { + .pmu_enable = mipspmu_enable, + .pmu_disable = mipspmu_disable, + .event_init = mipspmu_event_init, + .add = mipspmu_add, + .del = mipspmu_del, + .start = mipspmu_start, + .stop = mipspmu_stop, + .read = mipspmu_read, +}; + static inline unsigned int mipspmu_perf_event_encode(const struct mips_perf_event *pev) { @@ -409,73 +513,6 @@ static int validate_group(struct perf_event *event) return 0; } -/* - * mipsxx/rm9000/loongson2 have different performance counters, they have - * specific low-level init routines. - */ -static void reset_counters(void *arg); -static int __hw_perf_event_init(struct perf_event *event); - -static void hw_perf_event_destroy(struct perf_event *event) -{ - if (atomic_dec_and_mutex_lock(&active_events, - &pmu_reserve_mutex)) { - /* - * We must not call the destroy function with interrupts - * disabled. - */ - on_each_cpu(reset_counters, - (void *)(long)mipspmu->num_counters, 1); - mipspmu_free_irq(); - mutex_unlock(&pmu_reserve_mutex); - } -} - -const struct pmu *hw_perf_event_init(struct perf_event *event) -{ - int err = 0; - - if (!mipspmu || event->cpu >= nr_cpumask_bits || - (event->cpu >= 0 && !cpu_online(event->cpu))) - return ERR_PTR(-ENODEV); - - if (!atomic_inc_not_zero(&active_events)) { - if (atomic_read(&active_events) > MIPS_MAX_HWEVENTS) { - atomic_dec(&active_events); - return ERR_PTR(-ENOSPC); - } - - mutex_lock(&pmu_reserve_mutex); - if (atomic_read(&active_events) == 0) - err = mipspmu_get_irq(); - - if (!err) - atomic_inc(&active_events); - mutex_unlock(&pmu_reserve_mutex); - } - - if (err) - return ERR_PTR(err); - - err = __hw_perf_event_init(event); - if (err) - hw_perf_event_destroy(event); - - return err ? ERR_PTR(err) : &pmu; -} - -void hw_perf_enable(void) -{ - if (mipspmu) - mipspmu->start(); -} - -void hw_perf_disable(void) -{ - if (mipspmu) - mipspmu->stop(); -} - /* This is needed by specific irq handlers in perf_event_*.c */ static void handle_associated_event(struct cpu_hw_events *cpuc, diff --git a/arch/mips/kernel/perf_event_mipsxx.c b/arch/mips/kernel/perf_event_mipsxx.c index d45ea37062bc..d9a7db78ed62 100644 --- a/arch/mips/kernel/perf_event_mipsxx.c +++ b/arch/mips/kernel/perf_event_mipsxx.c @@ -1045,6 +1045,8 @@ init_hw_perf_events(void) "CPU, irq %d%s\n", mipspmu->name, counters, irq, irq < 0 ? 
" (share with timer interrupt)" : ""); + perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW); + return 0; } early_initcall(init_hw_perf_events); -- cgit v1.2.3 From c049b6a5f2d8ca16094a4f2a6d8ad39f888a551a Mon Sep 17 00:00:00 2001 From: Deng-Cheng Zhu Date: Fri, 21 Jan 2011 16:19:19 +0800 Subject: MIPS, Perf-events: Fix event check in validate_event() Ignore events that are in off/error state or belong to a different PMU. This patch originates from the following commit for ARM by Will Deacon: - 65b4711ff513767341aa1915c822de6ec0de65cb ARM: 6352/1: perf: fix event validation The validate_event function in the ARM perf events backend has the following problems: 1.) Events that are disabled count towards the cost. 2.) Events associated with other PMUs [for example, software events or breakpoints] do not count towards the cost, but do fail validation, causing the group to fail. This patch changes validate_event so that it ignores events in the PERF_EVENT_STATE_OFF state or that are scheduled for other PMUs. Acked-by: Will Deacon Acked-by: David Daney Signed-off-by: Deng-Cheng Zhu To: a.p.zijlstra@chello.nl To: fweisbec@gmail.com To: will.deacon@arm.com Cc: linux-mips@linux-mips.org Cc: linux-kernel@vger.kernel.org Cc: wuzhangjin@gmail.com Cc: paulus@samba.org Cc: mingo@elte.hu Cc: acme@redhat.com Cc: dengcheng.zhu@gmail.com Cc: matt@console-pimps.org Cc: sshtylyov@mvista.com Cc: ddaney@caviumnetworks.com Patchwork: http://patchwork.linux-mips.org/patch/2013/ Signed-off-by: Ralf Baechle --- arch/mips/kernel/perf_event.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/mips/kernel/perf_event.c b/arch/mips/kernel/perf_event.c index 1ee44a3f9718..3d55761146e5 100644 --- a/arch/mips/kernel/perf_event.c +++ b/arch/mips/kernel/perf_event.c @@ -486,8 +486,9 @@ static int validate_event(struct cpu_hw_events *cpuc, { struct hw_perf_event fake_hwc = event->hw; - if (event->pmu && event->pmu != &pmu) - return 0; + /* Allow mixed event group. So return 1 to pass validation. */ + if (event->pmu != &pmu || event->state <= PERF_EVENT_STATE_OFF) + return 1; return mipspmu->alloc_counter(cpuc, &fake_hwc) >= 0; } -- cgit v1.2.3 From 98f92f2f9e2fd959157b1d52f7ae160683812740 Mon Sep 17 00:00:00 2001 From: Deng-Cheng Zhu Date: Fri, 21 Jan 2011 16:19:20 +0800 Subject: MIPS, Perf-events: Work with the new callchain interface This is the MIPS part of the following commits by Frederic Weisbecker: - f72c1a931e311bb7780fee19e41a89ac42cab50e perf: Factorize callchain context handling Store the kernel and user contexts from the generic layer instead of archs, this gathers some repetitive code. - 56962b4449af34070bb1994621ef4f0265eed4d8 perf: Generalize some arch callchain code - Most archs use one callchain buffer per cpu, except x86 that needs to deal with NMIs. Provide a default perf_callchain_buffer() implementation that x86 overrides. - Centralize all the kernel/user regs handling and invoke new arch handlers from there: perf_callchain_user() / perf_callchain_kernel() That avoid all the user_mode(), current->mm checks and so... - Invert some parameters in perf_callchain_*() helpers: entry to the left, regs to the right, following the traditional (dst, src). - 70791ce9ba68a5921c9905ef05d23f62a90bc10c perf: Generalize callchain_store() callchain_store() is the same on every archs, inline it in perf_event.h and rename it to perf_callchain_store() to avoid any collision. This removes repetitive code. 
- c1a65932fd7216fdc9a0db8bbffe1d47842f862c perf: Drop unappropriate tests on arch callchains Drop the TASK_RUNNING test on user tasks for callchains as this check doesn't seem to make any sense. Also remove the tests for !current that is not supposed to happen and current->pid as this should be handled at the generic level, with exclude_idle attribute. Reported-by: Wu Zhangjin Acked-by: Frederic Weisbecker Acked-by: David Daney Signed-off-by: Deng-Cheng Zhu To: a.p.zijlstra@chello.nl To: will.deacon@arm.com Cc: linux-mips@linux-mips.org Cc: linux-kernel@vger.kernel.org Cc: paulus@samba.org Cc: mingo@elte.hu Cc: acme@redhat.com Cc: dengcheng.zhu@gmail.com Cc: matt@console-pimps.org Cc: sshtylyov@mvista.com Patchwork: http://patchwork.linux-mips.org/patch/2014/ Signed-off-by: Ralf Baechle --- arch/mips/kernel/perf_event.c | 63 +++++-------------------------------------- 1 file changed, 6 insertions(+), 57 deletions(-) (limited to 'arch') diff --git a/arch/mips/kernel/perf_event.c b/arch/mips/kernel/perf_event.c index 3d55761146e5..8f7d2f84d095 100644 --- a/arch/mips/kernel/perf_event.c +++ b/arch/mips/kernel/perf_event.c @@ -534,21 +534,13 @@ handle_associated_event(struct cpu_hw_events *cpuc, #include "perf_event_mipsxx.c" /* Callchain handling code. */ -static inline void -callchain_store(struct perf_callchain_entry *entry, - u64 ip) -{ - if (entry->nr < PERF_MAX_STACK_DEPTH) - entry->ip[entry->nr++] = ip; -} /* * Leave userspace callchain empty for now. When we find a way to trace * the user stack callchains, we add here. */ -static void -perf_callchain_user(struct pt_regs *regs, - struct perf_callchain_entry *entry) +void perf_callchain_user(struct perf_callchain_entry *entry, + struct pt_regs *regs) { } @@ -561,23 +553,21 @@ static void save_raw_perf_callchain(struct perf_callchain_entry *entry, while (!kstack_end(sp)) { addr = *sp++; if (__kernel_text_address(addr)) { - callchain_store(entry, addr); + perf_callchain_store(entry, addr); if (entry->nr >= PERF_MAX_STACK_DEPTH) break; } } } -static void -perf_callchain_kernel(struct pt_regs *regs, - struct perf_callchain_entry *entry) +void perf_callchain_kernel(struct perf_callchain_entry *entry, + struct pt_regs *regs) { unsigned long sp = regs->regs[29]; #ifdef CONFIG_KALLSYMS unsigned long ra = regs->regs[31]; unsigned long pc = regs->cp0_epc; - callchain_store(entry, PERF_CONTEXT_KERNEL); if (raw_show_trace || !__kernel_text_address(pc)) { unsigned long stack_page = (unsigned long)task_stack_page(current); @@ -587,53 +577,12 @@ perf_callchain_kernel(struct pt_regs *regs, return; } do { - callchain_store(entry, pc); + perf_callchain_store(entry, pc); if (entry->nr >= PERF_MAX_STACK_DEPTH) break; pc = unwind_stack(current, &sp, pc, &ra); } while (pc); #else - callchain_store(entry, PERF_CONTEXT_KERNEL); save_raw_perf_callchain(entry, sp); #endif } - -static void -perf_do_callchain(struct pt_regs *regs, - struct perf_callchain_entry *entry) -{ - int is_user; - - if (!regs) - return; - - is_user = user_mode(regs); - - if (!current || !current->pid) - return; - - if (is_user && current->state != TASK_RUNNING) - return; - - if (!is_user) { - perf_callchain_kernel(regs, entry); - if (current->mm) - regs = task_pt_regs(current); - else - regs = NULL; - } - if (regs) - perf_callchain_user(regs, entry); -} - -static DEFINE_PER_CPU(struct perf_callchain_entry, pmc_irq_entry); - -struct perf_callchain_entry * -perf_callchain(struct pt_regs *regs) -{ - struct perf_callchain_entry *entry = &__get_cpu_var(pmc_irq_entry); - - entry->nr = 0; - 
perf_do_callchain(regs, entry); - return entry; -} -- cgit v1.2.3 From ba9786f32473410bbec256db9745a7fbcaace69f Mon Sep 17 00:00:00 2001 From: Deng-Cheng Zhu Date: Fri, 21 Jan 2011 16:19:21 +0800 Subject: MIPS, Perf-events: Use unsigned delta for right shift in event update Leverage the commit for ARM by Will Deacon: - 446a5a8b1eb91a6990e5c8fe29f14e7a95b69132 ARM: 6205/1: perf: ensure counter delta is treated as unsigned Hardware performance counters on ARM are 32-bits wide but atomic64_t variables are used to represent counter data in the hw_perf_event structure. The armpmu_event_update function right-shifts a signed 64-bit delta variable and adds the result to the event count. This can lead to shifting in sign-bits if the MSB of the 32-bit counter value is set. This results in perf output such as: Performance counter stats for 'sleep 20': 18446744073460670464 cycles <-- 0xFFFFFFFFF12A6000 7783773 instructions # 0.000 IPC 465 context-switches 161 page-faults 1172393 branches 20.154242147 seconds time elapsed This patch ensures that the delta value is treated as unsigned so that the right shift sets the upper bits to zero. Acked-by: Will Deacon Acked-by: David Daney Signed-off-by: Deng-Cheng Zhu To: a.p.zijlstra@chello.nl To: fweisbec@gmail.com To: will.deacon@arm.com Cc: linux-mips@linux-mips.org Cc: linux-kernel@vger.kernel.org Cc: wuzhangjin@gmail.com Cc: paulus@samba.org Cc: mingo@elte.hu Cc: acme@redhat.com Cc: matt@console-pimps.org Cc: sshtylyov@mvista.com Patchwork: http://patchwork.linux-mips.org/patch/2015/ Signed-off-by: Ralf Baechle --- arch/mips/kernel/perf_event.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/mips/kernel/perf_event.c b/arch/mips/kernel/perf_event.c index 8f7d2f84d095..a8244854d3dc 100644 --- a/arch/mips/kernel/perf_event.c +++ b/arch/mips/kernel/perf_event.c @@ -169,7 +169,7 @@ static void mipspmu_event_update(struct perf_event *event, unsigned long flags; int shift = 64 - TOTAL_BITS; s64 prev_raw_count, new_raw_count; - s64 delta; + u64 delta; again: prev_raw_count = local64_read(&hwc->prev_count); -- cgit v1.2.3 From 6667deb69ee3b8a31ea88e1303cf3ad7d4f221da Mon Sep 17 00:00:00 2001 From: Maksim Rayskiy Date: Sat, 12 Feb 2011 10:21:32 -0800 Subject: MIPS: Move idle task creation to work queue To avoid forking usermode thread when creating an idle task, move fork_idle to a work queue. If kernel starts with maxcpus= option which does not bring all available cpus online at boot time, idle tasks for offline cpus are not created. If later offline cpus are hotplugged through sysfs, __cpu_up is called in the context of the user task, and fork_idle copies its non-zero mm pointer. This causes BUG() in per_cpu_trap_init. This also avoids issues with resource limits of the CPU writing to sysfs, containers, maybe others. 
Signed-off-by: Maksim Rayskiy To: linux-mips@linux-mips.org Patchwork: https://patchwork.linux-mips.org/patch/2070/ Signed-off-by: Ralf Baechle --- arch/mips/kernel/smp.c | 31 +++++++++++++++++++++++++++++-- 1 file changed, 29 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c index 383aeb95cb49..32a256101082 100644 --- a/arch/mips/kernel/smp.c +++ b/arch/mips/kernel/smp.c @@ -193,6 +193,22 @@ void __devinit smp_prepare_boot_cpu(void) */ static struct task_struct *cpu_idle_thread[NR_CPUS]; +struct create_idle { + struct work_struct work; + struct task_struct *idle; + struct completion done; + int cpu; +}; + +static void __cpuinit do_fork_idle(struct work_struct *work) +{ + struct create_idle *c_idle = + container_of(work, struct create_idle, work); + + c_idle->idle = fork_idle(c_idle->cpu); + complete(&c_idle->done); +} + int __cpuinit __cpu_up(unsigned int cpu) { struct task_struct *idle; @@ -203,8 +219,19 @@ int __cpuinit __cpu_up(unsigned int cpu) * Linux can schedule processes on this slave. */ if (!cpu_idle_thread[cpu]) { - idle = fork_idle(cpu); - cpu_idle_thread[cpu] = idle; + /* + * Schedule work item to avoid forking user task + * Ported from arch/x86/kernel/smpboot.c + */ + struct create_idle c_idle = { + .cpu = cpu, + .done = COMPLETION_INITIALIZER_ONSTACK(c_idle.done), + }; + + INIT_WORK_ONSTACK(&c_idle.work, do_fork_idle); + schedule_work(&c_idle.work); + wait_for_completion(&c_idle.done); + idle = cpu_idle_thread[cpu] = c_idle.idle; if (IS_ERR(idle)) panic(KERN_ERR "Fork failed for CPU %d", cpu); -- cgit v1.2.3 From ab5330eb26327675d28df19a8ab257c928c6a3bd Mon Sep 17 00:00:00 2001 From: Maurus Cuelenaere Date: Tue, 1 Mar 2011 00:20:01 +0100 Subject: MIPS: Jz4740: Add HAVE_CLK Jz4740 supports the clock framework but doesn't have HAVE_CLK defined, so define it! Signed-off-by: Maurus Cuelenaere To: linux-mips@linux-mips.org To: linux-kernel@vger.kernel.org Patchwork: https://patchwork.linux-mips.org/patch/2112/ Acked-by: Lars-Peter Clausen Signed-off-by: Ralf Baechle --- arch/mips/Kconfig | 1 + 1 file changed, 1 insertion(+) (limited to 'arch') diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig index bd7b64d252dc..d88983516e26 100644 --- a/arch/mips/Kconfig +++ b/arch/mips/Kconfig @@ -209,6 +209,7 @@ config MACH_JZ4740 select ARCH_REQUIRE_GPIOLIB select SYS_HAS_EARLY_PRINTK select HAVE_PWM + select HAVE_CLK config LASAT bool "LASAT Networks platforms" -- cgit v1.2.3 From bf3a1eb85967dcbaae42f4fcb53c2392cec32677 Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Sun, 27 Feb 2011 19:53:53 +0100 Subject: MIPS: MTX-1: Make au1000_eth probe all PHY addresses When au1000_eth probes the MII bus for PHY address, if we do not set au1000_eth platform data's phy_search_highest_address, the MII probing logic will exit early and will assume a valid PHY is found at address 0. For MTX-1, the PHY is at address 31, and without this patch, the link detection/speed/duplex would not work correctly. 
CC: stable@kernel.org Signed-off-by: Florian Fainelli To: linux-mips@linux-mips.org Patchwork: https://patchwork.linux-mips.org/patch/2111/ Signed-off-by: Ralf Baechle --- arch/mips/alchemy/mtx-1/platform.c | 9 +++++++++ 1 file changed, 9 insertions(+) (limited to 'arch') diff --git a/arch/mips/alchemy/mtx-1/platform.c b/arch/mips/alchemy/mtx-1/platform.c index e30e42add697..956f946218c5 100644 --- a/arch/mips/alchemy/mtx-1/platform.c +++ b/arch/mips/alchemy/mtx-1/platform.c @@ -28,6 +28,8 @@ #include #include +#include + static struct gpio_keys_button mtx1_gpio_button[] = { { .gpio = 207, @@ -140,10 +142,17 @@ static struct __initdata platform_device * mtx1_devs[] = { &mtx1_mtd, }; +static struct au1000_eth_platform_data mtx1_au1000_eth0_pdata = { + .phy_search_highest_addr = 1, + .phy1_search_mac0 = 1, +}; + static int __init mtx1_register_devices(void) { int rc; + au1xxx_override_eth_cfg(0, &mtx1_au1000_eth0_pdata); + rc = gpio_request(mtx1_gpio_button[0].gpio, mtx1_gpio_button[0].desc); if (rc < 0) { -- cgit v1.2.3 From 9ced975711d605d7deb27ef027e0f9816a51b479 Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Mon, 21 Feb 2011 14:28:02 +0100 Subject: MIPS: Alchemy: Fix reset for MTX-1 and XXS1500 Since commit 32fd6901 (MIPS: Alchemy: get rid of common/reset.c) Alchemy-based boards use their own reset function. For MTX-1 and XXS1500, the reset function pokes at the BCSR.SYSTEM_RESET register, but this does not work. According to Bruno Randolf, this was not tested when written. Previously, the generic au1000_restart() routine called the board specific reset function, which for MTX-1 and XXS1500 did not work, but finally made a jump to the reset vector, which really triggers a system restart. Fix reboot for both targets by jumping to the reset vector. Signed-off-by: Florian Fainelli To: linux-mips@linux-mips.org Patchwork: https://patchwork.linux-mips.org/patch/2093/ Acked-by: Bruno Randolf Signed-off-by: Ralf Baechle --- arch/mips/alchemy/mtx-1/board_setup.c | 4 ++-- arch/mips/alchemy/xxs1500/board_setup.c | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) (limited to 'arch') diff --git a/arch/mips/alchemy/mtx-1/board_setup.c b/arch/mips/alchemy/mtx-1/board_setup.c index 6398fa95905c..40b84b991191 100644 --- a/arch/mips/alchemy/mtx-1/board_setup.c +++ b/arch/mips/alchemy/mtx-1/board_setup.c @@ -54,8 +54,8 @@ int mtx1_pci_idsel(unsigned int devsel, int assert); static void mtx1_reset(char *c) { - /* Hit BCSR.SYSTEM_CONTROL[SW_RST] */ - au_writel(0x00000000, 0xAE00001C); + /* Jump to the reset vector */ + __asm__ __volatile__("jr\t%0"::"r"(0xbfc00000)); } static void mtx1_power_off(void) diff --git a/arch/mips/alchemy/xxs1500/board_setup.c b/arch/mips/alchemy/xxs1500/board_setup.c index b43c918925d3..80c521e5290d 100644 --- a/arch/mips/alchemy/xxs1500/board_setup.c +++ b/arch/mips/alchemy/xxs1500/board_setup.c @@ -36,8 +36,8 @@ static void xxs1500_reset(char *c) { - /* Hit BCSR.SYSTEM_CONTROL[SW_RST] */ - au_writel(0x00000000, 0xAE00001C); + /* Jump to the reset vector */ + __asm__ __volatile__("jr\t%0"::"r"(0xbfc00000)); } static void xxs1500_power_off(void) -- cgit v1.2.3