From 5fcfad3d41cc70f39fb31e7ee314989cc4c5f02c Mon Sep 17 00:00:00 2001
From: Qing Zhang
Date: Sat, 10 Dec 2022 22:40:15 +0800
Subject: LoongArch/ftrace: Add dynamic function graph tracer support

Once the function_graph tracer is enabled, a filtered function has the
following call sequence:

 1) ftrace_caller       ==> on/off by ftrace_make_call/ftrace_make_nop
 2) ftrace_graph_caller
 3) ftrace_graph_call   ==> on/off by ftrace_en/disable_ftrace_graph_caller
 4) prepare_ftrace_return

Considering the upcoming DYNAMIC_FTRACE_WITH_REGS feature, it is more
extensible to have a dedicated ftrace_graph_caller function instead of
calling prepare_ftrace_return directly from ftrace_caller.

Co-developed-by: Jinyang He
Signed-off-by: Jinyang He
Signed-off-by: Qing Zhang
Signed-off-by: Huacai Chen
---
 arch/loongarch/kernel/ftrace_dyn.c | 44 ++++++++++++++++++++++++++++++++++++++
 arch/loongarch/kernel/inst.c       | 24 +++++++++++++++++++++
 arch/loongarch/kernel/mcount_dyn.S | 33 ++++++++++++++++++++++++++++
 3 files changed, 101 insertions(+)

diff --git a/arch/loongarch/kernel/ftrace_dyn.c b/arch/loongarch/kernel/ftrace_dyn.c
index 3b82bface840..5a801c328e2a 100644
--- a/arch/loongarch/kernel/ftrace_dyn.c
+++ b/arch/loongarch/kernel/ftrace_dyn.c
@@ -108,3 +108,47 @@ int __init ftrace_dyn_arch_init(void)
 {
 	return 0;
 }
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+void prepare_ftrace_return(unsigned long self_addr, unsigned long *parent)
+{
+	unsigned long old;
+	unsigned long return_hooker = (unsigned long)&return_to_handler;
+
+	if (unlikely(atomic_read(&current->tracing_graph_pause)))
+		return;
+
+	old = *parent;
+
+	if (!function_graph_enter(old, self_addr, 0, NULL))
+		*parent = return_hooker;
+}
+
+static int ftrace_modify_graph_caller(bool enable)
+{
+	u32 branch, nop;
+	unsigned long pc, func;
+	extern void ftrace_graph_call(void);
+
+	pc = (unsigned long)&ftrace_graph_call;
+	func = (unsigned long)&ftrace_graph_caller;
+
+	nop = larch_insn_gen_nop();
+	branch = larch_insn_gen_b(pc, func);
+
+	if (enable)
+		return ftrace_modify_code(pc, nop, branch, true);
+	else
+		return ftrace_modify_code(pc, branch, nop, true);
+}
+
+int ftrace_enable_ftrace_graph_caller(void)
+{
+	return ftrace_modify_graph_caller(true);
+}
+
+int ftrace_disable_ftrace_graph_caller(void)
+{
+	return ftrace_modify_graph_caller(false);
+}
+#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
diff --git a/arch/loongarch/kernel/inst.c b/arch/loongarch/kernel/inst.c
index 4fd22b4413d0..39671e87e31c 100644
--- a/arch/loongarch/kernel/inst.c
+++ b/arch/loongarch/kernel/inst.c
@@ -55,6 +55,30 @@ u32 larch_insn_gen_nop(void)
 	return INSN_NOP;
 }
 
+u32 larch_insn_gen_b(unsigned long pc, unsigned long dest)
+{
+	long offset = dest - pc;
+	unsigned int immediate_l, immediate_h;
+	union loongarch_instruction insn;
+
+	if ((offset & 3) || offset < -SZ_128M || offset >= SZ_128M) {
+		pr_warn("The generated b instruction is out of range.\n");
+		return INSN_BREAK;
+	}
+
+	offset >>= 2;
+
+	immediate_l = offset & 0xffff;
+	offset >>= 16;
+	immediate_h = offset & 0x3ff;
+
+	insn.reg0i26_format.opcode = b_op;
+	insn.reg0i26_format.immediate_l = immediate_l;
+	insn.reg0i26_format.immediate_h = immediate_h;
+
+	return insn.word;
+}
+
 u32 larch_insn_gen_bl(unsigned long pc, unsigned long dest)
 {
 	long offset = dest - pc;
diff --git a/arch/loongarch/kernel/mcount_dyn.S b/arch/loongarch/kernel/mcount_dyn.S
index 45ba88d2aacc..cce3daa2eb1e 100644
--- a/arch/loongarch/kernel/mcount_dyn.S
+++ b/arch/loongarch/kernel/mcount_dyn.S
@@ -57,6 +57,11 @@ SYM_CODE_START(ftrace_common)
 
 SYM_INNER_LABEL(ftrace_call, SYM_L_GLOBAL)
 	bl		ftrace_stub
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+SYM_INNER_LABEL(ftrace_graph_call, SYM_L_GLOBAL)
+	nop				/* b ftrace_graph_caller */
+#endif
+
 	/*
 	 * As we didn't use S series regs in this assmembly code and all calls
 	 * are C function which will save S series regs by themselves, there is
@@ -83,3 +88,31 @@ SYM_CODE_START(ftrace_caller)
 	ftrace_regs_entry
 	b	ftrace_common
 SYM_CODE_END(ftrace_caller)
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+SYM_CODE_START(ftrace_graph_caller)
+	PTR_L		a0, sp, PT_ERA
+	PTR_ADDI	a0, a0, -8	/* arg0: self_addr */
+	PTR_ADDI	a1, sp, PT_R1	/* arg1: parent */
+	bl		prepare_ftrace_return
+	b		ftrace_common_return
+SYM_CODE_END(ftrace_graph_caller)
+
+SYM_CODE_START(return_to_handler)
+	/* Save return value regs */
+	PTR_ADDI	sp, sp, -2 * SZREG
+	PTR_S		a0, sp, 0
+	PTR_S		a1, sp, SZREG
+
+	move		a0, zero
+	bl		ftrace_return_to_handler
+	move		ra, a0
+
+	/* Restore return value regs */
+	PTR_L		a0, sp, 0
+	PTR_L		a1, sp, SZREG
+	PTR_ADDI	sp, sp, 2 * SZREG
+
+	jr		ra
+SYM_CODE_END(return_to_handler)
+#endif
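
A note on the branch encoding used above: larch_insn_gen_b() rejects any
offset that is not 4-byte aligned or not within +/-128 MiB, converts the byte
offset to an instruction count, and splits it across the reg0i26 immediate
fields (low 16 bits in immediate_l, high 10 bits in immediate_h). The
stand-alone user-space sketch below mirrors that packing and adds a round-trip
decode to show the sign extension of the 26-bit field. It is illustrative
only: B_OPCODE is a placeholder for the real b_op value, and encode_b() /
decode_b_offset() are not kernel helpers.

/*
 * Sketch (not kernel code): how a PC-relative byte offset becomes the
 * split 26-bit immediate of a LoongArch "b" instruction, following the
 * field layout of the reg0i26_format union used by inst.c.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define B_OPCODE	0x14u		/* assumed opcode bits; placeholder */
#define SZ_128M		0x08000000L

static uint32_t encode_b(unsigned long pc, unsigned long dest)
{
	long offset = dest - pc;
	uint32_t imm_l, imm_h;

	/* Same range check as larch_insn_gen_b(): aligned, within +/-128M. */
	if ((offset & 3) || offset < -SZ_128M || offset >= SZ_128M)
		return 0;			/* kernel returns INSN_BREAK */

	offset >>= 2;				/* immediate counts instructions */
	imm_l = offset & 0xffff;		/* offs[15:0]  -> insn bits [25:10] */
	imm_h = (offset >> 16) & 0x3ff;		/* offs[25:16] -> insn bits [9:0]  */

	return (B_OPCODE << 26) | (imm_l << 10) | imm_h;
}

static long decode_b_offset(uint32_t insn)
{
	long offs = ((long)(insn & 0x3ff) << 16) | ((insn >> 10) & 0xffff);

	/* Sign-extend the 26-bit instruction count, then back to bytes. */
	if (offs & (1L << 25))
		offs -= 1L << 26;
	return offs * 4;
}

int main(void)
{
	unsigned long pc = 0x200000UL;		/* arbitrary example addresses */
	unsigned long dest = pc - 0x1234UL;	/* a backwards branch */

	assert(decode_b_offset(encode_b(pc, dest)) == -0x1234L);
	printf("b encodes a %+ld byte branch\n", decode_b_offset(encode_b(pc, dest)));
	return 0;
}

This is also why ftrace_modify_graph_caller() can toggle the single
ftrace_graph_call site between a nop and a b: both forms are one 4-byte
instruction, so only that word needs to be rewritten.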