author    Leon Hwang <hffilwlqm@gmail.com>  2023-09-12 18:04:41 +0300
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>  2023-11-20 13:58:55 +0300
commit    8f873cc3f67f2257493063e57d0caf07ef889ee9 (patch)
tree      9ee7a19cea035ecfd15e881d8e27c3e601bf4178 /arch/x86
parent    f6071cf7f3ee27e6be6c05edf6a95c7151897807 (diff)
download  linux-8f873cc3f67f2257493063e57d0caf07ef889ee9.tar.xz
bpf, x64: Fix tailcall infinite loop
[ Upstream commit 2b5dcb31a19a2e0acd869b12c9db9b2d696ef544 ]

Since commit ebf7d1f508a73871 ("bpf, x64: rework pro/epilogue and tailcall
handling in JIT"), tailcall handling on x64 has been reworked and improved.
Since commit e411901c0b775a3a ("bpf: allow for tailcalls in BPF subprograms
for x64 JIT"), tailcalls can run inside BPF subprograms on x64. And since
commit 5b92a28aae4dd0f8 ("bpf: Support attaching tracing BPF program to
other BPF programs"), a BPF program can trace other BPF programs.

Now combine all three:

1. Attach FENTRY/FEXIT to a BPF subprogram.
2. Run a tailcall in that subprogram.
3. Make the tailcall target the subprogram's caller.

The result is a tailcall infinite loop, and the loop halts the machine.

In tail call context, tail_call_cnt is propagated between BPF subprograms
via the stack and the rax register; trampolines must do the same. The fix
therefore makes trampolines built with BPF_TRAMP_F_TAIL_CALL_CTX preserve
the incoming tail_call_cnt (push rax in the prologue) and restore it into
rax before invoking the original function, so the counter keeps counting
across the trampoline and the chain terminates once MAX_TAIL_CALL_CNT is
reached.

Fixes: ebf7d1f508a7 ("bpf, x64: rework pro/epilogue and tailcall handling in JIT")
Fixes: e411901c0b77 ("bpf: allow for tailcalls in BPF subprograms for x64 JIT")
Reviewed-by: Maciej Fijalkowski <maciej.fijalkowski@intel.com>
Signed-off-by: Leon Hwang <hffilwlqm@gmail.com>
Link: https://lore.kernel.org/r/20230912150442.2009-3-hffilwlqm@gmail.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: Sasha Levin <sashal@kernel.org>
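To make the three-step recipe concrete, here is a minimal sketch of a
program layout that would trigger the loop. It is illustrative only: the
map and function names (jmp_table, subprog, entry) are hypothetical, not
from the patch, and the userspace wiring (populating the prog array,
attaching the FENTRY program) is described in comments rather than shown.

/* Hypothetical reproducer sketch (BPF C, built with clang -target bpf). */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_PROG_ARRAY);
	__uint(max_entries, 1);
	__uint(key_size, sizeof(__u32));
	__uint(value_size, sizeof(__u32));
} jmp_table SEC(".maps");

static __noinline int subprog(struct __sk_buff *skb)
{
	/* Step 2: a tailcall runs in the BPF subprogram.
	 * Step 3: userspace stored the caller ("entry") in slot 0,
	 * so the tailcall jumps back to the subprogram's caller.
	 */
	bpf_tail_call_static(skb, &jmp_table, 0);
	return 0;
}

SEC("tc")
int entry(struct __sk_buff *skb)
{
	/* Step 1: userspace attaches an FENTRY program to subprog.
	 * Without the fix, each trip through the FENTRY trampoline
	 * clobbers rax, so tail_call_cnt never reaches
	 * MAX_TAIL_CALL_CNT and the tailcall loops forever.
	 */
	return subprog(skb);
}

char _license[] SEC("license") = "GPL";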
Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/net/bpf_jit_comp.c | 28 ++++++++++++++++++++++------
1 file changed, 22 insertions(+), 6 deletions(-)
diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
index a5930042139d..52f36c48c1b9 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -1018,6 +1018,10 @@ static void emit_shiftx(u8 **pprog, u32 dst_reg, u8 src_reg, bool is64, u8 op)
#define INSN_SZ_DIFF (((addrs[i] - addrs[i - 1]) - (prog - temp)))
+/* mov rax, qword ptr [rbp - rounded_stack_depth - 8] */
+#define RESTORE_TAIL_CALL_CNT(stack) \
+ EMIT3_off32(0x48, 0x8B, 0x85, -round_up(stack, 8) - 8)
+
static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image, u8 *rw_image,
int oldproglen, struct jit_context *ctx, bool jmp_padding)
{
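A note on the bytes the new macro emits: the breakdown below is
hand-decoded standard x86-64 encoding, added here for orientation; it is
not text from the patch.

/* RESTORE_TAIL_CALL_CNT(stack), byte by byte:
 *   0x48    REX.W prefix: 64-bit operand size
 *   0x8B    opcode: MOV r64, r/m64
 *   0x85    ModRM: mod=10 (disp32), reg=000 (rax), rm=101 (rbp)
 *   off32   -round_up(stack, 8) - 8
 * i.e. mov rax, QWORD PTR [rbp - round_up(stack, 8) - 8] -- the slot
 * where a tail-call-reachable prologue's "push rax" spilled
 * tail_call_cnt.
 */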
@@ -1623,9 +1627,7 @@ st: if (is_imm8(insn->off))
func = (u8 *) __bpf_call_base + imm32;
if (tail_call_reachable) {
- /* mov rax, qword ptr [rbp - rounded_stack_depth - 8] */
- EMIT3_off32(0x48, 0x8B, 0x85,
- -round_up(bpf_prog->aux->stack_depth, 8) - 8);
+ RESTORE_TAIL_CALL_CNT(bpf_prog->aux->stack_depth);
if (!imm32)
return -EINVAL;
offs = 7 + x86_call_depth_emit_accounting(&prog, func);
@@ -2400,6 +2402,7 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i
* [ ... ]
* [ stack_arg2 ]
* RBP - arg_stack_off [ stack_arg1 ]
+ * RSP [ tail_call_cnt ] BPF_TRAMP_F_TAIL_CALL_CTX
*/
/* room for return value of orig_call or fentry prog */
@@ -2464,6 +2467,8 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i
else
/* sub rsp, stack_size */
EMIT4(0x48, 0x83, 0xEC, stack_size);
+ if (flags & BPF_TRAMP_F_TAIL_CALL_CTX)
+ EMIT1(0x50); /* push rax */
/* mov QWORD PTR [rbp - rbx_off], rbx */
emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_6, -rbx_off);
@@ -2516,9 +2521,15 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i
restore_regs(m, &prog, regs_off);
save_args(m, &prog, arg_stack_off, true);
+ if (flags & BPF_TRAMP_F_TAIL_CALL_CTX)
+ /* Before calling the original function, restore the
+ * tail_call_cnt from stack to rax.
+ */
+ RESTORE_TAIL_CALL_CNT(stack_size);
+
if (flags & BPF_TRAMP_F_ORIG_STACK) {
- emit_ldx(&prog, BPF_DW, BPF_REG_0, BPF_REG_FP, 8);
- EMIT2(0xff, 0xd0); /* call *rax */
+ emit_ldx(&prog, BPF_DW, BPF_REG_6, BPF_REG_FP, 8);
+ EMIT2(0xff, 0xd3); /* call *rbx */
} else {
/* call original function */
if (emit_rsb_call(&prog, orig_call, prog)) {
@@ -2569,7 +2580,12 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i
ret = -EINVAL;
goto cleanup;
}
- }
+ } else if (flags & BPF_TRAMP_F_TAIL_CALL_CTX)
+ /* Before running the original function, restore the
+ * tail_call_cnt from stack to rax.
+ */
+ RESTORE_TAIL_CALL_CNT(stack_size);
+
/* restore return value of orig_call or fentry prog back into RAX */
if (save_ret)
emit_ldx(&prog, BPF_DW, BPF_REG_0, BPF_REG_FP, -8);
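Pieced together from the hunks above, the control flow of a trampoline
built with BPF_TRAMP_F_TAIL_CALL_CTX looks roughly as follows. This is a
simplified reconstruction for orientation, not the literal emitted code.

/* Trampoline sketch (reconstruction, simplified):
 *
 *   push rbp
 *   mov  rbp, rsp
 *   sub  rsp, stack_size
 *   push rax                       ; NEW: save caller's tail_call_cnt
 *   ...                            ; run fentry progs, save/restore args
 *   mov  rax, [rbp - round_up(stack_size, 8) - 8]
 *                                  ; NEW: RESTORE_TAIL_CALL_CNT
 *   call original                  ; callee's prologue re-spills rax
 *   ...                            ; run fexit progs
 *
 * In the BPF_TRAMP_F_ORIG_STACK path the original's address is now
 * loaded into rbx (BPF_REG_6) and invoked via "call *rbx" instead of
 * "call *rax", because rax must carry tail_call_cnt at the call site.
 * With the counter preserved end to end, the tailcall chain terminates
 * once it hits MAX_TAIL_CALL_CNT instead of looping forever.
 */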