author     Daniel Borkmann <daniel@iogearbox.net>    2018-11-26 16:05:39 +0300
committer  Alexei Starovoitov <ast@kernel.org>       2018-11-27 04:34:24 +0300
commit     8c11ea5ce13da0252fc92f91e90b0cb0c8fe5619 (patch)
tree       eaa5f5849390692a575aa95280313f41b5475ebe
parent     e2c95a61656d29ceaac97b6a975c8a1f26e26f15 (diff)
bpf, arm64: fix getting subprog addr from aux for calls
The arm64 JIT has the same issue as the ppc64 JIT: the relative BPF-to-BPF
call offset can be too far away from the core kernel, so that relative
encoding into the imm field is insufficient and could potentially be
truncated. See also fd045f6cd98e ("arm64: add support for module PLTs"),
which adds spill-over space for module_alloc() and therefore
bpf_jit_binary_alloc().

Therefore, use the recently added bpf_jit_get_func_addr() helper to
properly fetch the address through prog->aux->func[off]->bpf_func instead.
This also has the benefit of optimizing normal helper calls, since their
address can use the optimized emission.

Tested on Cavium ThunderX CN8890.

Fixes: db496944fdaa ("bpf: arm64: add JIT support for multi-function programs")
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
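For reference, bpf_jit_get_func_addr() lives in kernel/bpf/core.c and was
introduced by the parent commit e2c95a61656d. A paraphrased sketch of its
logic (not part of this diff; see that commit for the authoritative
version):

int bpf_jit_get_func_addr(const struct bpf_prog *prog,
			  const struct bpf_insn *insn, bool extra_pass,
			  u64 *func_addr, bool *func_addr_fixed)
{
	s16 off = insn->off;
	s32 imm = insn->imm;
	u8 *addr;

	/* Calls with src_reg == BPF_PSEUDO_CALL are BPF-to-BPF calls;
	 * everything else is a helper call at a fixed address.
	 */
	*func_addr_fixed = insn->src_reg != BPF_PSEUDO_CALL;
	if (!*func_addr_fixed) {
		/* Subprog addresses are only known once the extra pass
		 * runs and prog->aux->func[] has been populated; earlier
		 * passes see a placeholder.
		 */
		if (!extra_pass)
			addr = NULL;
		else if (prog->aux->func &&
			 off >= 0 && off < prog->aux->func_cnt)
			addr = (u8 *)prog->aux->func[off]->bpf_func;
		else
			return -EINVAL;
	} else {
		/* Helper calls sit at a fixed offset from __bpf_call_base
		 * inside the core kernel image.
		 */
		addr = (u8 *)__bpf_call_base + imm;
	}
	*func_addr = (unsigned long)addr;
	return 0;
}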
 arch/arm64/net/bpf_jit_comp.c | 26 +++++++++++++++---------
 1 file changed, 17 insertions(+), 9 deletions(-)
diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c
index a6fdaea07c63..89198017e8e6 100644
--- a/arch/arm64/net/bpf_jit_comp.c
+++ b/arch/arm64/net/bpf_jit_comp.c
@@ -351,7 +351,8 @@ static void build_epilogue(struct jit_ctx *ctx)
  * >0 - successfully JITed a 16-byte eBPF instruction.
  * <0 - failed to JIT.
  */
-static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
+static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
+		      bool extra_pass)
 {
 	const u8 code = insn->code;
 	const u8 dst = bpf2a64[insn->dst_reg];
@@ -625,12 +626,19 @@ emit_cond_jmp:
 	case BPF_JMP | BPF_CALL:
 	{
 		const u8 r0 = bpf2a64[BPF_REG_0];
-		const u64 func = (u64)__bpf_call_base + imm;
+		bool func_addr_fixed;
+		u64 func_addr;
+		int ret;
 
-		if (ctx->prog->is_func)
-			emit_addr_mov_i64(tmp, func, ctx);
+		ret = bpf_jit_get_func_addr(ctx->prog, insn, extra_pass,
+					    &func_addr, &func_addr_fixed);
+		if (ret < 0)
+			return ret;
+		if (func_addr_fixed)
+			/* We can use optimized emission here. */
+			emit_a64_mov_i64(tmp, func_addr, ctx);
 		else
-			emit_a64_mov_i64(tmp, func, ctx);
+			emit_addr_mov_i64(tmp, func_addr, ctx);
 		emit(A64_BLR(tmp), ctx);
 		emit(A64_MOV(1, r0, A64_R(0)), ctx);
 		break;
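A note on the two emission paths above: emit_a64_mov_i64() produces the
shortest movz/movk sequence for a given value (skipping all-zero 16-bit
chunks), so its length depends on the immediate, while emit_addr_mov_i64()
always emits a full fixed-length decomposition. Subprog addresses are
placeholders until the extra pass, so their call sites must not change
size between passes; only fixed helper addresses may take the optimized
path. A simplified, illustrative user-space sketch of why the optimized
length varies (optimized_insn_count() is hypothetical, not a kernel
function, and the exact kernel sequence has varied across versions):

#include <stdint.h>
#include <stdio.h>

/* Illustrative only: the optimized emitter issues one movz plus one movk
 * per remaining non-zero 16-bit chunk of the 64-bit immediate, so the
 * instruction count depends on the value. A sequence sized for a
 * placeholder address may therefore not fit the final one.
 */
static int optimized_insn_count(uint64_t val)
{
	int count = 1;	/* movz for bits 0-15 */
	int shift;

	for (shift = 16; shift < 64; shift += 16)
		if ((val >> shift) & 0xffff)
			count++;	/* movk for this chunk */
	return count;
}

int main(void)
{
	printf("%d\n", optimized_insn_count(0xffff000010081234ULL)); /* 3 */
	printf("%d\n", optimized_insn_count(0x1234ULL));             /* 1 */
	return 0;
}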
@@ -753,7 +761,7 @@ emit_cond_jmp:
 	return 0;
 }
 
-static int build_body(struct jit_ctx *ctx)
+static int build_body(struct jit_ctx *ctx, bool extra_pass)
 {
 	const struct bpf_prog *prog = ctx->prog;
 	int i;
@@ -762,7 +770,7 @@ static int build_body(struct jit_ctx *ctx)
 		const struct bpf_insn *insn = &prog->insnsi[i];
 		int ret;
 
-		ret = build_insn(insn, ctx);
+		ret = build_insn(insn, ctx, extra_pass);
 		if (ret > 0) {
 			i++;
 			if (ctx->image == NULL)
@@ -858,7 +866,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
 	/* 1. Initial fake pass to compute ctx->idx. */
 
 	/* Fake pass to fill in ctx->offset. */
-	if (build_body(&ctx)) {
+	if (build_body(&ctx, extra_pass)) {
 		prog = orig_prog;
 		goto out_off;
 	}
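Why threading extra_pass through matters here: this first build_body()
call is the sizing pass (ctx->image is still NULL), and when the JIT is
re-invoked for the extra pass it jumps straight to skip_init_ctx, so
subprog call sites are always sized via the placeholder path and must
occupy the same number of instructions in every pass. A rough sketch of
the control flow, under the assumption that the verifier drives the JIT
twice for programs with BPF-to-BPF calls (simplified; names from this
file):

/*
 * bpf_int_jit_compile(prog)    first invocation, extra_pass == false
 *   build_body(&ctx, false)    sizing pass, ctx->image == NULL
 *   ...allocate image...
 *   build_body(&ctx, false)    emission pass, placeholder subprog addrs
 *
 * bpf_int_jit_compile(prog)    re-invoked once all subprogs are JITed
 *   extra_pass = true; goto skip_init_ctx;
 *   build_body(&ctx, true)     real addrs via prog->aux->func[]
 */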
@@ -888,7 +896,7 @@ skip_init_ctx:
 	build_prologue(&ctx, was_classic);
 
-	if (build_body(&ctx)) {
+	if (build_body(&ctx, extra_pass)) {
 		bpf_jit_binary_free(header);
 		prog = orig_prog;
 		goto out_off;
 	}