Diffstat (limited to 'arch/arm64/net/bpf_jit_comp.c')
-rw-r--r--  arch/arm64/net/bpf_jit_comp.c | 286
1 file changed, 234 insertions(+), 52 deletions(-)
diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c
index ceed77c29fe6..13eac43c632d 100644
--- a/arch/arm64/net/bpf_jit_comp.c
+++ b/arch/arm64/net/bpf_jit_comp.c
@@ -76,6 +76,7 @@ struct jit_ctx {
int *offset;
int exentry_idx;
__le32 *image;
+ __le32 *ro_image;
u32 stack_size;
int fpb_offset;
};
@@ -205,6 +206,14 @@ static void jit_fill_hole(void *area, unsigned int size)
*ptr++ = cpu_to_le32(AARCH64_BREAK_FAULT);
}
+int bpf_arch_text_invalidate(void *dst, size_t len)
+{
+ if (!aarch64_insn_set(dst, AARCH64_BREAK_FAULT, len))
+ return -EINVAL;
+
+ return 0;
+}
+
static inline int epilogue_offset(const struct jit_ctx *ctx)
{
int to = ctx->epilogue_offset;
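
For context, not part of the patch: bpf_arch_text_invalidate() is the hook the common prog-pack allocator relies on when it reclaims a slot inside the shared read-only region; nothing is unmapped, the slot is simply overwritten with break instructions through arm64's text-patching path. A hedged sketch of a caller (retire_image(), ro_slot and slot_size are hypothetical names):

/*
 * Illustrative only: retire one JITed image inside a shared read-only
 * pack by poisoning its slot.
 */
static void retire_image(void *ro_slot, size_t slot_size)
{
	/* Stamp AARCH64_BREAK_FAULT over the slot via text patching. */
	WARN_ON_ONCE(bpf_arch_text_invalidate(ro_slot, slot_size));
}
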
@@ -285,7 +294,8 @@ static bool is_lsi_offset(int offset, int scale)
/* Tail call offset to jump into */
#define PROLOGUE_OFFSET (BTI_INSNS + 2 + PAC_INSNS + 8)
-static int build_prologue(struct jit_ctx *ctx, bool ebpf_from_cbpf)
+static int build_prologue(struct jit_ctx *ctx, bool ebpf_from_cbpf,
+ bool is_exception_cb)
{
const struct bpf_prog *prog = ctx->prog;
const bool is_main_prog = !bpf_is_subprog(prog);
@@ -333,19 +343,34 @@ static int build_prologue(struct jit_ctx *ctx, bool ebpf_from_cbpf)
emit(A64_MOV(1, A64_R(9), A64_LR), ctx);
emit(A64_NOP, ctx);
- /* Sign lr */
- if (IS_ENABLED(CONFIG_ARM64_PTR_AUTH_KERNEL))
- emit(A64_PACIASP, ctx);
-
- /* Save FP and LR registers to stay align with ARM64 AAPCS */
- emit(A64_PUSH(A64_FP, A64_LR, A64_SP), ctx);
- emit(A64_MOV(1, A64_FP, A64_SP), ctx);
-
- /* Save callee-saved registers */
- emit(A64_PUSH(r6, r7, A64_SP), ctx);
- emit(A64_PUSH(r8, r9, A64_SP), ctx);
- emit(A64_PUSH(fp, tcc, A64_SP), ctx);
- emit(A64_PUSH(fpb, A64_R(28), A64_SP), ctx);
+ if (!is_exception_cb) {
+ /* Sign lr */
+ if (IS_ENABLED(CONFIG_ARM64_PTR_AUTH_KERNEL))
+ emit(A64_PACIASP, ctx);
+ /* Save FP and LR registers to stay aligned with ARM64 AAPCS */
+ emit(A64_PUSH(A64_FP, A64_LR, A64_SP), ctx);
+ emit(A64_MOV(1, A64_FP, A64_SP), ctx);
+
+ /* Save callee-saved registers */
+ emit(A64_PUSH(r6, r7, A64_SP), ctx);
+ emit(A64_PUSH(r8, r9, A64_SP), ctx);
+ emit(A64_PUSH(fp, tcc, A64_SP), ctx);
+ emit(A64_PUSH(fpb, A64_R(28), A64_SP), ctx);
+ } else {
+ /*
+ * Exception callback receives FP of Main Program as third
+ * parameter
+ */
+ emit(A64_MOV(1, A64_FP, A64_R(2)), ctx);
+ /*
+ * Main Program already pushed the frame record and the
+ * callee-saved registers. The exception callback will not push
+ * anything and re-use the main program's stack.
+ *
+ * 10 registers are on the stack
+ */
+ emit(A64_SUB_I(1, A64_SP, A64_FP, 80), ctx);
+ }
/* Set up BPF prog stack base register */
emit(A64_MOV(1, fp, A64_SP), ctx);
@@ -365,6 +390,20 @@ static int build_prologue(struct jit_ctx *ctx, bool ebpf_from_cbpf)
emit_bti(A64_BTI_J, ctx);
}
+ /*
+ * Program acting as exception boundary should save all ARM64
+ * Callee-saved registers as the exception callback needs to recover
+ * all ARM64 Callee-saved registers in its epilogue.
+ */
+ if (prog->aux->exception_boundary) {
+ /*
+ * As we are pushing two more registers, BPF_FP should be moved
+ * 16 bytes
+ */
+ emit(A64_SUB_I(1, fp, fp, 16), ctx);
+ emit(A64_PUSH(A64_R(23), A64_R(24), A64_SP), ctx);
+ }
+
emit(A64_SUB_I(1, fpb, fp, ctx->fpb_offset), ctx);
/* Stack must be multiples of 16B */
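
For context, not part of the patch: counting the extra x23/x24 pair pushed by an exception-boundary program in the hunk above, ten callee-saved registers sit below the frame pointer, which is where the 80-byte offset in the exception-callback path and the 16-byte BPF_FP adjustment come from. A sketch of the boundary program's frame, assuming the JIT's usual register mapping (r6/r7 -> x19/x20, r8/r9 -> x21/x22, fp -> x25, tcc -> x26, fpb -> x27):

/*
 * Illustrative layout of the boundary program's register save area
 * (descending addresses; each row is one 16-byte STP pair):
 *
 *	A64_FP + 0:	x29, x30	frame record
 *	A64_FP - 16:	x19, x20	BPF r6, r7
 *	A64_FP - 32:	x21, x22	BPF r8, r9
 *	A64_FP - 48:	x25, x26	BPF fp, tail-call count
 *	A64_FP - 64:	x27, x28	fpb, x28
 *	A64_FP - 80:	x23, x24	extra pair for the exception boundary
 *
 * Ten registers (80 bytes) live below A64_FP, so the exception callback
 * can recreate the same stack pointer with "sub sp, fp, #80", and both
 * the boundary program and the callback pop x23/x24 in their epilogues.
 */
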
@@ -653,7 +692,7 @@ static void build_plt(struct jit_ctx *ctx)
plt->target = (u64)&dummy_tramp;
}
-static void build_epilogue(struct jit_ctx *ctx)
+static void build_epilogue(struct jit_ctx *ctx, bool is_exception_cb)
{
const u8 r0 = bpf2a64[BPF_REG_0];
const u8 r6 = bpf2a64[BPF_REG_6];
@@ -666,6 +705,15 @@ static void build_epilogue(struct jit_ctx *ctx)
/* We're done with BPF stack */
emit(A64_ADD_I(1, A64_SP, A64_SP, ctx->stack_size), ctx);
+ /*
+ * Program acting as exception boundary pushes R23 and R24 in addition
+ * to BPF callee-saved registers. Exception callback uses the boundary
+ * program's stack frame, so recover these extra registers in the above
+ * two cases.
+ */
+ if (ctx->prog->aux->exception_boundary || is_exception_cb)
+ emit(A64_POP(A64_R(23), A64_R(24), A64_SP), ctx);
+
/* Restore x27 and x28 */
emit(A64_POP(fpb, A64_R(28), A64_SP), ctx);
/* Restore fs (x25) and x26 */
@@ -707,7 +755,8 @@ static int add_exception_handler(const struct bpf_insn *insn,
struct jit_ctx *ctx,
int dst_reg)
{
- off_t offset;
+ off_t ins_offset;
+ off_t fixup_offset;
unsigned long pc;
struct exception_table_entry *ex;
@@ -724,12 +773,17 @@ static int add_exception_handler(const struct bpf_insn *insn,
return -EINVAL;
ex = &ctx->prog->aux->extable[ctx->exentry_idx];
- pc = (unsigned long)&ctx->image[ctx->idx - 1];
+ pc = (unsigned long)&ctx->ro_image[ctx->idx - 1];
- offset = pc - (long)&ex->insn;
- if (WARN_ON_ONCE(offset >= 0 || offset < INT_MIN))
+ /*
+ * This is the relative offset of the instruction that may fault from
+ * the exception table itself. This will be written to the exception
+ * table and if this instruction faults, the destination register will
+ * be set to '0' and the execution will jump to the next instruction.
+ */
+ ins_offset = pc - (long)&ex->insn;
+ if (WARN_ON_ONCE(ins_offset >= 0 || ins_offset < INT_MIN))
return -ERANGE;
- ex->insn = offset;
/*
* Since the extable follows the program, the fixup offset is always
@@ -738,12 +792,25 @@ static int add_exception_handler(const struct bpf_insn *insn,
* bits. We don't need to worry about buildtime or runtime sort
* modifying the upper bits because the table is already sorted, and
* isn't part of the main exception table.
+ *
+ * The fixup_offset is set to the next instruction from the instruction
+ * that may fault. The execution will jump to this after handling the
+ * fault.
*/
- offset = (long)&ex->fixup - (pc + AARCH64_INSN_SIZE);
- if (!FIELD_FIT(BPF_FIXUP_OFFSET_MASK, offset))
+ fixup_offset = (long)&ex->fixup - (pc + AARCH64_INSN_SIZE);
+ if (!FIELD_FIT(BPF_FIXUP_OFFSET_MASK, fixup_offset))
return -ERANGE;
- ex->fixup = FIELD_PREP(BPF_FIXUP_OFFSET_MASK, offset) |
+ /*
+ * The offsets above have been calculated using the RO buffer but we
+ * need to use the R/W buffer for writes.
+ * Switch ex to the R/W buffer for writing.
+ */
+ ex = (void *)ctx->image + ((void *)ex - (void *)ctx->ro_image);
+
+ ex->insn = ins_offset;
+
+ ex->fixup = FIELD_PREP(BPF_FIXUP_OFFSET_MASK, fixup_offset) |
FIELD_PREP(BPF_FIXUP_REG_MASK, dst_reg);
ex->type = EX_TYPE_BPF;
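
For context, not part of the patch: both fields are self-relative 32-bit offsets, so they must be computed against the RO addresses the program will execute from, even though the entry itself is written through the RW alias. A hedged sketch of how they are resolved at fault time, following the relative-extable convention and ex_handler_bpf() in this file:

/*
 * Illustrative resolution when the load/store faults:
 *
 *	faulting insn = (unsigned long)&ex->insn + ex->insn;
 *	resume addr   = (unsigned long)&ex->fixup +
 *			FIELD_GET(BPF_FIXUP_OFFSET_MASK, ex->fixup);
 *	dst_reg       = FIELD_GET(BPF_FIXUP_REG_MASK, ex->fixup);
 *
 * The handler zeroes dst_reg and resumes at the instruction following
 * the faulting one, i.e. pc + AARCH64_INSN_SIZE.
 */
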
@@ -1511,7 +1578,8 @@ static inline void bpf_flush_icache(void *start, void *end)
struct arm64_jit_data {
struct bpf_binary_header *header;
- u8 *image;
+ u8 *ro_image;
+ struct bpf_binary_header *ro_header;
struct jit_ctx ctx;
};
@@ -1520,12 +1588,14 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
int image_size, prog_size, extable_size, extable_align, extable_offset;
struct bpf_prog *tmp, *orig_prog = prog;
struct bpf_binary_header *header;
+ struct bpf_binary_header *ro_header;
struct arm64_jit_data *jit_data;
bool was_classic = bpf_prog_was_classic(prog);
bool tmp_blinded = false;
bool extra_pass = false;
struct jit_ctx ctx;
u8 *image_ptr;
+ u8 *ro_image_ptr;
if (!prog->jit_requested)
return orig_prog;
@@ -1552,8 +1622,11 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
}
if (jit_data->ctx.offset) {
ctx = jit_data->ctx;
- image_ptr = jit_data->image;
+ ro_image_ptr = jit_data->ro_image;
+ ro_header = jit_data->ro_header;
header = jit_data->header;
+ image_ptr = (void *)header + ((void *)ro_image_ptr
+ - (void *)ro_header);
extra_pass = true;
prog_size = sizeof(u32) * ctx.idx;
goto skip_init_ctx;
@@ -1575,7 +1648,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
* BPF line info needs ctx->offset[i] to be the offset of
* instruction[i] in jited image, so build prologue first.
*/
- if (build_prologue(&ctx, was_classic)) {
+ if (build_prologue(&ctx, was_classic, prog->aux->exception_cb)) {
prog = orig_prog;
goto out_off;
}
@@ -1586,7 +1659,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
}
ctx.epilogue_offset = ctx.idx;
- build_epilogue(&ctx);
+ build_epilogue(&ctx, prog->aux->exception_cb);
build_plt(&ctx);
extable_align = __alignof__(struct exception_table_entry);
@@ -1598,63 +1671,81 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
/* also allocate space for plt target */
extable_offset = round_up(prog_size + PLT_TARGET_SIZE, extable_align);
image_size = extable_offset + extable_size;
- header = bpf_jit_binary_alloc(image_size, &image_ptr,
- sizeof(u32), jit_fill_hole);
- if (header == NULL) {
+ ro_header = bpf_jit_binary_pack_alloc(image_size, &ro_image_ptr,
+ sizeof(u32), &header, &image_ptr,
+ jit_fill_hole);
+ if (!ro_header) {
prog = orig_prog;
goto out_off;
}
/* 2. Now, the actual pass. */
+ /*
+ * Use the image(RW) for writing the JITed instructions. But also save
+ * the ro_image(RX) for calculating the offsets in the image. The RW
+ * image will be later copied to the RX image from where the program
+ * will run. The bpf_jit_binary_pack_finalize() will do this copy in the
+ * final step.
+ */
ctx.image = (__le32 *)image_ptr;
+ ctx.ro_image = (__le32 *)ro_image_ptr;
if (extable_size)
- prog->aux->extable = (void *)image_ptr + extable_offset;
+ prog->aux->extable = (void *)ro_image_ptr + extable_offset;
skip_init_ctx:
ctx.idx = 0;
ctx.exentry_idx = 0;
- build_prologue(&ctx, was_classic);
+ build_prologue(&ctx, was_classic, prog->aux->exception_cb);
if (build_body(&ctx, extra_pass)) {
- bpf_jit_binary_free(header);
prog = orig_prog;
- goto out_off;
+ goto out_free_hdr;
}
- build_epilogue(&ctx);
+ build_epilogue(&ctx, prog->aux->exception_cb);
build_plt(&ctx);
/* 3. Extra pass to validate JITed code. */
if (validate_ctx(&ctx)) {
- bpf_jit_binary_free(header);
prog = orig_prog;
- goto out_off;
+ goto out_free_hdr;
}
/* And we're done. */
if (bpf_jit_enable > 1)
bpf_jit_dump(prog->len, prog_size, 2, ctx.image);
- bpf_flush_icache(header, ctx.image + ctx.idx);
-
if (!prog->is_func || extra_pass) {
if (extra_pass && ctx.idx != jit_data->ctx.idx) {
pr_err_once("multi-func JIT bug %d != %d\n",
ctx.idx, jit_data->ctx.idx);
- bpf_jit_binary_free(header);
prog->bpf_func = NULL;
prog->jited = 0;
prog->jited_len = 0;
+ goto out_free_hdr;
+ }
+ if (WARN_ON(bpf_jit_binary_pack_finalize(prog, ro_header,
+ header))) {
+ /* ro_header has been freed */
+ ro_header = NULL;
+ prog = orig_prog;
goto out_off;
}
- bpf_jit_binary_lock_ro(header);
+ /*
+ * The instructions have now been copied to the ROX region from
+ * where they will execute. Now the data cache has to be cleaned to
+ * the PoU and the I-cache has to be invalidated for the VAs.
+ */
+ bpf_flush_icache(ro_header, ctx.ro_image + ctx.idx);
} else {
jit_data->ctx = ctx;
- jit_data->image = image_ptr;
+ jit_data->ro_image = ro_image_ptr;
jit_data->header = header;
+ jit_data->ro_header = ro_header;
}
- prog->bpf_func = (void *)ctx.image;
+
+ prog->bpf_func = (void *)ctx.ro_image;
prog->jited = 1;
prog->jited_len = prog_size;
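
For context, not part of the patch: bpf_jit_binary_pack_alloc() hands back two views of the same layout, a writable scratch buffer (header/image_ptr) and the read-only executable destination (ro_header/ro_image_ptr), so a pointer into one view is rebased into the other by preserving its offset from the respective header. A hedged sketch of the translation pattern used above for image_ptr on the extra pass and for the extable entry in add_exception_handler() (ro_to_rw() is an illustrative name):

/* Illustrative only: rebase a pointer from the RO view to the RW view. */
static void *ro_to_rw(struct bpf_binary_header *rw_hdr,
		      struct bpf_binary_header *ro_hdr, void *ro_ptr)
{
	return (void *)rw_hdr + ((void *)ro_ptr - (void *)ro_hdr);
}
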
@@ -1675,6 +1766,14 @@ out:
bpf_jit_prog_release_other(prog, prog == orig_prog ?
tmp : orig_prog);
return prog;
+
+out_free_hdr:
+ if (header) {
+ bpf_arch_text_copy(&ro_header->size, &header->size,
+ sizeof(header->size));
+ bpf_jit_binary_pack_free(ro_header, header);
+ }
+ goto out_off;
}
bool bpf_jit_supports_kfunc_call(void)
@@ -1682,6 +1781,13 @@ bool bpf_jit_supports_kfunc_call(void)
return true;
}
+void *bpf_arch_text_copy(void *dst, void *src, size_t len)
+{
+ if (!aarch64_insn_copy(dst, src, len))
+ return ERR_PTR(-EINVAL);
+ return dst;
+}
+
u64 bpf_jit_alloc_exec_limit(void)
{
return VMALLOC_END - VMALLOC_START;
@@ -1970,7 +2076,7 @@ static int prepare_trampoline(struct jit_ctx *ctx, struct bpf_tramp_image *im,
/* store return value */
emit(A64_STR64I(A64_R(0), A64_SP, retval_off), ctx);
/* reserve a nop for bpf_tramp_image_put */
- im->ip_after_call = ctx->image + ctx->idx;
+ im->ip_after_call = ctx->ro_image + ctx->idx;
emit(A64_NOP, ctx);
}
@@ -1985,7 +2091,7 @@ static int prepare_trampoline(struct jit_ctx *ctx, struct bpf_tramp_image *im,
run_ctx_off, false);
if (flags & BPF_TRAMP_F_CALL_ORIG) {
- im->ip_epilogue = ctx->image + ctx->idx;
+ im->ip_epilogue = ctx->ro_image + ctx->idx;
emit_addr_mov_i64(A64_R(0), (const u64)im, ctx);
emit_call((const u64)__bpf_tramp_exit, ctx);
}
@@ -2018,9 +2124,6 @@ static int prepare_trampoline(struct jit_ctx *ctx, struct bpf_tramp_image *im,
emit(A64_RET(A64_R(10)), ctx);
}
- if (ctx->image)
- bpf_flush_icache(ctx->image, ctx->image + ctx->idx);
-
kfree(branches);
return ctx->idx;
@@ -2063,14 +2166,43 @@ int arch_bpf_trampoline_size(const struct btf_func_model *m, u32 flags,
return ret < 0 ? ret : ret * AARCH64_INSN_SIZE;
}
-int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image,
- void *image_end, const struct btf_func_model *m,
+void *arch_alloc_bpf_trampoline(unsigned int size)
+{
+ return bpf_prog_pack_alloc(size, jit_fill_hole);
+}
+
+void arch_free_bpf_trampoline(void *image, unsigned int size)
+{
+ bpf_prog_pack_free(image, size);
+}
+
+void arch_protect_bpf_trampoline(void *image, unsigned int size)
+{
+}
+
+void arch_unprotect_bpf_trampoline(void *image, unsigned int size)
+{
+}
+
+int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *ro_image,
+ void *ro_image_end, const struct btf_func_model *m,
u32 flags, struct bpf_tramp_links *tlinks,
void *func_addr)
{
int ret, nregs;
+ void *image, *tmp;
+ u32 size = ro_image_end - ro_image;
+
+ /* image doesn't need to be in module memory range, so we can
+ * use kvmalloc.
+ */
+ image = kvmalloc(size, GFP_KERNEL);
+ if (!image)
+ return -ENOMEM;
+
struct jit_ctx ctx = {
.image = image,
+ .ro_image = ro_image,
.idx = 0,
};
@@ -2079,15 +2211,26 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image,
if (nregs > 8)
return -ENOTSUPP;
- jit_fill_hole(image, (unsigned int)(image_end - image));
+ jit_fill_hole(image, (unsigned int)(ro_image_end - ro_image));
ret = prepare_trampoline(&ctx, im, tlinks, func_addr, nregs, flags);
- if (ret > 0 && validate_code(&ctx) < 0)
+ if (ret > 0 && validate_code(&ctx) < 0) {
ret = -EINVAL;
+ goto out;
+ }
if (ret > 0)
ret *= AARCH64_INSN_SIZE;
+ tmp = bpf_arch_text_copy(ro_image, image, size);
+ if (IS_ERR(tmp)) {
+ ret = PTR_ERR(tmp);
+ goto out;
+ }
+
+ bpf_flush_icache(ro_image, ro_image + size);
+out:
+ kvfree(image);
return ret;
}
@@ -2305,3 +2448,42 @@ out:
return ret;
}
+
+bool bpf_jit_supports_ptr_xchg(void)
+{
+ return true;
+}
+
+bool bpf_jit_supports_exceptions(void)
+{
+ /* We unwind through both kernel frames starting from within bpf_throw
+ * call and BPF frames. Therefore we require FP unwinder to be enabled
+ * to walk kernel frames and reach BPF frames in the stack trace.
+ * ARM64 kernel is always compiled with CONFIG_FRAME_POINTER=y
+ */
+ return true;
+}
+
+void bpf_jit_free(struct bpf_prog *prog)
+{
+ if (prog->jited) {
+ struct arm64_jit_data *jit_data = prog->aux->jit_data;
+ struct bpf_binary_header *hdr;
+
+ /*
+ * If we fail the final pass of JIT (from jit_subprogs),
+ * the program may not be finalized yet. Call finalize here
+ * before freeing it.
+ */
+ if (jit_data) {
+ bpf_arch_text_copy(&jit_data->ro_header->size, &jit_data->header->size,
+ sizeof(jit_data->header->size));
+ kfree(jit_data);
+ }
+ hdr = bpf_jit_binary_pack_hdr(prog);
+ bpf_jit_binary_pack_free(hdr, NULL);
+ WARN_ON_ONCE(!bpf_prog_kallsyms_verify_off(prog));
+ }
+
+ bpf_prog_unlock_free(prog);
+}