author     David S. Miller <davem@davemloft.net>  2020-07-22 22:34:55 +0300
committer  David S. Miller <davem@davemloft.net>  2020-07-22 22:35:33 +0300
commit     dee72f8a0c2d4ddb566151b2982d553461339315
tree       b52b7479eb55397044a4e93d8cc92e8fdccd8d83
parent     fa56a987449bcf4c1cb68369a187af3515b85c78
parent     9165e1d70fb34ce438e78aad90408cfa86e4c2d0
Merge git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next
Alexei Starovoitov says:

====================
pull-request: bpf-next 2020-07-21

The following pull-request contains BPF updates for your *net-next* tree.

We've added 46 non-merge commits during the last 6 day(s) which contain
a total of 68 files changed, 4929 insertions(+), 526 deletions(-).

The main changes are:

1) Run BPF program on socket lookup, from Jakub.

2) Introduce cpumap, from Lorenzo.

3) s390 JIT fixes, from Ilya.

4) teach riscv JIT to emit compressed insns, from Luke.

5) use build time computed BTF ids in bpf iter, from Yonghong.
====================

Purely independent overlapping changes in both filter.h and xdp.h

Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'arch')
-rw-r--r--  arch/riscv/net/bpf_jit.h          483
-rw-r--r--  arch/riscv/net/bpf_jit_comp32.c    14
-rw-r--r--  arch/riscv/net/bpf_jit_comp64.c   293
-rw-r--r--  arch/riscv/net/bpf_jit_core.c       6
-rw-r--r--  arch/s390/net/bpf_jit_comp.c       63
5 files changed, 684 insertions(+), 175 deletions(-)
diff --git a/arch/riscv/net/bpf_jit.h b/arch/riscv/net/bpf_jit.h
index 20e235d06f66..75c1e9996867 100644
--- a/arch/riscv/net/bpf_jit.h
+++ b/arch/riscv/net/bpf_jit.h
@@ -13,6 +13,11 @@
#include <linux/filter.h>
#include <asm/cacheflush.h>
+static inline bool rvc_enabled(void)
+{
+ return IS_ENABLED(CONFIG_RISCV_ISA_C);
+}
+
enum {
RV_REG_ZERO = 0, /* The constant value 0 */
RV_REG_RA = 1, /* Return address */
@@ -48,9 +53,21 @@ enum {
RV_REG_T6 = 31,
};
+static inline bool is_creg(u8 reg)
+{
+ return (1 << reg) & (BIT(RV_REG_FP) |
+ BIT(RV_REG_S1) |
+ BIT(RV_REG_A0) |
+ BIT(RV_REG_A1) |
+ BIT(RV_REG_A2) |
+ BIT(RV_REG_A3) |
+ BIT(RV_REG_A4) |
+ BIT(RV_REG_A5));
+}
+
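Only x8-x15 fit in the 3-bit register fields of the compressed formats, which is exactly the set is_creg() accepts; the encoders below store the low three bits (reg - 8 for this window). A quick check (a sketch, not part of the patch):

    /* is_creg(RV_REG_A0) -> true  (x10, encoded as 10 & 0x7 == 2)     */
    /* is_creg(RV_REG_T1) -> false (x6, only reachable by 4-byte insns) */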
struct rv_jit_context {
struct bpf_prog *prog;
- u32 *insns; /* RV insns */
+ u16 *insns; /* RV insns */
int ninsns;
int epilogue_offset;
int *offset; /* BPF to RV */
@@ -58,6 +75,12 @@ struct rv_jit_context {
int stack_size;
};
+/* Convert from ninsns to bytes. */
+static inline int ninsns_rvoff(int ninsns)
+{
+ return ninsns << 1;
+}
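ninsns now counts 16-bit half-words, so a byte offset is ninsns << 1 where the old u32-indexed code used << 2. A small illustration (hypothetical counts):

    /* One 4-byte insn (2 half-words) plus one 2-byte RVC insn:
     *   ctx->ninsns == 3, ninsns_rvoff(3) == 6 bytes of code.
     * The old (to - from) << 2 arithmetic would yield 12 here and no
     * longer match the emitted image.
     */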
+
struct rv_jit_data {
struct bpf_binary_header *header;
u8 *image;
@@ -74,8 +97,22 @@ static inline void bpf_flush_icache(void *start, void *end)
flush_icache_range((unsigned long)start, (unsigned long)end);
}
+/* Emit a 4-byte riscv instruction. */
static inline void emit(const u32 insn, struct rv_jit_context *ctx)
{
+ if (ctx->insns) {
+ ctx->insns[ctx->ninsns] = insn;
+ ctx->insns[ctx->ninsns + 1] = (insn >> 16);
+ }
+
+ ctx->ninsns += 2;
+}
+
+/* Emit a 2-byte riscv compressed instruction. */
+static inline void emitc(const u16 insn, struct rv_jit_context *ctx)
+{
+ BUILD_BUG_ON(!rvc_enabled());
+
if (ctx->insns)
ctx->insns[ctx->ninsns] = insn;
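A 4-byte instruction is thus stored as two little-endian 16-bit parcels, matching how RISC-V lays out mixed-width code. A minimal sketch (assumes CONFIG_RISCV_ISA_C=y, which emitc() build-asserts, and that emitc() bumps ninsns by one outside this hunk):

    u16 buf[4];
    struct rv_jit_context ctx = { .insns = buf };

    emit(0x00f50513, &ctx);  /* addi a0, a0, 15: buf[0] = 0x0513, buf[1] = 0x00f5 */
    emitc(0x0405, &ctx);     /* c.addi s0, 1:    buf[2] = 0x0405 */
    /* ctx.ninsns == 3 half-words, i.e. 6 bytes of code */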
@@ -86,7 +123,7 @@ static inline int epilogue_offset(struct rv_jit_context *ctx)
{
int to = ctx->epilogue_offset, from = ctx->ninsns;
- return (to - from) << 2;
+ return ninsns_rvoff(to - from);
}
/* Return -1 or inverted cond. */
@@ -117,6 +154,36 @@ static inline int invert_bpf_cond(u8 cond)
return -1;
}
+static inline bool is_6b_int(long val)
+{
+ return -(1L << 5) <= val && val < (1L << 5);
+}
+
+static inline bool is_7b_uint(unsigned long val)
+{
+ return val < (1UL << 7);
+}
+
+static inline bool is_8b_uint(unsigned long val)
+{
+ return val < (1UL << 8);
+}
+
+static inline bool is_9b_uint(unsigned long val)
+{
+ return val < (1UL << 9);
+}
+
+static inline bool is_10b_int(long val)
+{
+ return -(1L << 9) <= val && val < (1L << 9);
+}
+
+static inline bool is_10b_uint(unsigned long val)
+{
+ return val < (1UL << 10);
+}
+
static inline bool is_12b_int(long val)
{
return -(1L << 11) <= val && val < (1L << 11);
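Each range helper mirrors one compressed immediate width; is_6b_int(), for instance, gates the 6-bit signed immediates of c.addi/c.li/c.andi:

    /* is_6b_int(31)  -> true  (fits imm[5:0], signed)
     * is_6b_int(32)  -> false (falls back to the 4-byte form)
     * is_6b_int(-32) -> true  (lower bound of the range)
     */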
@@ -149,7 +216,7 @@ static inline int rv_offset(int insn, int off, struct rv_jit_context *ctx)
off++; /* BPF branch is from PC+1, RV is from PC */
from = (insn > 0) ? ctx->offset[insn - 1] : 0;
to = (insn + off > 0) ? ctx->offset[insn + off - 1] : 0;
- return (to - from) << 2;
+ return ninsns_rvoff(to - from);
}
/* Instruction formats. */
@@ -207,6 +274,59 @@ static inline u32 rv_amo_insn(u8 funct5, u8 aq, u8 rl, u8 rs2, u8 rs1,
return rv_r_insn(funct7, rs2, rs1, funct3, rd, opcode);
}
+/* RISC-V compressed instruction formats. */
+
+static inline u16 rv_cr_insn(u8 funct4, u8 rd, u8 rs2, u8 op)
+{
+ return (funct4 << 12) | (rd << 7) | (rs2 << 2) | op;
+}
+
+static inline u16 rv_ci_insn(u8 funct3, u32 imm6, u8 rd, u8 op)
+{
+ u32 imm;
+
+ imm = ((imm6 & 0x20) << 7) | ((imm6 & 0x1f) << 2);
+ return (funct3 << 13) | (rd << 7) | op | imm;
+}
+
+static inline u16 rv_css_insn(u8 funct3, u32 uimm, u8 rs2, u8 op)
+{
+ return (funct3 << 13) | (uimm << 7) | (rs2 << 2) | op;
+}
+
+static inline u16 rv_ciw_insn(u8 funct3, u32 uimm, u8 rd, u8 op)
+{
+ return (funct3 << 13) | (uimm << 5) | ((rd & 0x7) << 2) | op;
+}
+
+static inline u16 rv_cl_insn(u8 funct3, u32 imm_hi, u8 rs1, u32 imm_lo, u8 rd,
+ u8 op)
+{
+ return (funct3 << 13) | (imm_hi << 10) | ((rs1 & 0x7) << 7) |
+ (imm_lo << 5) | ((rd & 0x7) << 2) | op;
+}
+
+static inline u16 rv_cs_insn(u8 funct3, u32 imm_hi, u8 rs1, u32 imm_lo, u8 rs2,
+ u8 op)
+{
+ return (funct3 << 13) | (imm_hi << 10) | ((rs1 & 0x7) << 7) |
+ (imm_lo << 5) | ((rs2 & 0x7) << 2) | op;
+}
+
+static inline u16 rv_ca_insn(u8 funct6, u8 rd, u8 funct2, u8 rs2, u8 op)
+{
+ return (funct6 << 10) | ((rd & 0x7) << 7) | (funct2 << 5) |
+ ((rs2 & 0x7) << 2) | op;
+}
+
+static inline u16 rv_cb_insn(u8 funct3, u32 imm6, u8 funct2, u8 rd, u8 op)
+{
+ u32 imm;
+
+ imm = ((imm6 & 0x20) << 7) | ((imm6 & 0x1f) << 2);
+ return (funct3 << 13) | (funct2 << 10) | ((rd & 0x7) << 7) | op | imm;
+}
+
/* Instructions shared by both RV32 and RV64. */
static inline u32 rv_addi(u8 rd, u8 rs1, u16 imm11_0)
@@ -414,6 +534,135 @@ static inline u32 rv_amoadd_w(u8 rd, u8 rs2, u8 rs1, u8 aq, u8 rl)
return rv_amo_insn(0, aq, rl, rs2, rs1, 2, rd, 0x2f);
}
+/* RVC instructions. */
+
+static inline u16 rvc_addi4spn(u8 rd, u32 imm10)
+{
+ u32 imm;
+
+ imm = ((imm10 & 0x30) << 2) | ((imm10 & 0x3c0) >> 4) |
+ ((imm10 & 0x4) >> 1) | ((imm10 & 0x8) >> 3);
+ return rv_ciw_insn(0x0, imm, rd, 0x0);
+}
+
+static inline u16 rvc_lw(u8 rd, u32 imm7, u8 rs1)
+{
+ u32 imm_hi, imm_lo;
+
+ imm_hi = (imm7 & 0x38) >> 3;
+ imm_lo = ((imm7 & 0x4) >> 1) | ((imm7 & 0x40) >> 6);
+ return rv_cl_insn(0x2, imm_hi, rs1, imm_lo, rd, 0x0);
+}
+
+static inline u16 rvc_sw(u8 rs1, u32 imm7, u8 rs2)
+{
+ u32 imm_hi, imm_lo;
+
+ imm_hi = (imm7 & 0x38) >> 3;
+ imm_lo = ((imm7 & 0x4) >> 1) | ((imm7 & 0x40) >> 6);
+ return rv_cs_insn(0x6, imm_hi, rs1, imm_lo, rs2, 0x0);
+}
+
+static inline u16 rvc_addi(u8 rd, u32 imm6)
+{
+ return rv_ci_insn(0, imm6, rd, 0x1);
+}
+
+static inline u16 rvc_li(u8 rd, u32 imm6)
+{
+ return rv_ci_insn(0x2, imm6, rd, 0x1);
+}
+
+static inline u16 rvc_addi16sp(u32 imm10)
+{
+ u32 imm;
+
+ imm = ((imm10 & 0x200) >> 4) | (imm10 & 0x10) | ((imm10 & 0x40) >> 3) |
+ ((imm10 & 0x180) >> 6) | ((imm10 & 0x20) >> 5);
+ return rv_ci_insn(0x3, imm, RV_REG_SP, 0x1);
+}
+
+static inline u16 rvc_lui(u8 rd, u32 imm6)
+{
+ return rv_ci_insn(0x3, imm6, rd, 0x1);
+}
+
+static inline u16 rvc_srli(u8 rd, u32 imm6)
+{
+ return rv_cb_insn(0x4, imm6, 0, rd, 0x1);
+}
+
+static inline u16 rvc_srai(u8 rd, u32 imm6)
+{
+ return rv_cb_insn(0x4, imm6, 0x1, rd, 0x1);
+}
+
+static inline u16 rvc_andi(u8 rd, u32 imm6)
+{
+ return rv_cb_insn(0x4, imm6, 0x2, rd, 0x1);
+}
+
+static inline u16 rvc_sub(u8 rd, u8 rs)
+{
+ return rv_ca_insn(0x23, rd, 0, rs, 0x1);
+}
+
+static inline u16 rvc_xor(u8 rd, u8 rs)
+{
+ return rv_ca_insn(0x23, rd, 0x1, rs, 0x1);
+}
+
+static inline u16 rvc_or(u8 rd, u8 rs)
+{
+ return rv_ca_insn(0x23, rd, 0x2, rs, 0x1);
+}
+
+static inline u16 rvc_and(u8 rd, u8 rs)
+{
+ return rv_ca_insn(0x23, rd, 0x3, rs, 0x1);
+}
+
+static inline u16 rvc_slli(u8 rd, u32 imm6)
+{
+ return rv_ci_insn(0, imm6, rd, 0x2);
+}
+
+static inline u16 rvc_lwsp(u8 rd, u32 imm8)
+{
+ u32 imm;
+
+ imm = ((imm8 & 0xc0) >> 6) | (imm8 & 0x3c);
+ return rv_ci_insn(0x2, imm, rd, 0x2);
+}
+
+static inline u16 rvc_jr(u8 rs1)
+{
+ return rv_cr_insn(0x8, rs1, RV_REG_ZERO, 0x2);
+}
+
+static inline u16 rvc_mv(u8 rd, u8 rs)
+{
+ return rv_cr_insn(0x8, rd, rs, 0x2);
+}
+
+static inline u16 rvc_jalr(u8 rs1)
+{
+ return rv_cr_insn(0x9, rs1, RV_REG_ZERO, 0x2);
+}
+
+static inline u16 rvc_add(u8 rd, u8 rs)
+{
+ return rv_cr_insn(0x9, rd, rs, 0x2);
+}
+
+static inline u16 rvc_swsp(u32 imm8, u8 rs2)
+{
+ u32 imm;
+
+ imm = (imm8 & 0x3c) | ((imm8 & 0xc0) >> 6);
+ return rv_css_insn(0x6, imm, rs2, 0x2);
+}
+
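As a spot check on the immediate scattering (assuming RV_REG_FP is x8, i.e. s0):

    u16 insn = rvc_addi(RV_REG_FP, 1);
    /* CI format: funct3 = 000, imm[5] = 0 -> bit 12, rd = 8 -> bits 11:7,
     * imm[4:0] = 1 -> bits 6:2, op = 01:
     *   (0 << 13) | (8 << 7) | (1 << 2) | 0x1 == 0x0405,
     * the same encoding an assembler produces for "c.addi s0, 1".
     */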
/*
* RV64-only instructions.
*
@@ -503,6 +752,234 @@ static inline u32 rv_amoadd_d(u8 rd, u8 rs2, u8 rs1, u8 aq, u8 rl)
return rv_amo_insn(0, aq, rl, rs2, rs1, 3, rd, 0x2f);
}
+/* RV64-only RVC instructions. */
+
+static inline u16 rvc_ld(u8 rd, u32 imm8, u8 rs1)
+{
+ u32 imm_hi, imm_lo;
+
+ imm_hi = (imm8 & 0x38) >> 3;
+ imm_lo = (imm8 & 0xc0) >> 6;
+ return rv_cl_insn(0x3, imm_hi, rs1, imm_lo, rd, 0x0);
+}
+
+static inline u16 rvc_sd(u8 rs1, u32 imm8, u8 rs2)
+{
+ u32 imm_hi, imm_lo;
+
+ imm_hi = (imm8 & 0x38) >> 3;
+ imm_lo = (imm8 & 0xc0) >> 6;
+ return rv_cs_insn(0x7, imm_hi, rs1, imm_lo, rs2, 0x0);
+}
+
+static inline u16 rvc_subw(u8 rd, u8 rs)
+{
+ return rv_ca_insn(0x27, rd, 0, rs, 0x1);
+}
+
+static inline u16 rvc_addiw(u8 rd, u32 imm6)
+{
+ return rv_ci_insn(0x1, imm6, rd, 0x1);
+}
+
+static inline u16 rvc_ldsp(u8 rd, u32 imm9)
+{
+ u32 imm;
+
+ imm = ((imm9 & 0x1c0) >> 6) | (imm9 & 0x38);
+ return rv_ci_insn(0x3, imm, rd, 0x2);
+}
+
+static inline u16 rvc_sdsp(u32 imm9, u8 rs2)
+{
+ u32 imm;
+
+ imm = (imm9 & 0x38) | ((imm9 & 0x1c0) >> 6);
+ return rv_css_insn(0x7, imm, rs2, 0x2);
+}
+
+#endif /* __riscv_xlen == 64 */
+
+/* Helper functions that emit RVC instructions when possible. */
+
+static inline void emit_jalr(u8 rd, u8 rs, s32 imm, struct rv_jit_context *ctx)
+{
+ if (rvc_enabled() && rd == RV_REG_RA && rs && !imm)
+ emitc(rvc_jalr(rs), ctx);
+ else if (rvc_enabled() && !rd && rs && !imm)
+ emitc(rvc_jr(rs), ctx);
+ else
+ emit(rv_jalr(rd, rs, imm), ctx);
+}
+
+static inline void emit_mv(u8 rd, u8 rs, struct rv_jit_context *ctx)
+{
+ if (rvc_enabled() && rd && rs)
+ emitc(rvc_mv(rd, rs), ctx);
+ else
+ emit(rv_addi(rd, rs, 0), ctx);
+}
+
+static inline void emit_add(u8 rd, u8 rs1, u8 rs2, struct rv_jit_context *ctx)
+{
+ if (rvc_enabled() && rd && rd == rs1 && rs2)
+ emitc(rvc_add(rd, rs2), ctx);
+ else
+ emit(rv_add(rd, rs1, rs2), ctx);
+}
+
+static inline void emit_addi(u8 rd, u8 rs, s32 imm, struct rv_jit_context *ctx)
+{
+ if (rvc_enabled() && rd == RV_REG_SP && rd == rs && is_10b_int(imm) && imm && !(imm & 0xf))
+ emitc(rvc_addi16sp(imm), ctx);
+ else if (rvc_enabled() && is_creg(rd) && rs == RV_REG_SP && is_10b_uint(imm) &&
+ !(imm & 0x3) && imm)
+ emitc(rvc_addi4spn(rd, imm), ctx);
+ else if (rvc_enabled() && rd && rd == rs && imm && is_6b_int(imm))
+ emitc(rvc_addi(rd, imm), ctx);
+ else
+ emit(rv_addi(rd, rs, imm), ctx);
+}
+
+static inline void emit_li(u8 rd, s32 imm, struct rv_jit_context *ctx)
+{
+ if (rvc_enabled() && rd && is_6b_int(imm))
+ emitc(rvc_li(rd, imm), ctx);
+ else
+ emit(rv_addi(rd, RV_REG_ZERO, imm), ctx);
+}
+
+static inline void emit_lui(u8 rd, s32 imm, struct rv_jit_context *ctx)
+{
+ if (rvc_enabled() && rd && rd != RV_REG_SP && is_6b_int(imm) && imm)
+ emitc(rvc_lui(rd, imm), ctx);
+ else
+ emit(rv_lui(rd, imm), ctx);
+}
+
+static inline void emit_slli(u8 rd, u8 rs, s32 imm, struct rv_jit_context *ctx)
+{
+ if (rvc_enabled() && rd && rd == rs && imm && (u32)imm < __riscv_xlen)
+ emitc(rvc_slli(rd, imm), ctx);
+ else
+ emit(rv_slli(rd, rs, imm), ctx);
+}
+
+static inline void emit_andi(u8 rd, u8 rs, s32 imm, struct rv_jit_context *ctx)
+{
+ if (rvc_enabled() && is_creg(rd) && rd == rs && is_6b_int(imm))
+ emitc(rvc_andi(rd, imm), ctx);
+ else
+ emit(rv_andi(rd, rs, imm), ctx);
+}
+
+static inline void emit_srli(u8 rd, u8 rs, s32 imm, struct rv_jit_context *ctx)
+{
+ if (rvc_enabled() && is_creg(rd) && rd == rs && imm && (u32)imm < __riscv_xlen)
+ emitc(rvc_srli(rd, imm), ctx);
+ else
+ emit(rv_srli(rd, rs, imm), ctx);
+}
+
+static inline void emit_srai(u8 rd, u8 rs, s32 imm, struct rv_jit_context *ctx)
+{
+ if (rvc_enabled() && is_creg(rd) && rd == rs && imm && (u32)imm < __riscv_xlen)
+ emitc(rvc_srai(rd, imm), ctx);
+ else
+ emit(rv_srai(rd, rs, imm), ctx);
+}
+
+static inline void emit_sub(u8 rd, u8 rs1, u8 rs2, struct rv_jit_context *ctx)
+{
+ if (rvc_enabled() && is_creg(rd) && rd == rs1 && is_creg(rs2))
+ emitc(rvc_sub(rd, rs2), ctx);
+ else
+ emit(rv_sub(rd, rs1, rs2), ctx);
+}
+
+static inline void emit_or(u8 rd, u8 rs1, u8 rs2, struct rv_jit_context *ctx)
+{
+ if (rvc_enabled() && is_creg(rd) && rd == rs1 && is_creg(rs2))
+ emitc(rvc_or(rd, rs2), ctx);
+ else
+ emit(rv_or(rd, rs1, rs2), ctx);
+}
+
+static inline void emit_and(u8 rd, u8 rs1, u8 rs2, struct rv_jit_context *ctx)
+{
+ if (rvc_enabled() && is_creg(rd) && rd == rs1 && is_creg(rs2))
+ emitc(rvc_and(rd, rs2), ctx);
+ else
+ emit(rv_and(rd, rs1, rs2), ctx);
+}
+
+static inline void emit_xor(u8 rd, u8 rs1, u8 rs2, struct rv_jit_context *ctx)
+{
+ if (rvc_enabled() && is_creg(rd) && rd == rs1 && is_creg(rs2))
+ emitc(rvc_xor(rd, rs2), ctx);
+ else
+ emit(rv_xor(rd, rs1, rs2), ctx);
+}
+
+static inline void emit_lw(u8 rd, s32 off, u8 rs1, struct rv_jit_context *ctx)
+{
+ if (rvc_enabled() && rs1 == RV_REG_SP && rd && is_8b_uint(off) && !(off & 0x3))
+ emitc(rvc_lwsp(rd, off), ctx);
+ else if (rvc_enabled() && is_creg(rd) && is_creg(rs1) && is_7b_uint(off) && !(off & 0x3))
+ emitc(rvc_lw(rd, off, rs1), ctx);
+ else
+ emit(rv_lw(rd, off, rs1), ctx);
+}
+
+static inline void emit_sw(u8 rs1, s32 off, u8 rs2, struct rv_jit_context *ctx)
+{
+ if (rvc_enabled() && rs1 == RV_REG_SP && is_8b_uint(off) && !(off & 0x3))
+ emitc(rvc_swsp(off, rs2), ctx);
+ else if (rvc_enabled() && is_creg(rs1) && is_creg(rs2) && is_7b_uint(off) && !(off & 0x3))
+ emitc(rvc_sw(rs1, off, rs2), ctx);
+ else
+ emit(rv_sw(rs1, off, rs2), ctx);
+}
+
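Compressed loads and stores take unsigned, naturally aligned offsets, hence the is_*_uint and alignment tests. A sketch of emit_lw()'s dispatch under CONFIG_RISCV_ISA_C=y:

    emit_lw(RV_REG_A0, 4, RV_REG_SP, ctx);  /* -> c.lwsp a0, 4(sp) */
    emit_lw(RV_REG_A0, 4, RV_REG_A1, ctx);  /* -> c.lw a0, 4(a1)   */
    emit_lw(RV_REG_A0, -4, RV_REG_SP, ctx); /* -> lw a0, -4(sp); negative
                                               offsets have no RVC form */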
+/* RV64-only helper functions. */
+#if __riscv_xlen == 64
+
+static inline void emit_addiw(u8 rd, u8 rs, s32 imm, struct rv_jit_context *ctx)
+{
+ if (rvc_enabled() && rd && rd == rs && is_6b_int(imm))
+ emitc(rvc_addiw(rd, imm), ctx);
+ else
+ emit(rv_addiw(rd, rs, imm), ctx);
+}
+
+static inline void emit_ld(u8 rd, s32 off, u8 rs1, struct rv_jit_context *ctx)
+{
+ if (rvc_enabled() && rs1 == RV_REG_SP && rd && is_9b_uint(off) && !(off & 0x7))
+ emitc(rvc_ldsp(rd, off), ctx);
+ else if (rvc_enabled() && is_creg(rd) && is_creg(rs1) && is_8b_uint(off) && !(off & 0x7))
+ emitc(rvc_ld(rd, off, rs1), ctx);
+ else
+ emit(rv_ld(rd, off, rs1), ctx);
+}
+
+static inline void emit_sd(u8 rs1, s32 off, u8 rs2, struct rv_jit_context *ctx)
+{
+ if (rvc_enabled() && rs1 == RV_REG_SP && is_9b_uint(off) && !(off & 0x7))
+ emitc(rvc_sdsp(off, rs2), ctx);
+ else if (rvc_enabled() && is_creg(rs1) && is_creg(rs2) && is_8b_uint(off) && !(off & 0x7))
+ emitc(rvc_sd(rs1, off, rs2), ctx);
+ else
+ emit(rv_sd(rs1, off, rs2), ctx);
+}
+
+static inline void emit_subw(u8 rd, u8 rs1, u8 rs2, struct rv_jit_context *ctx)
+{
+ if (rvc_enabled() && is_creg(rd) && rd == rs1 && is_creg(rs2))
+ emitc(rvc_subw(rd, rs2), ctx);
+ else
+ emit(rv_subw(rd, rs1, rs2), ctx);
+}
+
#endif /* __riscv_xlen == 64 */
void bpf_jit_build_prologue(struct rv_jit_context *ctx);
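Each helper emits the compressed form only when every RVC constraint holds (extension enabled, register class, immediate range and alignment) and otherwise falls back to the full encoding, so callers never need to care. How emit_addi() dispatches, for example (a sketch, with CONFIG_RISCV_ISA_C=y):

    emit_addi(RV_REG_SP, RV_REG_SP, -16, ctx); /* -> c.addi16sp sp, -16   */
    emit_addi(RV_REG_A0, RV_REG_SP, 8, ctx);   /* -> c.addi4spn a0, sp, 8 */
    emit_addi(RV_REG_A0, RV_REG_A0, 1, ctx);   /* -> c.addi a0, 1         */
    emit_addi(RV_REG_T1, RV_REG_A0, 1, ctx);   /* -> addi t1, a0, 1; no RVC
                                                  form when rd != rs */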
diff --git a/arch/riscv/net/bpf_jit_comp32.c b/arch/riscv/net/bpf_jit_comp32.c
index b198eaa74456..bc5f2204693f 100644
--- a/arch/riscv/net/bpf_jit_comp32.c
+++ b/arch/riscv/net/bpf_jit_comp32.c
@@ -644,7 +644,7 @@ static int emit_branch_r64(const s8 *src1, const s8 *src2, s32 rvoff,
e = ctx->ninsns;
/* Adjust for extra insns. */
- rvoff -= (e - s) << 2;
+ rvoff -= ninsns_rvoff(e - s);
emit_jump_and_link(RV_REG_ZERO, rvoff, true, ctx);
return 0;
}
@@ -713,7 +713,7 @@ static int emit_bcc(u8 op, u8 rd, u8 rs, int rvoff, struct rv_jit_context *ctx)
if (far) {
e = ctx->ninsns;
/* Adjust for extra insns. */
- rvoff -= (e - s) << 2;
+ rvoff -= ninsns_rvoff(e - s);
emit_jump_and_link(RV_REG_ZERO, rvoff, true, ctx);
}
return 0;
@@ -731,7 +731,7 @@ static int emit_branch_r32(const s8 *src1, const s8 *src2, s32 rvoff,
e = ctx->ninsns;
/* Adjust for extra insns. */
- rvoff -= (e - s) << 2;
+ rvoff -= ninsns_rvoff(e - s);
if (emit_bcc(op, lo(rs1), lo(rs2), rvoff, ctx))
return -1;
@@ -795,7 +795,7 @@ static int emit_bpf_tail_call(int insn, struct rv_jit_context *ctx)
* if (index >= max_entries)
* goto out;
*/
- off = (tc_ninsn - (ctx->ninsns - start_insn)) << 2;
+ off = ninsns_rvoff(tc_ninsn - (ctx->ninsns - start_insn));
emit_bcc(BPF_JGE, lo(idx_reg), RV_REG_T1, off, ctx);
/*
@@ -804,7 +804,7 @@ static int emit_bpf_tail_call(int insn, struct rv_jit_context *ctx)
* goto out;
*/
emit(rv_addi(RV_REG_T1, RV_REG_TCC, -1), ctx);
- off = (tc_ninsn - (ctx->ninsns - start_insn)) << 2;
+ off = ninsns_rvoff(tc_ninsn - (ctx->ninsns - start_insn));
emit_bcc(BPF_JSLT, RV_REG_TCC, RV_REG_ZERO, off, ctx);
/*
@@ -818,7 +818,7 @@ static int emit_bpf_tail_call(int insn, struct rv_jit_context *ctx)
if (is_12b_check(off, insn))
return -1;
emit(rv_lw(RV_REG_T0, off, RV_REG_T0), ctx);
- off = (tc_ninsn - (ctx->ninsns - start_insn)) << 2;
+ off = ninsns_rvoff(tc_ninsn - (ctx->ninsns - start_insn));
emit_bcc(BPF_JEQ, RV_REG_T0, RV_REG_ZERO, off, ctx);
/*
@@ -1214,7 +1214,7 @@ int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx,
emit_imm32(tmp2, imm, ctx);
src = tmp2;
e = ctx->ninsns;
- rvoff -= (e - s) << 2;
+ rvoff -= ninsns_rvoff(e - s);
}
if (is64)
diff --git a/arch/riscv/net/bpf_jit_comp64.c b/arch/riscv/net/bpf_jit_comp64.c
index 6cfd164cbe88..8a56b5293117 100644
--- a/arch/riscv/net/bpf_jit_comp64.c
+++ b/arch/riscv/net/bpf_jit_comp64.c
@@ -132,19 +132,23 @@ static void emit_imm(u8 rd, s64 val, struct rv_jit_context *ctx)
*
* This also means that we need to process LSB to MSB.
*/
- s64 upper = (val + (1 << 11)) >> 12, lower = val & 0xfff;
+ s64 upper = (val + (1 << 11)) >> 12;
+ /* Sign-extend lower 12 bits to 64 bits since immediates for li, addiw,
+ * and addi are signed and RVC checks will perform signed comparisons.
+ */
+ s64 lower = ((val & 0xfff) << 52) >> 52;
int shift;
if (is_32b_int(val)) {
if (upper)
- emit(rv_lui(rd, upper), ctx);
+ emit_lui(rd, upper, ctx);
if (!upper) {
- emit(rv_addi(rd, RV_REG_ZERO, lower), ctx);
+ emit_li(rd, lower, ctx);
return;
}
- emit(rv_addiw(rd, rd, lower), ctx);
+ emit_addiw(rd, rd, lower, ctx);
return;
}
@@ -154,9 +158,9 @@ static void emit_imm(u8 rd, s64 val, struct rv_jit_context *ctx)
emit_imm(rd, upper, ctx);
- emit(rv_slli(rd, rd, shift), ctx);
+ emit_slli(rd, rd, shift, ctx);
if (lower)
- emit(rv_addi(rd, rd, lower), ctx);
+ emit_addi(rd, rd, lower, ctx);
}
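A worked example of the split, with val chosen to exercise bit 11 and the new sign extension:

    /* val = 0x800:
     *   upper = (0x800 + 0x800) >> 12 == 1
     *   lower = ((0x800 & 0xfff) << 52) >> 52 == -2048 (sign-extended)
     * emit_lui(rd, 1) then emit_addiw(rd, rd, -2048) gives
     * 0x1000 - 0x800 == 0x800. Without the sign extension lower would
     * be +2048, which overflows addiw's signed 12-bit immediate and
     * defeats the signed RVC range checks.
     */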
static void __build_epilogue(bool is_tail_call, struct rv_jit_context *ctx)
@@ -164,43 +168,43 @@ static void __build_epilogue(bool is_tail_call, struct rv_jit_context *ctx)
int stack_adjust = ctx->stack_size, store_offset = stack_adjust - 8;
if (seen_reg(RV_REG_RA, ctx)) {
- emit(rv_ld(RV_REG_RA, store_offset, RV_REG_SP), ctx);
+ emit_ld(RV_REG_RA, store_offset, RV_REG_SP, ctx);
store_offset -= 8;
}
- emit(rv_ld(RV_REG_FP, store_offset, RV_REG_SP), ctx);
+ emit_ld(RV_REG_FP, store_offset, RV_REG_SP, ctx);
store_offset -= 8;
if (seen_reg(RV_REG_S1, ctx)) {
- emit(rv_ld(RV_REG_S1, store_offset, RV_REG_SP), ctx);
+ emit_ld(RV_REG_S1, store_offset, RV_REG_SP, ctx);
store_offset -= 8;
}
if (seen_reg(RV_REG_S2, ctx)) {
- emit(rv_ld(RV_REG_S2, store_offset, RV_REG_SP), ctx);
+ emit_ld(RV_REG_S2, store_offset, RV_REG_SP, ctx);
store_offset -= 8;
}
if (seen_reg(RV_REG_S3, ctx)) {
- emit(rv_ld(RV_REG_S3, store_offset, RV_REG_SP), ctx);
+ emit_ld(RV_REG_S3, store_offset, RV_REG_SP, ctx);
store_offset -= 8;
}
if (seen_reg(RV_REG_S4, ctx)) {
- emit(rv_ld(RV_REG_S4, store_offset, RV_REG_SP), ctx);
+ emit_ld(RV_REG_S4, store_offset, RV_REG_SP, ctx);
store_offset -= 8;
}
if (seen_reg(RV_REG_S5, ctx)) {
- emit(rv_ld(RV_REG_S5, store_offset, RV_REG_SP), ctx);
+ emit_ld(RV_REG_S5, store_offset, RV_REG_SP, ctx);
store_offset -= 8;
}
if (seen_reg(RV_REG_S6, ctx)) {
- emit(rv_ld(RV_REG_S6, store_offset, RV_REG_SP), ctx);
+ emit_ld(RV_REG_S6, store_offset, RV_REG_SP, ctx);
store_offset -= 8;
}
- emit(rv_addi(RV_REG_SP, RV_REG_SP, stack_adjust), ctx);
+ emit_addi(RV_REG_SP, RV_REG_SP, stack_adjust, ctx);
/* Set return value. */
if (!is_tail_call)
- emit(rv_addi(RV_REG_A0, RV_REG_A5, 0), ctx);
- emit(rv_jalr(RV_REG_ZERO, is_tail_call ? RV_REG_T3 : RV_REG_RA,
- is_tail_call ? 4 : 0), /* skip TCC init */
- ctx);
+ emit_mv(RV_REG_A0, RV_REG_A5, ctx);
+ emit_jalr(RV_REG_ZERO, is_tail_call ? RV_REG_T3 : RV_REG_RA,
+ is_tail_call ? 4 : 0, /* skip TCC init */
+ ctx);
}
static void emit_bcc(u8 cond, u8 rd, u8 rs, int rvoff,
@@ -280,8 +284,8 @@ static void emit_branch(u8 cond, u8 rd, u8 rs, int rvoff,
static void emit_zext_32(u8 reg, struct rv_jit_context *ctx)
{
- emit(rv_slli(reg, reg, 32), ctx);
- emit(rv_srli(reg, reg, 32), ctx);
+ emit_slli(reg, reg, 32, ctx);
+ emit_srli(reg, reg, 32, ctx);
}
static int emit_bpf_tail_call(int insn, struct rv_jit_context *ctx)
@@ -304,35 +308,35 @@ static int emit_bpf_tail_call(int insn, struct rv_jit_context *ctx)
if (is_12b_check(off, insn))
return -1;
emit(rv_lwu(RV_REG_T1, off, RV_REG_A1), ctx);
- off = (tc_ninsn - (ctx->ninsns - start_insn)) << 2;
+ off = ninsns_rvoff(tc_ninsn - (ctx->ninsns - start_insn));
emit_branch(BPF_JGE, RV_REG_A2, RV_REG_T1, off, ctx);
/* if (TCC-- < 0)
* goto out;
*/
- emit(rv_addi(RV_REG_T1, tcc, -1), ctx);
- off = (tc_ninsn - (ctx->ninsns - start_insn)) << 2;
+ emit_addi(RV_REG_T1, tcc, -1, ctx);
+ off = ninsns_rvoff(tc_ninsn - (ctx->ninsns - start_insn));
emit_branch(BPF_JSLT, tcc, RV_REG_ZERO, off, ctx);
/* prog = array->ptrs[index];
* if (!prog)
* goto out;
*/
- emit(rv_slli(RV_REG_T2, RV_REG_A2, 3), ctx);
- emit(rv_add(RV_REG_T2, RV_REG_T2, RV_REG_A1), ctx);
+ emit_slli(RV_REG_T2, RV_REG_A2, 3, ctx);
+ emit_add(RV_REG_T2, RV_REG_T2, RV_REG_A1, ctx);
off = offsetof(struct bpf_array, ptrs);
if (is_12b_check(off, insn))
return -1;
- emit(rv_ld(RV_REG_T2, off, RV_REG_T2), ctx);
- off = (tc_ninsn - (ctx->ninsns - start_insn)) << 2;
+ emit_ld(RV_REG_T2, off, RV_REG_T2, ctx);
+ off = ninsns_rvoff(tc_ninsn - (ctx->ninsns - start_insn));
emit_branch(BPF_JEQ, RV_REG_T2, RV_REG_ZERO, off, ctx);
/* goto *(prog->bpf_func + 4); */
off = offsetof(struct bpf_prog, bpf_func);
if (is_12b_check(off, insn))
return -1;
- emit(rv_ld(RV_REG_T3, off, RV_REG_T2), ctx);
- emit(rv_addi(RV_REG_TCC, RV_REG_T1, 0), ctx);
+ emit_ld(RV_REG_T3, off, RV_REG_T2, ctx);
+ emit_mv(RV_REG_TCC, RV_REG_T1, ctx);
__build_epilogue(true, ctx);
return 0;
}
@@ -360,9 +364,9 @@ static void init_regs(u8 *rd, u8 *rs, const struct bpf_insn *insn,
static void emit_zext_32_rd_rs(u8 *rd, u8 *rs, struct rv_jit_context *ctx)
{
- emit(rv_addi(RV_REG_T2, *rd, 0), ctx);
+ emit_mv(RV_REG_T2, *rd, ctx);
emit_zext_32(RV_REG_T2, ctx);
- emit(rv_addi(RV_REG_T1, *rs, 0), ctx);
+ emit_mv(RV_REG_T1, *rs, ctx);
emit_zext_32(RV_REG_T1, ctx);
*rd = RV_REG_T2;
*rs = RV_REG_T1;
@@ -370,15 +374,15 @@ static void emit_zext_32_rd_rs(u8 *rd, u8 *rs, struct rv_jit_context *ctx)
static void emit_sext_32_rd_rs(u8 *rd, u8 *rs, struct rv_jit_context *ctx)
{
- emit(rv_addiw(RV_REG_T2, *rd, 0), ctx);
- emit(rv_addiw(RV_REG_T1, *rs, 0), ctx);
+ emit_addiw(RV_REG_T2, *rd, 0, ctx);
+ emit_addiw(RV_REG_T1, *rs, 0, ctx);
*rd = RV_REG_T2;
*rs = RV_REG_T1;
}
static void emit_zext_32_rd_t1(u8 *rd, struct rv_jit_context *ctx)
{
- emit(rv_addi(RV_REG_T2, *rd, 0), ctx);
+ emit_mv(RV_REG_T2, *rd, ctx);
emit_zext_32(RV_REG_T2, ctx);
emit_zext_32(RV_REG_T1, ctx);
*rd = RV_REG_T2;
@@ -386,7 +390,7 @@ static void emit_zext_32_rd_t1(u8 *rd, struct rv_jit_context *ctx)
static void emit_sext_32_rd(u8 *rd, struct rv_jit_context *ctx)
{
- emit(rv_addiw(RV_REG_T2, *rd, 0), ctx);
+ emit_addiw(RV_REG_T2, *rd, 0, ctx);
*rd = RV_REG_T2;
}
@@ -432,7 +436,7 @@ static int emit_call(bool fixed, u64 addr, struct rv_jit_context *ctx)
if (ret)
return ret;
rd = bpf_to_rv_reg(BPF_REG_0, ctx);
- emit(rv_addi(rd, RV_REG_A0, 0), ctx);
+ emit_mv(rd, RV_REG_A0, ctx);
return 0;
}
@@ -458,7 +462,7 @@ int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx,
emit_zext_32(rd, ctx);
break;
}
- emit(is64 ? rv_addi(rd, rs, 0) : rv_addiw(rd, rs, 0), ctx);
+ emit_mv(rd, rs, ctx);
if (!is64 && !aux->verifier_zext)
emit_zext_32(rd, ctx);
break;
@@ -466,31 +470,35 @@ int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx,
/* dst = dst OP src */
case BPF_ALU | BPF_ADD | BPF_X:
case BPF_ALU64 | BPF_ADD | BPF_X:
- emit(is64 ? rv_add(rd, rd, rs) : rv_addw(rd, rd, rs), ctx);
+ emit_add(rd, rd, rs, ctx);
if (!is64 && !aux->verifier_zext)
emit_zext_32(rd, ctx);
break;
case BPF_ALU | BPF_SUB | BPF_X:
case BPF_ALU64 | BPF_SUB | BPF_X:
- emit(is64 ? rv_sub(rd, rd, rs) : rv_subw(rd, rd, rs), ctx);
+ if (is64)
+ emit_sub(rd, rd, rs, ctx);
+ else
+ emit_subw(rd, rd, rs, ctx);
+
if (!is64 && !aux->verifier_zext)
emit_zext_32(rd, ctx);
break;
case BPF_ALU | BPF_AND | BPF_X:
case BPF_ALU64 | BPF_AND | BPF_X:
- emit(rv_and(rd, rd, rs), ctx);
+ emit_and(rd, rd, rs, ctx);
if (!is64 && !aux->verifier_zext)
emit_zext_32(rd, ctx);
break;
case BPF_ALU | BPF_OR | BPF_X:
case BPF_ALU64 | BPF_OR | BPF_X:
- emit(rv_or(rd, rd, rs), ctx);
+ emit_or(rd, rd, rs, ctx);
if (!is64 && !aux->verifier_zext)
emit_zext_32(rd, ctx);
break;
case BPF_ALU | BPF_XOR | BPF_X:
case BPF_ALU64 | BPF_XOR | BPF_X:
- emit(rv_xor(rd, rd, rs), ctx);
+ emit_xor(rd, rd, rs, ctx);
if (!is64 && !aux->verifier_zext)
emit_zext_32(rd, ctx);
break;
@@ -534,8 +542,7 @@ int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx,
/* dst = -dst */
case BPF_ALU | BPF_NEG:
case BPF_ALU64 | BPF_NEG:
- emit(is64 ? rv_sub(rd, RV_REG_ZERO, rd) :
- rv_subw(rd, RV_REG_ZERO, rd), ctx);
+ emit_sub(rd, RV_REG_ZERO, rd, ctx);
if (!is64 && !aux->verifier_zext)
emit_zext_32(rd, ctx);
break;
@@ -544,8 +551,8 @@ int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx,
case BPF_ALU | BPF_END | BPF_FROM_LE:
switch (imm) {
case 16:
- emit(rv_slli(rd, rd, 48), ctx);
- emit(rv_srli(rd, rd, 48), ctx);
+ emit_slli(rd, rd, 48, ctx);
+ emit_srli(rd, rd, 48, ctx);
break;
case 32:
if (!aux->verifier_zext)
@@ -558,51 +565,51 @@ int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx,
break;
case BPF_ALU | BPF_END | BPF_FROM_BE:
- emit(rv_addi(RV_REG_T2, RV_REG_ZERO, 0), ctx);
+ emit_li(RV_REG_T2, 0, ctx);
- emit(rv_andi(RV_REG_T1, rd, 0xff), ctx);
- emit(rv_add(RV_REG_T2, RV_REG_T2, RV_REG_T1), ctx);
- emit(rv_slli(RV_REG_T2, RV_REG_T2, 8), ctx);
- emit(rv_srli(rd, rd, 8), ctx);
+ emit_andi(RV_REG_T1, rd, 0xff, ctx);
+ emit_add(RV_REG_T2, RV_REG_T2, RV_REG_T1, ctx);
+ emit_slli(RV_REG_T2, RV_REG_T2, 8, ctx);
+ emit_srli(rd, rd, 8, ctx);
if (imm == 16)
goto out_be;
- emit(rv_andi(RV_REG_T1, rd, 0xff), ctx);
- emit(rv_add(RV_REG_T2, RV_REG_T2, RV_REG_T1), ctx);
- emit(rv_slli(RV_REG_T2, RV_REG_T2, 8), ctx);
- emit(rv_srli(rd, rd, 8), ctx);
+ emit_andi(RV_REG_T1, rd, 0xff, ctx);
+ emit_add(RV_REG_T2, RV_REG_T2, RV_REG_T1, ctx);
+ emit_slli(RV_REG_T2, RV_REG_T2, 8, ctx);
+ emit_srli(rd, rd, 8, ctx);
- emit(rv_andi(RV_REG_T1, rd, 0xff), ctx);
- emit(rv_add(RV_REG_T2, RV_REG_T2, RV_REG_T1), ctx);
- emit(rv_slli(RV_REG_T2, RV_REG_T2, 8), ctx);
- emit(rv_srli(rd, rd, 8), ctx);
+ emit_andi(RV_REG_T1, rd, 0xff, ctx);
+ emit_add(RV_REG_T2, RV_REG_T2, RV_REG_T1, ctx);
+ emit_slli(RV_REG_T2, RV_REG_T2, 8, ctx);
+ emit_srli(rd, rd, 8, ctx);
if (imm == 32)
goto out_be;
- emit(rv_andi(RV_REG_T1, rd, 0xff), ctx);
- emit(rv_add(RV_REG_T2, RV_REG_T2, RV_REG_T1), ctx);
- emit(rv_slli(RV_REG_T2, RV_REG_T2, 8), ctx);
- emit(rv_srli(rd, rd, 8), ctx);
-
- emit(rv_andi(RV_REG_T1, rd, 0xff), ctx);
- emit(rv_add(RV_REG_T2, RV_REG_T2, RV_REG_T1), ctx);
- emit(rv_slli(RV_REG_T2, RV_REG_T2, 8), ctx);
- emit(rv_srli(rd, rd, 8), ctx);
-
- emit(rv_andi(RV_REG_T1, rd, 0xff), ctx);
- emit(rv_add(RV_REG_T2, RV_REG_T2, RV_REG_T1), ctx);
- emit(rv_slli(RV_REG_T2, RV_REG_T2, 8), ctx);
- emit(rv_srli(rd, rd, 8), ctx);
-
- emit(rv_andi(RV_REG_T1, rd, 0xff), ctx);
- emit(rv_add(RV_REG_T2, RV_REG_T2, RV_REG_T1), ctx);
- emit(rv_slli(RV_REG_T2, RV_REG_T2, 8), ctx);
- emit(rv_srli(rd, rd, 8), ctx);
+ emit_andi(RV_REG_T1, rd, 0xff, ctx);
+ emit_add(RV_REG_T2, RV_REG_T2, RV_REG_T1, ctx);
+ emit_slli(RV_REG_T2, RV_REG_T2, 8, ctx);
+ emit_srli(rd, rd, 8, ctx);
+
+ emit_andi(RV_REG_T1, rd, 0xff, ctx);
+ emit_add(RV_REG_T2, RV_REG_T2, RV_REG_T1, ctx);
+ emit_slli(RV_REG_T2, RV_REG_T2, 8, ctx);
+ emit_srli(rd, rd, 8, ctx);
+
+ emit_andi(RV_REG_T1, rd, 0xff, ctx);
+ emit_add(RV_REG_T2, RV_REG_T2, RV_REG_T1, ctx);
+ emit_slli(RV_REG_T2, RV_REG_T2, 8, ctx);
+ emit_srli(rd, rd, 8, ctx);
+
+ emit_andi(RV_REG_T1, rd, 0xff, ctx);
+ emit_add(RV_REG_T2, RV_REG_T2, RV_REG_T1, ctx);
+ emit_slli(RV_REG_T2, RV_REG_T2, 8, ctx);
+ emit_srli(rd, rd, 8, ctx);
out_be:
- emit(rv_andi(RV_REG_T1, rd, 0xff), ctx);
- emit(rv_add(RV_REG_T2, RV_REG_T2, RV_REG_T1), ctx);
+ emit_andi(RV_REG_T1, rd, 0xff, ctx);
+ emit_add(RV_REG_T2, RV_REG_T2, RV_REG_T1, ctx);
- emit(rv_addi(rd, RV_REG_T2, 0), ctx);
+ emit_mv(rd, RV_REG_T2, ctx);
break;
/* dst = imm */
@@ -617,12 +624,10 @@ out_be:
case BPF_ALU | BPF_ADD | BPF_K:
case BPF_ALU64 | BPF_ADD | BPF_K:
if (is_12b_int(imm)) {
- emit(is64 ? rv_addi(rd, rd, imm) :
- rv_addiw(rd, rd, imm), ctx);
+ emit_addi(rd, rd, imm, ctx);
} else {
emit_imm(RV_REG_T1, imm, ctx);
- emit(is64 ? rv_add(rd, rd, RV_REG_T1) :
- rv_addw(rd, rd, RV_REG_T1), ctx);
+ emit_add(rd, rd, RV_REG_T1, ctx);
}
if (!is64 && !aux->verifier_zext)
emit_zext_32(rd, ctx);
@@ -630,12 +635,10 @@ out_be:
case BPF_ALU | BPF_SUB | BPF_K:
case BPF_ALU64 | BPF_SUB | BPF_K:
if (is_12b_int(-imm)) {
- emit(is64 ? rv_addi(rd, rd, -imm) :
- rv_addiw(rd, rd, -imm), ctx);
+ emit_addi(rd, rd, -imm, ctx);
} else {
emit_imm(RV_REG_T1, imm, ctx);
- emit(is64 ? rv_sub(rd, rd, RV_REG_T1) :
- rv_subw(rd, rd, RV_REG_T1), ctx);
+ emit_sub(rd, rd, RV_REG_T1, ctx);
}
if (!is64 && !aux->verifier_zext)
emit_zext_32(rd, ctx);
@@ -643,10 +646,10 @@ out_be:
case BPF_ALU | BPF_AND | BPF_K:
case BPF_ALU64 | BPF_AND | BPF_K:
if (is_12b_int(imm)) {
- emit(rv_andi(rd, rd, imm), ctx);
+ emit_andi(rd, rd, imm, ctx);
} else {
emit_imm(RV_REG_T1, imm, ctx);
- emit(rv_and(rd, rd, RV_REG_T1), ctx);
+ emit_and(rd, rd, RV_REG_T1, ctx);
}
if (!is64 && !aux->verifier_zext)
emit_zext_32(rd, ctx);
@@ -657,7 +660,7 @@ out_be:
emit(rv_ori(rd, rd, imm), ctx);
} else {
emit_imm(RV_REG_T1, imm, ctx);
- emit(rv_or(rd, rd, RV_REG_T1), ctx);
+ emit_or(rd, rd, RV_REG_T1, ctx);
}
if (!is64 && !aux->verifier_zext)
emit_zext_32(rd, ctx);
@@ -668,7 +671,7 @@ out_be:
emit(rv_xori(rd, rd, imm), ctx);
} else {
emit_imm(RV_REG_T1, imm, ctx);
- emit(rv_xor(rd, rd, RV_REG_T1), ctx);
+ emit_xor(rd, rd, RV_REG_T1, ctx);
}
if (!is64 && !aux->verifier_zext)
emit_zext_32(rd, ctx);
@@ -699,19 +702,28 @@ out_be:
break;
case BPF_ALU | BPF_LSH | BPF_K:
case BPF_ALU64 | BPF_LSH | BPF_K:
- emit(is64 ? rv_slli(rd, rd, imm) : rv_slliw(rd, rd, imm), ctx);
+ emit_slli(rd, rd, imm, ctx);
+
if (!is64 && !aux->verifier_zext)
emit_zext_32(rd, ctx);
break;
case BPF_ALU | BPF_RSH | BPF_K:
case BPF_ALU64 | BPF_RSH | BPF_K:
- emit(is64 ? rv_srli(rd, rd, imm) : rv_srliw(rd, rd, imm), ctx);
+ if (is64)
+ emit_srli(rd, rd, imm, ctx);
+ else
+ emit(rv_srliw(rd, rd, imm), ctx);
+
if (!is64 && !aux->verifier_zext)
emit_zext_32(rd, ctx);
break;
case BPF_ALU | BPF_ARSH | BPF_K:
case BPF_ALU64 | BPF_ARSH | BPF_K:
- emit(is64 ? rv_srai(rd, rd, imm) : rv_sraiw(rd, rd, imm), ctx);
+ if (is64)
+ emit_srai(rd, rd, imm, ctx);
+ else
+ emit(rv_sraiw(rd, rd, imm), ctx);
+
if (!is64 && !aux->verifier_zext)
emit_zext_32(rd, ctx);
break;
@@ -757,13 +769,13 @@ out_be:
e = ctx->ninsns;
/* Adjust for extra insns */
- rvoff -= (e - s) << 2;
+ rvoff -= ninsns_rvoff(e - s);
}
if (BPF_OP(code) == BPF_JSET) {
/* Adjust for and */
rvoff -= 4;
- emit(rv_and(RV_REG_T1, rd, rs), ctx);
+ emit_and(RV_REG_T1, rd, rs, ctx);
emit_branch(BPF_JNE, RV_REG_T1, RV_REG_ZERO, rvoff,
ctx);
} else {
@@ -810,7 +822,7 @@ out_be:
e = ctx->ninsns;
/* Adjust for extra insns */
- rvoff -= (e - s) << 2;
+ rvoff -= ninsns_rvoff(e - s);
emit_branch(BPF_OP(code), rd, rs, rvoff, ctx);
break;
@@ -819,19 +831,19 @@ out_be:
rvoff = rv_offset(i, off, ctx);
s = ctx->ninsns;
if (is_12b_int(imm)) {
- emit(rv_andi(RV_REG_T1, rd, imm), ctx);
+ emit_andi(RV_REG_T1, rd, imm, ctx);
} else {
emit_imm(RV_REG_T1, imm, ctx);
- emit(rv_and(RV_REG_T1, rd, RV_REG_T1), ctx);
+ emit_and(RV_REG_T1, rd, RV_REG_T1, ctx);
}
/* For jset32, we should clear the upper 32 bits of t1, but
* sign-extension is sufficient here and saves one instruction,
* as t1 is used only in comparison against zero.
*/
if (!is64 && imm < 0)
- emit(rv_addiw(RV_REG_T1, RV_REG_T1, 0), ctx);
+ emit_addiw(RV_REG_T1, RV_REG_T1, 0, ctx);
e = ctx->ninsns;
- rvoff -= (e - s) << 2;
+ rvoff -= ninsns_rvoff(e - s);
emit_branch(BPF_JNE, RV_REG_T1, RV_REG_ZERO, rvoff, ctx);
break;
@@ -887,7 +899,7 @@ out_be:
}
emit_imm(RV_REG_T1, off, ctx);
- emit(rv_add(RV_REG_T1, RV_REG_T1, rs), ctx);
+ emit_add(RV_REG_T1, RV_REG_T1, rs, ctx);
emit(rv_lbu(rd, 0, RV_REG_T1), ctx);
if (insn_is_zext(&insn[1]))
return 1;
@@ -899,7 +911,7 @@ out_be:
}
emit_imm(RV_REG_T1, off, ctx);
- emit(rv_add(RV_REG_T1, RV_REG_T1, rs), ctx);
+ emit_add(RV_REG_T1, RV_REG_T1, rs, ctx);
emit(rv_lhu(rd, 0, RV_REG_T1), ctx);
if (insn_is_zext(&insn[1]))
return 1;
@@ -911,20 +923,20 @@ out_be:
}
emit_imm(RV_REG_T1, off, ctx);
- emit(rv_add(RV_REG_T1, RV_REG_T1, rs), ctx);
+ emit_add(RV_REG_T1, RV_REG_T1, rs, ctx);
emit(rv_lwu(rd, 0, RV_REG_T1), ctx);
if (insn_is_zext(&insn[1]))
return 1;
break;
case BPF_LDX | BPF_MEM | BPF_DW:
if (is_12b_int(off)) {
- emit(rv_ld(rd, off, rs), ctx);
+ emit_ld(rd, off, rs, ctx);
break;
}
emit_imm(RV_REG_T1, off, ctx);
- emit(rv_add(RV_REG_T1, RV_REG_T1, rs), ctx);
- emit(rv_ld(rd, 0, RV_REG_T1), ctx);
+ emit_add(RV_REG_T1, RV_REG_T1, rs, ctx);
+ emit_ld(rd, 0, RV_REG_T1, ctx);
break;
/* ST: *(size *)(dst + off) = imm */
@@ -936,7 +948,7 @@ out_be:
}
emit_imm(RV_REG_T2, off, ctx);
- emit(rv_add(RV_REG_T2, RV_REG_T2, rd), ctx);
+ emit_add(RV_REG_T2, RV_REG_T2, rd, ctx);
emit(rv_sb(RV_REG_T2, 0, RV_REG_T1), ctx);
break;
@@ -948,30 +960,30 @@ out_be:
}
emit_imm(RV_REG_T2, off, ctx);
- emit(rv_add(RV_REG_T2, RV_REG_T2, rd), ctx);
+ emit_add(RV_REG_T2, RV_REG_T2, rd, ctx);
emit(rv_sh(RV_REG_T2, 0, RV_REG_T1), ctx);
break;
case BPF_ST | BPF_MEM | BPF_W:
emit_imm(RV_REG_T1, imm, ctx);
if (is_12b_int(off)) {
- emit(rv_sw(rd, off, RV_REG_T1), ctx);
+ emit_sw(rd, off, RV_REG_T1, ctx);
break;
}
emit_imm(RV_REG_T2, off, ctx);
- emit(rv_add(RV_REG_T2, RV_REG_T2, rd), ctx);
- emit(rv_sw(RV_REG_T2, 0, RV_REG_T1), ctx);
+ emit_add(RV_REG_T2, RV_REG_T2, rd, ctx);
+ emit_sw(RV_REG_T2, 0, RV_REG_T1, ctx);
break;
case BPF_ST | BPF_MEM | BPF_DW:
emit_imm(RV_REG_T1, imm, ctx);
if (is_12b_int(off)) {
- emit(rv_sd(rd, off, RV_REG_T1), ctx);
+ emit_sd(rd, off, RV_REG_T1, ctx);
break;
}
emit_imm(RV_REG_T2, off, ctx);
- emit(rv_add(RV_REG_T2, RV_REG_T2, rd), ctx);
- emit(rv_sd(RV_REG_T2, 0, RV_REG_T1), ctx);
+ emit_add(RV_REG_T2, RV_REG_T2, rd, ctx);
+ emit_sd(RV_REG_T2, 0, RV_REG_T1, ctx);
break;
/* STX: *(size *)(dst + off) = src */
@@ -982,7 +994,7 @@ out_be:
}
emit_imm(RV_REG_T1, off, ctx);
- emit(rv_add(RV_REG_T1, RV_REG_T1, rd), ctx);
+ emit_add(RV_REG_T1, RV_REG_T1, rd, ctx);
emit(rv_sb(RV_REG_T1, 0, rs), ctx);
break;
case BPF_STX | BPF_MEM | BPF_H:
@@ -992,28 +1004,28 @@ out_be:
}
emit_imm(RV_REG_T1, off, ctx);
- emit(rv_add(RV_REG_T1, RV_REG_T1, rd), ctx);
+ emit_add(RV_REG_T1, RV_REG_T1, rd, ctx);
emit(rv_sh(RV_REG_T1, 0, rs), ctx);
break;
case BPF_STX | BPF_MEM | BPF_W:
if (is_12b_int(off)) {
- emit(rv_sw(rd, off, rs), ctx);
+ emit_sw(rd, off, rs, ctx);
break;
}
emit_imm(RV_REG_T1, off, ctx);
- emit(rv_add(RV_REG_T1, RV_REG_T1, rd), ctx);
- emit(rv_sw(RV_REG_T1, 0, rs), ctx);
+ emit_add(RV_REG_T1, RV_REG_T1, rd, ctx);
+ emit_sw(RV_REG_T1, 0, rs, ctx);
break;
case BPF_STX | BPF_MEM | BPF_DW:
if (is_12b_int(off)) {
- emit(rv_sd(rd, off, rs), ctx);
+ emit_sd(rd, off, rs, ctx);
break;
}
emit_imm(RV_REG_T1, off, ctx);
- emit(rv_add(RV_REG_T1, RV_REG_T1, rd), ctx);
- emit(rv_sd(RV_REG_T1, 0, rs), ctx);
+ emit_add(RV_REG_T1, RV_REG_T1, rd, ctx);
+ emit_sd(RV_REG_T1, 0, rs, ctx);
break;
/* STX XADD: lock *(u32 *)(dst + off) += src */
case BPF_STX | BPF_XADD | BPF_W:
@@ -1021,10 +1033,10 @@ out_be:
case BPF_STX | BPF_XADD | BPF_DW:
if (off) {
if (is_12b_int(off)) {
- emit(rv_addi(RV_REG_T1, rd, off), ctx);
+ emit_addi(RV_REG_T1, rd, off, ctx);
} else {
emit_imm(RV_REG_T1, off, ctx);
- emit(rv_add(RV_REG_T1, RV_REG_T1, rd), ctx);
+ emit_add(RV_REG_T1, RV_REG_T1, rd, ctx);
}
rd = RV_REG_T1;
@@ -1073,52 +1085,53 @@ void bpf_jit_build_prologue(struct rv_jit_context *ctx)
/* First instruction is always setting the tail-call-counter
* (TCC) register. This instruction is skipped for tail calls.
+ * Force using a 4-byte (non-compressed) instruction.
*/
emit(rv_addi(RV_REG_TCC, RV_REG_ZERO, MAX_TAIL_CALL_CNT), ctx);
- emit(rv_addi(RV_REG_SP, RV_REG_SP, -stack_adjust), ctx);
+ emit_addi(RV_REG_SP, RV_REG_SP, -stack_adjust, ctx);
if (seen_reg(RV_REG_RA, ctx)) {
- emit(rv_sd(RV_REG_SP, store_offset, RV_REG_RA), ctx);
+ emit_sd(RV_REG_SP, store_offset, RV_REG_RA, ctx);
store_offset -= 8;
}
- emit(rv_sd(RV_REG_SP, store_offset, RV_REG_FP), ctx);
+ emit_sd(RV_REG_SP, store_offset, RV_REG_FP, ctx);
store_offset -= 8;
if (seen_reg(RV_REG_S1, ctx)) {
- emit(rv_sd(RV_REG_SP, store_offset, RV_REG_S1), ctx);
+ emit_sd(RV_REG_SP, store_offset, RV_REG_S1, ctx);
store_offset -= 8;
}
if (seen_reg(RV_REG_S2, ctx)) {
- emit(rv_sd(RV_REG_SP, store_offset, RV_REG_S2), ctx);
+ emit_sd(RV_REG_SP, store_offset, RV_REG_S2, ctx);
store_offset -= 8;
}
if (seen_reg(RV_REG_S3, ctx)) {
- emit(rv_sd(RV_REG_SP, store_offset, RV_REG_S3), ctx);
+ emit_sd(RV_REG_SP, store_offset, RV_REG_S3, ctx);
store_offset -= 8;
}
if (seen_reg(RV_REG_S4, ctx)) {
- emit(rv_sd(RV_REG_SP, store_offset, RV_REG_S4), ctx);
+ emit_sd(RV_REG_SP, store_offset, RV_REG_S4, ctx);
store_offset -= 8;
}
if (seen_reg(RV_REG_S5, ctx)) {
- emit(rv_sd(RV_REG_SP, store_offset, RV_REG_S5), ctx);
+ emit_sd(RV_REG_SP, store_offset, RV_REG_S5, ctx);
store_offset -= 8;
}
if (seen_reg(RV_REG_S6, ctx)) {
- emit(rv_sd(RV_REG_SP, store_offset, RV_REG_S6), ctx);
+ emit_sd(RV_REG_SP, store_offset, RV_REG_S6, ctx);
store_offset -= 8;
}
- emit(rv_addi(RV_REG_FP, RV_REG_SP, stack_adjust), ctx);
+ emit_addi(RV_REG_FP, RV_REG_SP, stack_adjust, ctx);
if (bpf_stack_adjust)
- emit(rv_addi(RV_REG_S5, RV_REG_SP, bpf_stack_adjust), ctx);
+ emit_addi(RV_REG_S5, RV_REG_SP, bpf_stack_adjust, ctx);
/* Program contains calls and tail calls, so RV_REG_TCC needs
* to be saved across calls.
*/
if (seen_tail_call(ctx) && seen_call(ctx))
- emit(rv_addi(RV_REG_TCC_SAVED, RV_REG_TCC, 0), ctx);
+ emit_mv(RV_REG_TCC_SAVED, RV_REG_TCC, ctx);
ctx->stack_size = stack_adjust;
}
diff --git a/arch/riscv/net/bpf_jit_core.c b/arch/riscv/net/bpf_jit_core.c
index 709b94ece3ed..3630d447352c 100644
--- a/arch/riscv/net/bpf_jit_core.c
+++ b/arch/riscv/net/bpf_jit_core.c
@@ -73,7 +73,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
if (ctx->offset) {
extra_pass = true;
- image_size = sizeof(u32) * ctx->ninsns;
+ image_size = sizeof(*ctx->insns) * ctx->ninsns;
goto skip_init_ctx;
}
@@ -103,7 +103,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
if (jit_data->header)
break;
- image_size = sizeof(u32) * ctx->ninsns;
+ image_size = sizeof(*ctx->insns) * ctx->ninsns;
jit_data->header =
bpf_jit_binary_alloc(image_size,
&jit_data->image,
@@ -114,7 +114,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
goto out_offset;
}
- ctx->insns = (u32 *)jit_data->image;
+ ctx->insns = (u16 *)jit_data->image;
/*
* Now, when the image is allocated, the image can
* potentially shrink more (auipc/jalr -> jal).
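sizeof(*ctx->insns) tracks the element type, so the size stays correct now that ninsns counts u16 parcels (hypothetical numbers):

    /* ctx->ninsns == 100 half-words:
     *   image_size = sizeof(u16) * 100 == 200 bytes.
     * Keeping sizeof(u32) here would have allocated twice the image.
     */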
diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
index f4242b894cf2..26f97a10e793 100644
--- a/arch/s390/net/bpf_jit_comp.c
+++ b/arch/s390/net/bpf_jit_comp.c
@@ -489,6 +489,24 @@ static void save_restore_regs(struct bpf_jit *jit, int op, u32 stack_depth)
} while (re <= last);
}
+static void bpf_skip(struct bpf_jit *jit, int size)
+{
+ if (size >= 6 && !is_valid_rel(size)) {
+ /* brcl 0xf,size */
+ EMIT6_PCREL_RIL(0xc0f4000000, size);
+ size -= 6;
+ } else if (size >= 4 && is_valid_rel(size)) {
+ /* brc 0xf,size */
+ EMIT4_PCREL(0xa7f40000, size);
+ size -= 4;
+ }
+ while (size >= 2) {
+ /* bcr 0,%0 */
+ _EMIT2(0x0700);
+ size -= 2;
+ }
+}
+
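bpf_skip() pads any even byte count with at most one branch plus 2-byte nops; a trace (assuming is_valid_rel(10) holds, as it does for small offsets):

    /* bpf_skip(jit, 10):
     *   size >= 4 && is_valid_rel(10) -> brc 0xf,10 (4 bytes, size -= 4)
     *   while (size >= 2)             -> bcr 0,%0 three times (6 bytes)
     * All 10 bytes are emitted, and the leading branch jumps straight
     * past the nops at run time.
     */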
/*
* Emit function prologue
*
@@ -501,10 +519,11 @@ static void bpf_jit_prologue(struct bpf_jit *jit, u32 stack_depth)
/* xc STK_OFF_TCCNT(4,%r15),STK_OFF_TCCNT(%r15) */
_EMIT6(0xd703f000 | STK_OFF_TCCNT, 0xf000 | STK_OFF_TCCNT);
} else {
- /* j tail_call_start: NOP if no tail calls are used */
- EMIT4_PCREL(0xa7f40000, 6);
- /* bcr 0,%0 */
- EMIT2(0x0700, 0, REG_0);
+ /*
+ * There are no tail calls. Insert nops in order to have
+ * tail_call_start at a predictable offset.
+ */
+ bpf_skip(jit, 6);
}
/* Tail calls have to skip above initialization */
jit->tail_call_start = jit->prg;
@@ -1268,8 +1287,12 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
last = (i == fp->len - 1) ? 1 : 0;
if (last)
break;
- /* j <exit> */
- EMIT4_PCREL(0xa7f40000, jit->exit_ip - jit->prg);
+ if (!is_first_pass(jit) && can_use_rel(jit, jit->exit_ip))
+ /* brc 0xf, <exit> */
+ EMIT4_PCREL_RIC(0xa7040000, 0xf, jit->exit_ip);
+ else
+ /* brcl 0xf, <exit> */
+ EMIT6_PCREL_RILC(0xc0040000, 0xf, jit->exit_ip);
break;
/*
* Branch relative (number of skipped instructions) to offset on
@@ -1417,21 +1440,10 @@ branch_ks:
}
break;
branch_ku:
- is_jmp32 = BPF_CLASS(insn->code) == BPF_JMP32;
- /* clfi or clgfi %dst,imm */
- EMIT6_IMM(is_jmp32 ? 0xc20f0000 : 0xc20e0000,
- dst_reg, imm);
- if (!is_first_pass(jit) &&
- can_use_rel(jit, addrs[i + off + 1])) {
- /* brc mask,off */
- EMIT4_PCREL_RIC(0xa7040000,
- mask >> 12, addrs[i + off + 1]);
- } else {
- /* brcl mask,off */
- EMIT6_PCREL_RILC(0xc0040000,
- mask >> 12, addrs[i + off + 1]);
- }
- break;
+ /* lgfi %w1,imm (load sign-extended imm) */
+ src_reg = REG_1;
+ EMIT6_IMM(0xc0010000, src_reg, imm);
+ goto branch_xu;
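The unsigned compare-with-immediate case now materializes the immediate and falls through to the register-vs-register path, leaving the near/far branch selection in one place (a summary, not additional code):

    /* e.g. BPF_JMP | BPF_JGT | BPF_K (unsigned dst > imm):
     *   lgfi %w1,imm    load the sign-extended immediate into REG_1
     *   goto branch_xu  then branch exactly as the BPF_X variant does
     */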
branch_xs:
is_jmp32 = BPF_CLASS(insn->code) == BPF_JMP32;
if (!is_first_pass(jit) &&
@@ -1510,7 +1522,14 @@ static bool bpf_is_new_addr_sane(struct bpf_jit *jit, int i)
*/
static int bpf_set_addr(struct bpf_jit *jit, int i)
{
- if (!bpf_is_new_addr_sane(jit, i))
+ int delta;
+
+ if (is_codegen_pass(jit)) {
+ delta = jit->prg - jit->addrs[i];
+ if (delta < 0)
+ bpf_skip(jit, -delta);
+ }
+ if (WARN_ON_ONCE(!bpf_is_new_addr_sane(jit, i)))
return -1;
jit->addrs[i] = jit->prg;
return 0;
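The padding keeps per-instruction addresses stable once code can only shrink between passes; the invariant, with hypothetical numbers:

    /* Pass n recorded addrs[i] == 100; on the codegen pass shorter
     * branches mean jit->prg == 96 when instruction i is reached:
     *   delta = 96 - 100 = -4  ->  bpf_skip(jit, 4)
     * prg is back at 100, addrs[i] is re-recorded unchanged, and all
     * previously resolved offsets into instruction i stay valid.
     */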