Diffstat (limited to 'arch/arm64')
 arch/arm64/boot/dts/freescale/fsl-lx2160a-clearfog-itx.dtsi |   4
 arch/arm64/boot/dts/freescale/fsl-lx2160a.dtsi              |   6
 arch/arm64/boot/dts/mediatek/mt2712-evb.dts                 |   1
 arch/arm64/boot/dts/mediatek/mt2712e.dtsi                   |  14
 arch/arm64/boot/dts/microchip/sparx5.dtsi                   |   5
 arch/arm64/boot/dts/xilinx/zynqmp.dtsi                      |   8
 arch/arm64/include/asm/debug-monitors.h                     |  12
 arch/arm64/include/asm/insn-def.h                           |  14
 arch/arm64/include/asm/insn.h                               |  80
 arch/arm64/lib/insn.c                                       | 187
 arch/arm64/net/bpf_jit.h                                    |  44
 arch/arm64/net/bpf_jit_comp.c                               | 246
 12 files changed, 531 insertions(+), 90 deletions(-)
diff --git a/arch/arm64/boot/dts/freescale/fsl-lx2160a-clearfog-itx.dtsi b/arch/arm64/boot/dts/freescale/fsl-lx2160a-clearfog-itx.dtsi
index 17f8e733972a..41702e7386e3 100644
--- a/arch/arm64/boot/dts/freescale/fsl-lx2160a-clearfog-itx.dtsi
+++ b/arch/arm64/boot/dts/freescale/fsl-lx2160a-clearfog-itx.dtsi
@@ -63,21 +63,25 @@
&dpmac7 {
sfp = <&sfp0>;
managed = "in-band-status";
+ phys = <&serdes_1 3>;
};
&dpmac8 {
sfp = <&sfp1>;
managed = "in-band-status";
+ phys = <&serdes_1 2>;
};
&dpmac9 {
sfp = <&sfp2>;
managed = "in-band-status";
+ phys = <&serdes_1 1>;
};
&dpmac10 {
sfp = <&sfp3>;
managed = "in-band-status";
+ phys = <&serdes_1 0>;
};
&emdio2 {
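
Each dpmac above is now tied to one lane of the LX2160A's Lynx 28G SerDes block through the generic PHY binding ("phys" plus a lane index). A minimal consumer-side sketch, assuming the generic PHY framework in include/linux/phy/phy.h; the helper name is hypothetical and this is not the actual dpaa2 code:

    #include <linux/device.h>
    #include <linux/phy/phy.h>

    /* Hypothetical: claim the lane named by the "phys" property
     * (e.g. <&serdes_1 3> for dpmac7) and bring it up before
     * enabling the MAC. */
    static int example_attach_serdes(struct device *dev)
    {
            struct phy *serdes = devm_of_phy_get(dev, dev->of_node, NULL);

            if (IS_ERR(serdes))
                    return PTR_ERR(serdes);

            return phy_init(serdes) ?: phy_power_on(serdes);
    }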
diff --git a/arch/arm64/boot/dts/freescale/fsl-lx2160a.dtsi b/arch/arm64/boot/dts/freescale/fsl-lx2160a.dtsi
index 7032505f5ef3..92a881302708 100644
--- a/arch/arm64/boot/dts/freescale/fsl-lx2160a.dtsi
+++ b/arch/arm64/boot/dts/freescale/fsl-lx2160a.dtsi
@@ -612,6 +612,12 @@
ranges;
dma-ranges = <0x0 0x0 0x0 0x0 0x10000 0x00000000>;
+ serdes_1: phy@1ea0000 {
+ compatible = "fsl,lynx-28g";
+ reg = <0x0 0x1ea0000 0x0 0x1e30>;
+ #phy-cells = <1>;
+ };
+
crypto: crypto@8000000 {
compatible = "fsl,sec-v5.0", "fsl,sec-v4.0";
fsl,sec-era = <10>;
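
On the provider side, #phy-cells = <1> means one cell follows the serdes_1 phandle, which the PHY driver decodes in its of_xlate callback to pick a lane. A sketch under that assumption (driver structure and names hypothetical); the callback would be registered with devm_of_phy_provider_register():

    #include <linux/phy/phy.h>
    #include <linux/of.h>

    #define EXAMPLE_NUM_LANES 8

    struct example_serdes {
            struct phy *lane[EXAMPLE_NUM_LANES];
    };

    /* A consumer reference like <&serdes_1 3> arrives here with
     * args->args[0] == 3. */
    static struct phy *example_serdes_xlate(struct device *dev,
                                            struct of_phandle_args *args)
    {
            struct example_serdes *sd = dev_get_drvdata(dev);

            if (args->args_count != 1 || args->args[0] >= EXAMPLE_NUM_LANES)
                    return ERR_PTR(-EINVAL);

            return sd->lane[args->args[0]];
    }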
diff --git a/arch/arm64/boot/dts/mediatek/mt2712-evb.dts b/arch/arm64/boot/dts/mediatek/mt2712-evb.dts
index 7d369fdd3117..11aa135aa0f3 100644
--- a/arch/arm64/boot/dts/mediatek/mt2712-evb.dts
+++ b/arch/arm64/boot/dts/mediatek/mt2712-evb.dts
@@ -110,6 +110,7 @@
phy-handle = <&ethernet_phy0>;
mediatek,tx-delay-ps = <1530>;
snps,reset-gpio = <&pio 87 GPIO_ACTIVE_LOW>;
+ snps,reset-delays-us = <0 10000 10000>;
pinctrl-names = "default", "sleep";
pinctrl-0 = <&eth_default>;
pinctrl-1 = <&eth_sleep>;
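
The three cells of snps,reset-delays-us are the pre-assert, assert, and post-deassert delays for the PHY reset GPIO, in microseconds. A sketch of how a dwmac-style driver might consume them, modelled on (not copied from) the stmmac MDIO reset path:

    #include <linux/of.h>
    #include <linux/delay.h>
    #include <linux/gpio/consumer.h>

    static void example_phy_reset(struct device_node *np,
                                  struct gpio_desc *reset_gpio)
    {
            u32 d[3] = { 0, 10000, 10000 };     /* defaults match the DT */

            of_property_read_u32_array(np, "snps,reset-delays-us", d, 3);

            gpiod_set_value_cansleep(reset_gpio, 0);
            usleep_range(d[0], d[0] + 100);
            gpiod_set_value_cansleep(reset_gpio, 1);    /* assert reset */
            usleep_range(d[1], d[1] + 100);
            gpiod_set_value_cansleep(reset_gpio, 0);    /* release reset */
            usleep_range(d[2], d[2] + 100);
    }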
diff --git a/arch/arm64/boot/dts/mediatek/mt2712e.dtsi b/arch/arm64/boot/dts/mediatek/mt2712e.dtsi
index de16c0d80c30..a27b7628c5f7 100644
--- a/arch/arm64/boot/dts/mediatek/mt2712e.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt2712e.dtsi
@@ -726,7 +726,7 @@
};
eth: ethernet@1101c000 {
- compatible = "mediatek,mt2712-gmac";
+ compatible = "mediatek,mt2712-gmac", "snps,dwmac-4.20a";
reg = <0 0x1101c000 0 0x1300>;
interrupts = <GIC_SPI 237 IRQ_TYPE_LEVEL_LOW>;
interrupt-names = "macirq";
@@ -734,15 +734,19 @@
clock-names = "axi",
"apb",
"mac_main",
- "ptp_ref";
+ "ptp_ref",
+ "rmii_internal";
clocks = <&pericfg CLK_PERI_GMAC>,
<&pericfg CLK_PERI_GMAC_PCLK>,
<&topckgen CLK_TOP_ETHER_125M_SEL>,
- <&topckgen CLK_TOP_ETHER_50M_SEL>;
+ <&topckgen CLK_TOP_ETHER_50M_SEL>,
+ <&topckgen CLK_TOP_ETHER_50M_RMII_SEL>;
assigned-clocks = <&topckgen CLK_TOP_ETHER_125M_SEL>,
- <&topckgen CLK_TOP_ETHER_50M_SEL>;
+ <&topckgen CLK_TOP_ETHER_50M_SEL>,
+ <&topckgen CLK_TOP_ETHER_50M_RMII_SEL>;
assigned-clock-parents = <&topckgen CLK_TOP_ETHERPLL_125M>,
- <&topckgen CLK_TOP_APLL1_D3>;
+ <&topckgen CLK_TOP_APLL1_D3>,
+ <&topckgen CLK_TOP_ETHERPLL_50M>;
power-domains = <&scpsys MT2712_POWER_DOMAIN_AUDIO>;
mediatek,pericfg = <&pericfg>;
snps,axi-config = <&stmmac_axi_setup>;
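
The GMAC now lists a fifth clock, "rmii_internal", alongside the existing four. Consumers typically fetch such named clocks in one pass with the clk_bulk API; a hedged sketch (the surrounding structure is hypothetical, only the clock names come from the node above):

    #include <linux/kernel.h>
    #include <linux/clk.h>

    static const char * const gmac_clk_names[] = {
            "axi", "apb", "mac_main", "ptp_ref", "rmii_internal",
    };

    static int example_get_gmac_clks(struct device *dev,
                                     struct clk_bulk_data *clks)
    {
            int i;

            for (i = 0; i < ARRAY_SIZE(gmac_clk_names); i++)
                    clks[i].id = gmac_clk_names[i];

            return devm_clk_bulk_get(dev, ARRAY_SIZE(gmac_clk_names), clks);
    }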
diff --git a/arch/arm64/boot/dts/microchip/sparx5.dtsi b/arch/arm64/boot/dts/microchip/sparx5.dtsi
index 787ebcec121d..2dd5e38820b1 100644
--- a/arch/arm64/boot/dts/microchip/sparx5.dtsi
+++ b/arch/arm64/boot/dts/microchip/sparx5.dtsi
@@ -471,9 +471,10 @@
<0x6 0x10004000 0x7fc000>,
<0x6 0x11010000 0xaf0000>;
reg-names = "cpu", "dev", "gcb";
- interrupt-names = "xtr", "fdma";
+ interrupt-names = "xtr", "fdma", "ptp";
interrupts = <GIC_SPI 30 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 25 IRQ_TYPE_LEVEL_HIGH>;
+ <GIC_SPI 25 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 27 IRQ_TYPE_LEVEL_HIGH>;
resets = <&reset 0>;
reset-names = "switch";
};
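
The switch node gains a third, named "ptp" interrupt. Drivers usually look such lines up by name so older device trees keep working; a sketch with a hypothetical handler, modelled on common platform-driver practice rather than the actual sparx5 code:

    #include <linux/platform_device.h>
    #include <linux/interrupt.h>

    static int example_request_ptp_irq(struct platform_device *pdev,
                                       irq_handler_t handler)
    {
            int irq = platform_get_irq_byname(pdev, "ptp");

            if (irq < 0)
                    return irq;     /* older DTs may lack the line */

            return devm_request_irq(&pdev->dev, irq, handler, 0,
                                    "sparx5-ptp", pdev);
    }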
diff --git a/arch/arm64/boot/dts/xilinx/zynqmp.dtsi b/arch/arm64/boot/dts/xilinx/zynqmp.dtsi
index 74e66443e4ce..9bec3ba20c69 100644
--- a/arch/arm64/boot/dts/xilinx/zynqmp.dtsi
+++ b/arch/arm64/boot/dts/xilinx/zynqmp.dtsi
@@ -512,6 +512,8 @@
#stream-id-cells = <1>;
iommus = <&smmu 0x874>;
power-domains = <&zynqmp_firmware PD_ETH_0>;
+ resets = <&zynqmp_reset ZYNQMP_RESET_GEM0>;
+ reset-names = "gem0_rst";
};
gem1: ethernet@ff0c0000 {
@@ -526,6 +528,8 @@
#stream-id-cells = <1>;
iommus = <&smmu 0x875>;
power-domains = <&zynqmp_firmware PD_ETH_1>;
+ resets = <&zynqmp_reset ZYNQMP_RESET_GEM1>;
+ reset-names = "gem1_rst";
};
gem2: ethernet@ff0d0000 {
@@ -540,6 +544,8 @@
#stream-id-cells = <1>;
iommus = <&smmu 0x876>;
power-domains = <&zynqmp_firmware PD_ETH_2>;
+ resets = <&zynqmp_reset ZYNQMP_RESET_GEM2>;
+ reset-names = "gem2_rst";
};
gem3: ethernet@ff0e0000 {
@@ -554,6 +560,8 @@
#stream-id-cells = <1>;
iommus = <&smmu 0x877>;
power-domains = <&zynqmp_firmware PD_ETH_3>;
+ resets = <&zynqmp_reset ZYNQMP_RESET_GEM3>;
+ reset-names = "gem3_rst";
};
gpio: gpio@ff0a0000 {
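
Each GEM instance now exposes its block reset to the driver via the reset framework. A consumer-side sketch, assuming the "gem0_rst" id from the node above; the function is illustrative, not the actual macb code:

    #include <linux/reset.h>

    static int example_gem_reset(struct device *dev)
    {
            struct reset_control *rst =
                    devm_reset_control_get_optional_exclusive(dev, "gem0_rst");

            if (IS_ERR(rst))
                    return PTR_ERR(rst);

            /* Pulse the controller out of reset before touching it;
             * a NULL (absent) reset is a no-op here. */
            return reset_control_reset(rst);
    }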
diff --git a/arch/arm64/include/asm/debug-monitors.h b/arch/arm64/include/asm/debug-monitors.h
index 657c921fd784..00c291067e57 100644
--- a/arch/arm64/include/asm/debug-monitors.h
+++ b/arch/arm64/include/asm/debug-monitors.h
@@ -34,18 +34,6 @@
*/
#define BREAK_INSTR_SIZE AARCH64_INSN_SIZE
-/*
- * BRK instruction encoding
- * The #imm16 value should be placed at bits[20:5] within BRK ins
- */
-#define AARCH64_BREAK_MON 0xd4200000
-
-/*
- * BRK instruction for provoking a fault on purpose
- * Unlike kgdb, #imm16 value with unallocated handler is used for faulting.
- */
-#define AARCH64_BREAK_FAULT (AARCH64_BREAK_MON | (FAULT_BRK_IMM << 5))
-
#define AARCH64_BREAK_KGDB_DYN_DBG \
(AARCH64_BREAK_MON | (KGDB_DYN_DBG_BRK_IMM << 5))
diff --git a/arch/arm64/include/asm/insn-def.h b/arch/arm64/include/asm/insn-def.h
index 2c075f615c6a..1a7d0d483698 100644
--- a/arch/arm64/include/asm/insn-def.h
+++ b/arch/arm64/include/asm/insn-def.h
@@ -3,7 +3,21 @@
#ifndef __ASM_INSN_DEF_H
#define __ASM_INSN_DEF_H
+#include <asm/brk-imm.h>
+
/* A64 instructions are always 32 bits. */
#define AARCH64_INSN_SIZE 4
+/*
+ * BRK instruction encoding
+ * The #imm16 value should be placed at bits[20:5] within the BRK instruction
+ */
+#define AARCH64_BREAK_MON 0xd4200000
+
+/*
+ * BRK instruction for provoking a fault on purpose
+ * Unlike kgdb, an #imm16 value with no allocated handler is used to provoke a fault.
+ */
+#define AARCH64_BREAK_FAULT (AARCH64_BREAK_MON | (FAULT_BRK_IMM << 5))
+
#endif /* __ASM_INSN_DEF_H */
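
The moved comment pins the #imm16 immediate at bits [20:5] of the BRK instruction word, so BRK #1 encodes as 0xd4200020. A host-side self-check of that layout (plain user-space C, not kernel code):

    #include <assert.h>
    #include <stdint.h>

    #define AARCH64_BREAK_MON 0xd4200000u

    /* Place imm16 at bits [20:5], as the comment above describes. */
    static uint32_t brk(uint16_t imm16)
    {
            return AARCH64_BREAK_MON | ((uint32_t)imm16 << 5);
    }

    int main(void)
    {
            assert(brk(0) == 0xd4200000u);  /* BRK #0 */
            assert(brk(1) == 0xd4200020u);  /* BRK #1 */
            return 0;
    }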
diff --git a/arch/arm64/include/asm/insn.h b/arch/arm64/include/asm/insn.h
index b02f0c328c8e..1e5760d567ae 100644
--- a/arch/arm64/include/asm/insn.h
+++ b/arch/arm64/include/asm/insn.h
@@ -206,7 +206,9 @@ enum aarch64_insn_ldst_type {
AARCH64_INSN_LDST_LOAD_PAIR_POST_INDEX,
AARCH64_INSN_LDST_STORE_PAIR_POST_INDEX,
AARCH64_INSN_LDST_LOAD_EX,
+ AARCH64_INSN_LDST_LOAD_ACQ_EX,
AARCH64_INSN_LDST_STORE_EX,
+ AARCH64_INSN_LDST_STORE_REL_EX,
};
enum aarch64_insn_adsb_type {
@@ -281,6 +283,36 @@ enum aarch64_insn_adr_type {
AARCH64_INSN_ADR_TYPE_ADR,
};
+enum aarch64_insn_mem_atomic_op {
+ AARCH64_INSN_MEM_ATOMIC_ADD,
+ AARCH64_INSN_MEM_ATOMIC_CLR,
+ AARCH64_INSN_MEM_ATOMIC_EOR,
+ AARCH64_INSN_MEM_ATOMIC_SET,
+ AARCH64_INSN_MEM_ATOMIC_SWP,
+};
+
+enum aarch64_insn_mem_order_type {
+ AARCH64_INSN_MEM_ORDER_NONE,
+ AARCH64_INSN_MEM_ORDER_ACQ,
+ AARCH64_INSN_MEM_ORDER_REL,
+ AARCH64_INSN_MEM_ORDER_ACQREL,
+};
+
+enum aarch64_insn_mb_type {
+ AARCH64_INSN_MB_SY,
+ AARCH64_INSN_MB_ST,
+ AARCH64_INSN_MB_LD,
+ AARCH64_INSN_MB_ISH,
+ AARCH64_INSN_MB_ISHST,
+ AARCH64_INSN_MB_ISHLD,
+ AARCH64_INSN_MB_NSH,
+ AARCH64_INSN_MB_NSHST,
+ AARCH64_INSN_MB_NSHLD,
+ AARCH64_INSN_MB_OSH,
+ AARCH64_INSN_MB_OSHST,
+ AARCH64_INSN_MB_OSHLD,
+};
+
#define __AARCH64_INSN_FUNCS(abbr, mask, val) \
static __always_inline bool aarch64_insn_is_##abbr(u32 code) \
{ \
@@ -304,6 +336,11 @@ __AARCH64_INSN_FUNCS(store_post, 0x3FE00C00, 0x38000400)
__AARCH64_INSN_FUNCS(load_post, 0x3FE00C00, 0x38400400)
__AARCH64_INSN_FUNCS(str_reg, 0x3FE0EC00, 0x38206800)
__AARCH64_INSN_FUNCS(ldadd, 0x3F20FC00, 0x38200000)
+__AARCH64_INSN_FUNCS(ldclr, 0x3F20FC00, 0x38201000)
+__AARCH64_INSN_FUNCS(ldeor, 0x3F20FC00, 0x38202000)
+__AARCH64_INSN_FUNCS(ldset, 0x3F20FC00, 0x38203000)
+__AARCH64_INSN_FUNCS(swp, 0x3F20FC00, 0x38208000)
+__AARCH64_INSN_FUNCS(cas, 0x3FA07C00, 0x08A07C00)
__AARCH64_INSN_FUNCS(ldr_reg, 0x3FE0EC00, 0x38606800)
__AARCH64_INSN_FUNCS(ldr_lit, 0xBF000000, 0x18000000)
__AARCH64_INSN_FUNCS(ldrsw_lit, 0xFF000000, 0x98000000)
@@ -475,13 +512,6 @@ u32 aarch64_insn_gen_load_store_ex(enum aarch64_insn_register reg,
enum aarch64_insn_register state,
enum aarch64_insn_size_type size,
enum aarch64_insn_ldst_type type);
-u32 aarch64_insn_gen_ldadd(enum aarch64_insn_register result,
- enum aarch64_insn_register address,
- enum aarch64_insn_register value,
- enum aarch64_insn_size_type size);
-u32 aarch64_insn_gen_stadd(enum aarch64_insn_register address,
- enum aarch64_insn_register value,
- enum aarch64_insn_size_type size);
u32 aarch64_insn_gen_add_sub_imm(enum aarch64_insn_register dst,
enum aarch64_insn_register src,
int imm, enum aarch64_insn_variant variant,
@@ -542,6 +572,42 @@ u32 aarch64_insn_gen_prefetch(enum aarch64_insn_register base,
enum aarch64_insn_prfm_type type,
enum aarch64_insn_prfm_target target,
enum aarch64_insn_prfm_policy policy);
+#ifdef CONFIG_ARM64_LSE_ATOMICS
+u32 aarch64_insn_gen_atomic_ld_op(enum aarch64_insn_register result,
+ enum aarch64_insn_register address,
+ enum aarch64_insn_register value,
+ enum aarch64_insn_size_type size,
+ enum aarch64_insn_mem_atomic_op op,
+ enum aarch64_insn_mem_order_type order);
+u32 aarch64_insn_gen_cas(enum aarch64_insn_register result,
+ enum aarch64_insn_register address,
+ enum aarch64_insn_register value,
+ enum aarch64_insn_size_type size,
+ enum aarch64_insn_mem_order_type order);
+#else
+static inline
+u32 aarch64_insn_gen_atomic_ld_op(enum aarch64_insn_register result,
+ enum aarch64_insn_register address,
+ enum aarch64_insn_register value,
+ enum aarch64_insn_size_type size,
+ enum aarch64_insn_mem_atomic_op op,
+ enum aarch64_insn_mem_order_type order)
+{
+ return AARCH64_BREAK_FAULT;
+}
+
+static inline
+u32 aarch64_insn_gen_cas(enum aarch64_insn_register result,
+ enum aarch64_insn_register address,
+ enum aarch64_insn_register value,
+ enum aarch64_insn_size_type size,
+ enum aarch64_insn_mem_order_type order)
+{
+ return AARCH64_BREAK_FAULT;
+}
+#endif
+u32 aarch64_insn_gen_dmb(enum aarch64_insn_mb_type type);
+
s32 aarch64_get_branch_offset(u32 insn);
u32 aarch64_set_branch_offset(u32 insn, s32 offset);
diff --git a/arch/arm64/lib/insn.c b/arch/arm64/lib/insn.c
index fccfe363e567..5e90887deec4 100644
--- a/arch/arm64/lib/insn.c
+++ b/arch/arm64/lib/insn.c
@@ -578,10 +578,16 @@ u32 aarch64_insn_gen_load_store_ex(enum aarch64_insn_register reg,
switch (type) {
case AARCH64_INSN_LDST_LOAD_EX:
+ case AARCH64_INSN_LDST_LOAD_ACQ_EX:
insn = aarch64_insn_get_load_ex_value();
+ if (type == AARCH64_INSN_LDST_LOAD_ACQ_EX)
+ insn |= BIT(15);
break;
case AARCH64_INSN_LDST_STORE_EX:
+ case AARCH64_INSN_LDST_STORE_REL_EX:
insn = aarch64_insn_get_store_ex_value();
+ if (type == AARCH64_INSN_LDST_STORE_REL_EX)
+ insn |= BIT(15);
break;
default:
pr_err("%s: unknown load/store exclusive encoding %d\n", __func__, type);
@@ -603,12 +609,65 @@ u32 aarch64_insn_gen_load_store_ex(enum aarch64_insn_register reg,
state);
}
-u32 aarch64_insn_gen_ldadd(enum aarch64_insn_register result,
- enum aarch64_insn_register address,
- enum aarch64_insn_register value,
- enum aarch64_insn_size_type size)
+#ifdef CONFIG_ARM64_LSE_ATOMICS
+static u32 aarch64_insn_encode_ldst_order(enum aarch64_insn_mem_order_type type,
+ u32 insn)
{
- u32 insn = aarch64_insn_get_ldadd_value();
+ u32 order;
+
+ switch (type) {
+ case AARCH64_INSN_MEM_ORDER_NONE:
+ order = 0;
+ break;
+ case AARCH64_INSN_MEM_ORDER_ACQ:
+ order = 2;
+ break;
+ case AARCH64_INSN_MEM_ORDER_REL:
+ order = 1;
+ break;
+ case AARCH64_INSN_MEM_ORDER_ACQREL:
+ order = 3;
+ break;
+ default:
+ pr_err("%s: unknown mem order %d\n", __func__, type);
+ return AARCH64_BREAK_FAULT;
+ }
+
+ insn &= ~GENMASK(23, 22);
+ insn |= order << 22;
+
+ return insn;
+}
+
+u32 aarch64_insn_gen_atomic_ld_op(enum aarch64_insn_register result,
+ enum aarch64_insn_register address,
+ enum aarch64_insn_register value,
+ enum aarch64_insn_size_type size,
+ enum aarch64_insn_mem_atomic_op op,
+ enum aarch64_insn_mem_order_type order)
+{
+ u32 insn;
+
+ switch (op) {
+ case AARCH64_INSN_MEM_ATOMIC_ADD:
+ insn = aarch64_insn_get_ldadd_value();
+ break;
+ case AARCH64_INSN_MEM_ATOMIC_CLR:
+ insn = aarch64_insn_get_ldclr_value();
+ break;
+ case AARCH64_INSN_MEM_ATOMIC_EOR:
+ insn = aarch64_insn_get_ldeor_value();
+ break;
+ case AARCH64_INSN_MEM_ATOMIC_SET:
+ insn = aarch64_insn_get_ldset_value();
+ break;
+ case AARCH64_INSN_MEM_ATOMIC_SWP:
+ insn = aarch64_insn_get_swp_value();
+ break;
+ default:
+ pr_err("%s: unimplemented mem atomic op %d\n", __func__, op);
+ return AARCH64_BREAK_FAULT;
+ }
switch (size) {
case AARCH64_INSN_SIZE_32:
@@ -621,6 +680,8 @@ u32 aarch64_insn_gen_ldadd(enum aarch64_insn_register result,
insn = aarch64_insn_encode_ldst_size(size, insn);
+ insn = aarch64_insn_encode_ldst_order(order, insn);
+
insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn,
result);
@@ -631,17 +692,68 @@ u32 aarch64_insn_gen_ldadd(enum aarch64_insn_register result,
value);
}
-u32 aarch64_insn_gen_stadd(enum aarch64_insn_register address,
- enum aarch64_insn_register value,
- enum aarch64_insn_size_type size)
+static u32 aarch64_insn_encode_cas_order(enum aarch64_insn_mem_order_type type,
+ u32 insn)
{
- /*
- * STADD is simply encoded as an alias for LDADD with XZR as
- * the destination register.
- */
- return aarch64_insn_gen_ldadd(AARCH64_INSN_REG_ZR, address,
- value, size);
+ u32 order;
+
+ switch (type) {
+ case AARCH64_INSN_MEM_ORDER_NONE:
+ order = 0;
+ break;
+ case AARCH64_INSN_MEM_ORDER_ACQ:
+ order = BIT(22);
+ break;
+ case AARCH64_INSN_MEM_ORDER_REL:
+ order = BIT(15);
+ break;
+ case AARCH64_INSN_MEM_ORDER_ACQREL:
+ order = BIT(15) | BIT(22);
+ break;
+ default:
+ pr_err("%s: unknown mem order %d\n", __func__, type);
+ return AARCH64_BREAK_FAULT;
+ }
+
+ insn &= ~(BIT(15) | BIT(22));
+ insn |= order;
+
+ return insn;
+}
+
+u32 aarch64_insn_gen_cas(enum aarch64_insn_register result,
+ enum aarch64_insn_register address,
+ enum aarch64_insn_register value,
+ enum aarch64_insn_size_type size,
+ enum aarch64_insn_mem_order_type order)
+{
+ u32 insn;
+
+ switch (size) {
+ case AARCH64_INSN_SIZE_32:
+ case AARCH64_INSN_SIZE_64:
+ break;
+ default:
+ pr_err("%s: unimplemented size encoding %d\n", __func__, size);
+ return AARCH64_BREAK_FAULT;
+ }
+
+ insn = aarch64_insn_get_cas_value();
+
+ insn = aarch64_insn_encode_ldst_size(size, insn);
+
+ insn = aarch64_insn_encode_cas_order(order, insn);
+
+ insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn,
+ result);
+
+ insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
+ address);
+
+ return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RS, insn,
+ value);
}
+#endif
static u32 aarch64_insn_encode_prfm_imm(enum aarch64_insn_prfm_type type,
enum aarch64_insn_prfm_target target,
@@ -1379,7 +1491,7 @@ static u32 aarch64_encode_immediate(u64 imm,
* Compute the rotation to get a continuous set of
* ones, with the first bit set at position 0
*/
- ror = fls(~imm);
+ ror = fls64(~imm);
}
/*
@@ -1456,3 +1568,48 @@ u32 aarch64_insn_gen_extr(enum aarch64_insn_variant variant,
insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, Rn);
return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, Rm);
}
+
+u32 aarch64_insn_gen_dmb(enum aarch64_insn_mb_type type)
+{
+ u32 opt;
+ u32 insn;
+
+ switch (type) {
+ case AARCH64_INSN_MB_SY:
+ opt = 0xf;
+ break;
+ case AARCH64_INSN_MB_ST:
+ opt = 0xe;
+ break;
+ case AARCH64_INSN_MB_LD:
+ opt = 0xd;
+ break;
+ case AARCH64_INSN_MB_ISH:
+ opt = 0xb;
+ break;
+ case AARCH64_INSN_MB_ISHST:
+ opt = 0xa;
+ break;
+ case AARCH64_INSN_MB_ISHLD:
+ opt = 0x9;
+ break;
+ case AARCH64_INSN_MB_NSH:
+ opt = 0x7;
+ break;
+ case AARCH64_INSN_MB_NSHST:
+ opt = 0x6;
+ break;
+ case AARCH64_INSN_MB_NSHLD:
+ opt = 0x5;
+ break;
+ default:
+ pr_err("%s: unknown dmb type %d\n", __func__, type);
+ return AARCH64_BREAK_FAULT;
+ }
+
+ insn = aarch64_insn_get_dmb_value();
+ insn &= ~GENMASK(11, 8);
+ insn |= (opt << 8);
+
+ return insn;
+}
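
aarch64_insn_gen_dmb() patches the DMB "option" field, CRm, in bits [11:8]. Assuming aarch64_insn_get_dmb_value() returns the canonical 0xd50330bf pattern with CRm cleared, opt = 0xb yields 0xd5033bbf, the standard encoding of DMB ISH. A host-side check under that assumption:

    #include <assert.h>
    #include <stdint.h>

    static uint32_t gen_dmb(uint32_t opt)
    {
            uint32_t insn = 0xd50330bfu;    /* assumed base: DMB, CRm = 0 */

            insn &= ~(0xfu << 8);           /* clear bits [11:8] */
            insn |= opt << 8;
            return insn;
    }

    int main(void)
    {
            assert(gen_dmb(0xb) == 0xd5033bbfu);    /* DMB ISH */
            assert(gen_dmb(0xf) == 0xd5033fbfu);    /* DMB SY */
            return 0;
    }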
diff --git a/arch/arm64/net/bpf_jit.h b/arch/arm64/net/bpf_jit.h
index cc0cf0f5c7c3..dd59b5ad8fe4 100644
--- a/arch/arm64/net/bpf_jit.h
+++ b/arch/arm64/net/bpf_jit.h
@@ -88,10 +88,42 @@
/* [Rn] = Rt; (atomic) Rs = [state] */
#define A64_STXR(sf, Rt, Rn, Rs) \
A64_LSX(sf, Rt, Rn, Rs, STORE_EX)
+/* [Rn] = Rt (store release); (atomic) Rs = [state] */
+#define A64_STLXR(sf, Rt, Rn, Rs) \
+ aarch64_insn_gen_load_store_ex(Rt, Rn, Rs, A64_SIZE(sf), \
+ AARCH64_INSN_LDST_STORE_REL_EX)
+
+/*
+ * LSE atomics
+ *
+ * ST{ADD,CLR,SET,EOR} is simply encoded as an alias for
+ * LD{ADD,CLR,SET,EOR} with XZR as the destination register.
+ */
+#define A64_ST_OP(sf, Rn, Rs, op) \
+ aarch64_insn_gen_atomic_ld_op(A64_ZR, Rn, Rs, \
+ A64_SIZE(sf), AARCH64_INSN_MEM_ATOMIC_##op, \
+ AARCH64_INSN_MEM_ORDER_NONE)
+/* [Rn] <op>= Rs */
+#define A64_STADD(sf, Rn, Rs) A64_ST_OP(sf, Rn, Rs, ADD)
+#define A64_STCLR(sf, Rn, Rs) A64_ST_OP(sf, Rn, Rs, CLR)
+#define A64_STEOR(sf, Rn, Rs) A64_ST_OP(sf, Rn, Rs, EOR)
+#define A64_STSET(sf, Rn, Rs) A64_ST_OP(sf, Rn, Rs, SET)
-/* LSE atomics */
-#define A64_STADD(sf, Rn, Rs) \
- aarch64_insn_gen_stadd(Rn, Rs, A64_SIZE(sf))
+#define A64_LD_OP_AL(sf, Rt, Rn, Rs, op) \
+ aarch64_insn_gen_atomic_ld_op(Rt, Rn, Rs, \
+ A64_SIZE(sf), AARCH64_INSN_MEM_ATOMIC_##op, \
+ AARCH64_INSN_MEM_ORDER_ACQREL)
+/* Rt = [Rn] (load acquire); [Rn] <op>= Rs (store release) */
+#define A64_LDADDAL(sf, Rt, Rn, Rs) A64_LD_OP_AL(sf, Rt, Rn, Rs, ADD)
+#define A64_LDCLRAL(sf, Rt, Rn, Rs) A64_LD_OP_AL(sf, Rt, Rn, Rs, CLR)
+#define A64_LDEORAL(sf, Rt, Rn, Rs) A64_LD_OP_AL(sf, Rt, Rn, Rs, EOR)
+#define A64_LDSETAL(sf, Rt, Rn, Rs) A64_LD_OP_AL(sf, Rt, Rn, Rs, SET)
+/* Rt = [Rn] (load acquire); [Rn] = Rs (store release) */
+#define A64_SWPAL(sf, Rt, Rn, Rs) A64_LD_OP_AL(sf, Rt, Rn, Rs, SWP)
+/* Rs = CAS(Rn, Rs, Rt) (load acquire & store release) */
+#define A64_CASAL(sf, Rt, Rn, Rs) \
+ aarch64_insn_gen_cas(Rt, Rn, Rs, A64_SIZE(sf), \
+ AARCH64_INSN_MEM_ORDER_ACQREL)
/* Add/subtract (immediate) */
#define A64_ADDSUB_IMM(sf, Rd, Rn, imm12, type) \
@@ -196,6 +228,9 @@
#define A64_ANDS(sf, Rd, Rn, Rm) A64_LOGIC_SREG(sf, Rd, Rn, Rm, AND_SETFLAGS)
/* Rn & Rm; set condition flags */
#define A64_TST(sf, Rn, Rm) A64_ANDS(sf, A64_ZR, Rn, Rm)
+/* Rd = ~Rm (alias of ORN with A64_ZR as Rn) */
+#define A64_MVN(sf, Rd, Rm) \
+ A64_LOGIC_SREG(sf, Rd, A64_ZR, Rm, ORN)
/* Logical (immediate) */
#define A64_LOGIC_IMM(sf, Rd, Rn, imm, type) ({ \
@@ -219,4 +254,7 @@
#define A64_BTI_J A64_HINT(AARCH64_INSN_HINT_BTIJ)
#define A64_BTI_JC A64_HINT(AARCH64_INSN_HINT_BTIJC)
+/* DMB */
+#define A64_DMB_ISH aarch64_insn_gen_dmb(AARCH64_INSN_MB_ISH)
+
#endif /* _BPF_JIT_H */
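
The alias comment above can be checked against the opcodes in insn.h: the ST forms are the LD forms with XZR (register number 31) in the Rt slot. A host-side sketch, assuming the 0x38200000 ldadd base from insn.h with the 32-bit size field 0b10 in bits [31:30]:

    #include <assert.h>
    #include <stdint.h>

    /* 32-bit LDADD: Rs at [20:16], Rn at [9:5], Rt at [4:0]. */
    static uint32_t ldadd32(uint32_t rt, uint32_t rn, uint32_t rs)
    {
            return 0xb8200000u | (rs << 16) | (rn << 5) | rt;
    }

    int main(void)
    {
            /* stadd w0, [x1] is ldadd w0, wzr, [x1] */
            assert(ldadd32(31, 1, 0) == 0xb820003fu);
            return 0;
    }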
diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c
index e96d4d87291f..e850c69e128c 100644
--- a/arch/arm64/net/bpf_jit_comp.c
+++ b/arch/arm64/net/bpf_jit_comp.c
@@ -27,6 +27,17 @@
#define TCALL_CNT (MAX_BPF_JIT_REG + 2)
#define TMP_REG_3 (MAX_BPF_JIT_REG + 3)
+#define check_imm(bits, imm) do { \
+ if ((((imm) > 0) && ((imm) >> (bits))) || \
+ (((imm) < 0) && (~(imm) >> (bits)))) { \
+ pr_info("[%2d] imm=%d(0x%x) out of range\n", \
+ i, imm, imm); \
+ return -EINVAL; \
+ } \
+} while (0)
+#define check_imm19(imm) check_imm(19, imm)
+#define check_imm26(imm) check_imm(26, imm)
+
/* Map BPF registers to A64 registers */
static const int bpf2a64[] = {
/* return value from in-kernel function, and exit value from eBPF */
@@ -329,6 +340,170 @@ static int emit_bpf_tail_call(struct jit_ctx *ctx)
#undef jmp_offset
}
+#ifdef CONFIG_ARM64_LSE_ATOMICS
+static int emit_lse_atomic(const struct bpf_insn *insn, struct jit_ctx *ctx)
+{
+ const u8 code = insn->code;
+ const u8 dst = bpf2a64[insn->dst_reg];
+ const u8 src = bpf2a64[insn->src_reg];
+ const u8 tmp = bpf2a64[TMP_REG_1];
+ const u8 tmp2 = bpf2a64[TMP_REG_2];
+ const bool isdw = BPF_SIZE(code) == BPF_DW;
+ const s16 off = insn->off;
+ u8 reg;
+
+ if (!off) {
+ reg = dst;
+ } else {
+ emit_a64_mov_i(1, tmp, off, ctx);
+ emit(A64_ADD(1, tmp, tmp, dst), ctx);
+ reg = tmp;
+ }
+
+ switch (insn->imm) {
+ /* lock *(u32/u64 *)(dst_reg + off) <op>= src_reg */
+ case BPF_ADD:
+ emit(A64_STADD(isdw, reg, src), ctx);
+ break;
+ case BPF_AND:
+ emit(A64_MVN(isdw, tmp2, src), ctx);
+ emit(A64_STCLR(isdw, reg, tmp2), ctx);
+ break;
+ case BPF_OR:
+ emit(A64_STSET(isdw, reg, src), ctx);
+ break;
+ case BPF_XOR:
+ emit(A64_STEOR(isdw, reg, src), ctx);
+ break;
+ /* src_reg = atomic_fetch_<op>(dst_reg + off, src_reg) */
+ case BPF_ADD | BPF_FETCH:
+ emit(A64_LDADDAL(isdw, src, reg, src), ctx);
+ break;
+ case BPF_AND | BPF_FETCH:
+ emit(A64_MVN(isdw, tmp2, src), ctx);
+ emit(A64_LDCLRAL(isdw, src, reg, tmp2), ctx);
+ break;
+ case BPF_OR | BPF_FETCH:
+ emit(A64_LDSETAL(isdw, src, reg, src), ctx);
+ break;
+ case BPF_XOR | BPF_FETCH:
+ emit(A64_LDEORAL(isdw, src, reg, src), ctx);
+ break;
+ /* src_reg = atomic_xchg(dst_reg + off, src_reg); */
+ case BPF_XCHG:
+ emit(A64_SWPAL(isdw, src, reg, src), ctx);
+ break;
+ /* r0 = atomic_cmpxchg(dst_reg + off, r0, src_reg); */
+ case BPF_CMPXCHG:
+ emit(A64_CASAL(isdw, src, reg, bpf2a64[BPF_REG_0]), ctx);
+ break;
+ default:
+ pr_err_once("unknown atomic op code %02x\n", insn->imm);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+#else
+static inline int emit_lse_atomic(const struct bpf_insn *insn, struct jit_ctx *ctx)
+{
+ return -EINVAL;
+}
+#endif
+
+static int emit_ll_sc_atomic(const struct bpf_insn *insn, struct jit_ctx *ctx)
+{
+ const u8 code = insn->code;
+ const u8 dst = bpf2a64[insn->dst_reg];
+ const u8 src = bpf2a64[insn->src_reg];
+ const u8 tmp = bpf2a64[TMP_REG_1];
+ const u8 tmp2 = bpf2a64[TMP_REG_2];
+ const u8 tmp3 = bpf2a64[TMP_REG_3];
+ const int i = insn - ctx->prog->insnsi;
+ const s32 imm = insn->imm;
+ const s16 off = insn->off;
+ const bool isdw = BPF_SIZE(code) == BPF_DW;
+ u8 reg;
+ s32 jmp_offset;
+
+ if (!off) {
+ reg = dst;
+ } else {
+ emit_a64_mov_i(1, tmp, off, ctx);
+ emit(A64_ADD(1, tmp, tmp, dst), ctx);
+ reg = tmp;
+ }
+
+ if (imm == BPF_ADD || imm == BPF_AND ||
+ imm == BPF_OR || imm == BPF_XOR) {
+ /* lock *(u32/u64 *)(dst_reg + off) <op>= src_reg */
+ emit(A64_LDXR(isdw, tmp2, reg), ctx);
+ if (imm == BPF_ADD)
+ emit(A64_ADD(isdw, tmp2, tmp2, src), ctx);
+ else if (imm == BPF_AND)
+ emit(A64_AND(isdw, tmp2, tmp2, src), ctx);
+ else if (imm == BPF_OR)
+ emit(A64_ORR(isdw, tmp2, tmp2, src), ctx);
+ else
+ emit(A64_EOR(isdw, tmp2, tmp2, src), ctx);
+ emit(A64_STXR(isdw, tmp2, reg, tmp3), ctx);
+ jmp_offset = -3;
+ check_imm19(jmp_offset);
+ emit(A64_CBNZ(0, tmp3, jmp_offset), ctx);
+ } else if (imm == (BPF_ADD | BPF_FETCH) ||
+ imm == (BPF_AND | BPF_FETCH) ||
+ imm == (BPF_OR | BPF_FETCH) ||
+ imm == (BPF_XOR | BPF_FETCH)) {
+ /* src_reg = atomic_fetch_<op>(dst_reg + off, src_reg) */
+ const u8 ax = bpf2a64[BPF_REG_AX];
+
+ emit(A64_MOV(isdw, ax, src), ctx);
+ emit(A64_LDXR(isdw, src, reg), ctx);
+ if (imm == (BPF_ADD | BPF_FETCH))
+ emit(A64_ADD(isdw, tmp2, src, ax), ctx);
+ else if (imm == (BPF_AND | BPF_FETCH))
+ emit(A64_AND(isdw, tmp2, src, ax), ctx);
+ else if (imm == (BPF_OR | BPF_FETCH))
+ emit(A64_ORR(isdw, tmp2, src, ax), ctx);
+ else
+ emit(A64_EOR(isdw, tmp2, src, ax), ctx);
+ emit(A64_STLXR(isdw, tmp2, reg, tmp3), ctx);
+ jmp_offset = -3;
+ check_imm19(jmp_offset);
+ emit(A64_CBNZ(0, tmp3, jmp_offset), ctx);
+ emit(A64_DMB_ISH, ctx);
+ } else if (imm == BPF_XCHG) {
+ /* src_reg = atomic_xchg(dst_reg + off, src_reg); */
+ emit(A64_MOV(isdw, tmp2, src), ctx);
+ emit(A64_LDXR(isdw, src, reg), ctx);
+ emit(A64_STLXR(isdw, tmp2, reg, tmp3), ctx);
+ jmp_offset = -2;
+ check_imm19(jmp_offset);
+ emit(A64_CBNZ(0, tmp3, jmp_offset), ctx);
+ emit(A64_DMB_ISH, ctx);
+ } else if (imm == BPF_CMPXCHG) {
+ /* r0 = atomic_cmpxchg(dst_reg + off, r0, src_reg); */
+ const u8 r0 = bpf2a64[BPF_REG_0];
+
+ emit(A64_MOV(isdw, tmp2, r0), ctx);
+ emit(A64_LDXR(isdw, r0, reg), ctx);
+ emit(A64_EOR(isdw, tmp3, r0, tmp2), ctx);
+ jmp_offset = 4;
+ check_imm19(jmp_offset);
+ emit(A64_CBNZ(isdw, tmp3, jmp_offset), ctx);
+ emit(A64_STLXR(isdw, src, reg, tmp3), ctx);
+ jmp_offset = -4;
+ check_imm19(jmp_offset);
+ emit(A64_CBNZ(0, tmp3, jmp_offset), ctx);
+ emit(A64_DMB_ISH, ctx);
+ } else {
+ pr_err_once("unknown atomic op code %02x\n", imm);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static void build_epilogue(struct jit_ctx *ctx)
{
const u8 r0 = bpf2a64[BPF_REG_0];
@@ -434,29 +609,16 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
const u8 src = bpf2a64[insn->src_reg];
const u8 tmp = bpf2a64[TMP_REG_1];
const u8 tmp2 = bpf2a64[TMP_REG_2];
- const u8 tmp3 = bpf2a64[TMP_REG_3];
const s16 off = insn->off;
const s32 imm = insn->imm;
const int i = insn - ctx->prog->insnsi;
const bool is64 = BPF_CLASS(code) == BPF_ALU64 ||
BPF_CLASS(code) == BPF_JMP;
- const bool isdw = BPF_SIZE(code) == BPF_DW;
- u8 jmp_cond, reg;
+ u8 jmp_cond;
s32 jmp_offset;
u32 a64_insn;
int ret;
-#define check_imm(bits, imm) do { \
- if ((((imm) > 0) && ((imm) >> (bits))) || \
- (((imm) < 0) && (~(imm) >> (bits)))) { \
- pr_info("[%2d] imm=%d(0x%x) out of range\n", \
- i, imm, imm); \
- return -EINVAL; \
- } \
-} while (0)
-#define check_imm19(imm) check_imm(19, imm)
-#define check_imm26(imm) check_imm(26, imm)
-
switch (code) {
/* dst = src */
case BPF_ALU | BPF_MOV | BPF_X:
@@ -891,33 +1053,12 @@ emit_cond_jmp:
case BPF_STX | BPF_ATOMIC | BPF_W:
case BPF_STX | BPF_ATOMIC | BPF_DW:
- if (insn->imm != BPF_ADD) {
- pr_err_once("unknown atomic op code %02x\n", insn->imm);
- return -EINVAL;
- }
-
- /* STX XADD: lock *(u32 *)(dst + off) += src
- * and
- * STX XADD: lock *(u64 *)(dst + off) += src
- */
-
- if (!off) {
- reg = dst;
- } else {
- emit_a64_mov_i(1, tmp, off, ctx);
- emit(A64_ADD(1, tmp, tmp, dst), ctx);
- reg = tmp;
- }
- if (cpus_have_cap(ARM64_HAS_LSE_ATOMICS)) {
- emit(A64_STADD(isdw, reg, src), ctx);
- } else {
- emit(A64_LDXR(isdw, tmp2, reg), ctx);
- emit(A64_ADD(isdw, tmp2, tmp2, src), ctx);
- emit(A64_STXR(isdw, tmp2, reg, tmp3), ctx);
- jmp_offset = -3;
- check_imm19(jmp_offset);
- emit(A64_CBNZ(0, tmp3, jmp_offset), ctx);
- }
+ if (cpus_have_cap(ARM64_HAS_LSE_ATOMICS))
+ ret = emit_lse_atomic(insn, ctx);
+ else
+ ret = emit_ll_sc_atomic(insn, ctx);
+ if (ret)
+ return ret;
break;
default:
@@ -1049,15 +1190,18 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
goto out_off;
}
- /* 1. Initial fake pass to compute ctx->idx. */
-
- /* Fake pass to fill in ctx->offset. */
- if (build_body(&ctx, extra_pass)) {
+ /*
+ * 1. Initial fake pass to compute ctx->idx and ctx->offset.
+ *
+ * BPF line info needs ctx->offset[i] to be the offset of
+ * instruction[i] in jited image, so build prologue first.
+ */
+ if (build_prologue(&ctx, was_classic)) {
prog = orig_prog;
goto out_off;
}
- if (build_prologue(&ctx, was_classic)) {
+ if (build_body(&ctx, extra_pass)) {
prog = orig_prog;
goto out_off;
}
@@ -1130,6 +1274,11 @@ skip_init_ctx:
prog->jited_len = prog_size;
if (!prog->is_func || extra_pass) {
+ int i;
+
+ /* offset[prog->len] is the size of the program */
+ for (i = 0; i <= prog->len; i++)
+ ctx.offset[i] *= AARCH64_INSN_SIZE;
bpf_prog_fill_jited_linfo(prog, ctx.offset + 1);
out_off:
kfree(ctx.offset);
@@ -1143,6 +1292,11 @@ out:
return prog;
}
+bool bpf_jit_supports_kfunc_call(void)
+{
+ return true;
+}
+
u64 bpf_jit_alloc_exec_limit(void)
{
return VMALLOC_END - VMALLOC_START;
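}

For reference, the BPF semantics both emitters implement line up with fully ordered C11 atomics: the fetch variants and BPF_CMPXCHG are acquire+release, which is why the LL/SC path pairs STLXR with a trailing DMB ISH. A user-space analogue using compiler builtins, illustrative only:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
            uint64_t v = 5, src = 3, r0 = 5;

            /* BPF_ADD | BPF_FETCH: src = atomic_fetch_add(&v, src) */
            src = __atomic_fetch_add(&v, src, __ATOMIC_SEQ_CST);
            assert(src == 5 && v == 8);

            /* BPF_CMPXCHG: r0 = cmpxchg(&v, r0, src) */
            uint64_t expected = r0;
            __atomic_compare_exchange_n(&v, &expected, src, 0,
                                        __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
            r0 = expected;  /* old value, match or not */
            assert(r0 == 8 && v == 8);      /* 5 != 8: no store */
            return 0;
    }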