Diffstat (limited to 'arch')
-rw-r--r--  arch/arm/boot/dts/am33xx.dtsi                 2
-rw-r--r--  arch/arm/boot/dts/am4372.dtsi                 2
-rw-r--r--  arch/arm/boot/dts/armada-xp-matrix.dts        4
-rw-r--r--  arch/arm/boot/dts/vt8500.dtsi                 6
-rw-r--r--  arch/arm/boot/dts/wm8650.dtsi                 6
-rw-r--r--  arch/arm/boot/dts/wm8850.dtsi                 6
-rw-r--r--  arch/arm/mach-tegra/board-paz00.c             4
-rw-r--r--  arch/arm/net/bpf_jit_32.c                   139
-rw-r--r--  arch/mips/bcm47xx/sprom.c                     1
-rw-r--r--  arch/powerpc/net/bpf_jit_64.S                 2
-rw-r--r--  arch/powerpc/net/bpf_jit_comp.c             157
-rw-r--r--  arch/powerpc/sysdev/fsl_soc.c                32
-rw-r--r--  arch/s390/net/bpf_jit_comp.c                163
-rw-r--r--  arch/sparc/include/asm/checksum_32.h         12
-rw-r--r--  arch/sparc/include/asm/checksum_64.h         12
-rw-r--r--  arch/sparc/net/bpf_jit_comp.c               162
-rw-r--r--  arch/x86/include/asm/checksum_64.h            9
-rw-r--r--  arch/x86/net/bpf_jit.S                       77
-rw-r--r--  arch/x86/net/bpf_jit_comp.c                1399
19 files changed, 1151 insertions(+), 1044 deletions(-)
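
All of the JIT conversions below follow one pattern: drop the kernel-internal BPF_S_* instruction enum and switch directly on the raw classic-BPF opcode, which packs an instruction class, operand size, and addressing mode into a single u16; ancillary loads (absolute loads at the magic negative SKF_AD_OFF offsets) are first folded into BPF_ANC | SKF_AD_* by bpf_anc_helper(). A minimal, self-contained sketch of that encoding follows (editor's illustration, not part of the patch; the in-kernel helper lives in the kernel's filter.h and whitelists every SKF_AD_* constant, only PROTOCOL is imitated here):

#include <stdint.h>
#include <stdio.h>

/* instruction class (low 3 bits) */
#define BPF_LD    0x00
/* operand size (bits 3-4) */
#define BPF_W     0x00
#define BPF_H     0x08
#define BPF_B     0x10
/* addressing mode (bits 5-7) */
#define BPF_ABS   0x20
#define BPF_LEN   0x80
/* pseudo flag the JITs use to mark ancillary loads */
#define BPF_ANC          (1 << 15)
#define SKF_AD_OFF       (-0x1000)
#define SKF_AD_PROTOCOL  0

struct sock_filter { uint16_t code; uint8_t jt, jf; uint32_t k; };

/* imitation of bpf_anc_helper(): rewrite an absolute load at a magic
 * negative offset into BPF_ANC | SKF_AD_*, otherwise return the raw code */
static uint16_t anc_helper(const struct sock_filter *f)
{
	switch (f->code) {
	case BPF_LD | BPF_W | BPF_ABS:
	case BPF_LD | BPF_H | BPF_ABS:
	case BPF_LD | BPF_B | BPF_ABS:
		if ((int32_t)f->k == SKF_AD_OFF + SKF_AD_PROTOCOL)
			return BPF_ANC | SKF_AD_PROTOCOL;
	}
	return f->code;
}

int main(void)
{
	struct sock_filter len_ld  = { BPF_LD | BPF_W | BPF_LEN, 0, 0, 0 };
	struct sock_filter prot_ld = { BPF_LD | BPF_H | BPF_ABS, 0, 0,
				       (uint32_t)(SKF_AD_OFF + SKF_AD_PROTOCOL) };

	printf("0x%04x\n", anc_helper(&len_ld));  /* 0x0080: plain LD|W|LEN    */
	printf("0x%04x\n", anc_helper(&prot_ld)); /* 0x8000: ANC|AD_PROTOCOL   */
	return 0;
}

With that, case labels like BPF_LD | BPF_W | BPF_ABS in the switches below compare against the very opcodes userspace hands to the kernel, and the separate BPF_S_* translation layer disappears.
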
diff --git a/arch/arm/boot/dts/am33xx.dtsi b/arch/arm/boot/dts/am33xx.dtsi
index 9f53e824b03..4a4e02d0ce9 100644
--- a/arch/arm/boot/dts/am33xx.dtsi
+++ b/arch/arm/boot/dts/am33xx.dtsi
@@ -662,6 +662,8 @@
mac: ethernet@4a100000 {
compatible = "ti,cpsw";
ti,hwmods = "cpgmac0";
+ clocks = <&cpsw_125mhz_gclk>, <&cpsw_cpts_rft_clk>;
+ clock-names = "fck", "cpts";
cpdma_channels = <8>;
ale_entries = <1024>;
bd_ram_size = <0x2000>;
diff --git a/arch/arm/boot/dts/am4372.dtsi b/arch/arm/boot/dts/am4372.dtsi
index db464d7eaca..49fa5962225 100644
--- a/arch/arm/boot/dts/am4372.dtsi
+++ b/arch/arm/boot/dts/am4372.dtsi
@@ -490,6 +490,8 @@
#address-cells = <1>;
#size-cells = <1>;
ti,hwmods = "cpgmac0";
+ clocks = <&cpsw_125mhz_gclk>, <&cpsw_cpts_rft_clk>;
+ clock-names = "fck", "cpts";
status = "disabled";
cpdma_channels = <8>;
ale_entries = <1024>;
diff --git a/arch/arm/boot/dts/armada-xp-matrix.dts b/arch/arm/boot/dts/armada-xp-matrix.dts
index 25674fe81f7..7e291e2ef4b 100644
--- a/arch/arm/boot/dts/armada-xp-matrix.dts
+++ b/arch/arm/boot/dts/armada-xp-matrix.dts
@@ -57,6 +57,10 @@
ethernet@30000 {
status = "okay";
phy-mode = "sgmii";
+ fixed-link {
+ speed = <1000>;
+ full-duplex;
+ };
};
pcie-controller {
diff --git a/arch/arm/boot/dts/vt8500.dtsi b/arch/arm/boot/dts/vt8500.dtsi
index 51d0e912c8f..1929ad390d8 100644
--- a/arch/arm/boot/dts/vt8500.dtsi
+++ b/arch/arm/boot/dts/vt8500.dtsi
@@ -165,5 +165,11 @@
reg = <0xd8100000 0x10000>;
interrupts = <48>;
};
+
+ ethernet@d8004000 {
+ compatible = "via,vt8500-rhine";
+ reg = <0xd8004000 0x100>;
+ interrupts = <10>;
+ };
};
};
diff --git a/arch/arm/boot/dts/wm8650.dtsi b/arch/arm/boot/dts/wm8650.dtsi
index 7525982262a..b1c59a766a1 100644
--- a/arch/arm/boot/dts/wm8650.dtsi
+++ b/arch/arm/boot/dts/wm8650.dtsi
@@ -218,5 +218,11 @@
reg = <0xd8100000 0x10000>;
interrupts = <48>;
};
+
+ ethernet@d8004000 {
+ compatible = "via,vt8500-rhine";
+ reg = <0xd8004000 0x100>;
+ interrupts = <10>;
+ };
};
};
diff --git a/arch/arm/boot/dts/wm8850.dtsi b/arch/arm/boot/dts/wm8850.dtsi
index d98386dd288..8fbccfbe75f 100644
--- a/arch/arm/boot/dts/wm8850.dtsi
+++ b/arch/arm/boot/dts/wm8850.dtsi
@@ -298,5 +298,11 @@
bus-width = <4>;
sdon-inverted;
};
+
+ ethernet@d8004000 {
+ compatible = "via,vt8500-rhine";
+ reg = <0xd8004000 0x100>;
+ interrupts = <10>;
+ };
};
};
diff --git a/arch/arm/mach-tegra/board-paz00.c b/arch/arm/mach-tegra/board-paz00.c
index e4dec9fcb08..9c6029ba526 100644
--- a/arch/arm/mach-tegra/board-paz00.c
+++ b/arch/arm/mach-tegra/board-paz00.c
@@ -23,9 +23,7 @@
#include "board.h"
static struct rfkill_gpio_platform_data wifi_rfkill_platform_data = {
- .name = "wifi_rfkill",
- .reset_gpio = 25, /* PD1 */
- .shutdown_gpio = 85, /* PK5 */
+ .name = "wifi_rfkill",
.type = RFKILL_TYPE_WLAN,
};
diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c
index 6f879c319a9..fb5503ce016 100644
--- a/arch/arm/net/bpf_jit_32.c
+++ b/arch/arm/net/bpf_jit_32.c
@@ -136,7 +136,7 @@ static u16 saved_regs(struct jit_ctx *ctx)
u16 ret = 0;
if ((ctx->skf->len > 1) ||
- (ctx->skf->insns[0].code == BPF_S_RET_A))
+ (ctx->skf->insns[0].code == (BPF_RET | BPF_A)))
ret |= 1 << r_A;
#ifdef CONFIG_FRAME_POINTER
@@ -164,18 +164,10 @@ static inline int mem_words_used(struct jit_ctx *ctx)
static inline bool is_load_to_a(u16 inst)
{
switch (inst) {
- case BPF_S_LD_W_LEN:
- case BPF_S_LD_W_ABS:
- case BPF_S_LD_H_ABS:
- case BPF_S_LD_B_ABS:
- case BPF_S_ANC_CPU:
- case BPF_S_ANC_IFINDEX:
- case BPF_S_ANC_MARK:
- case BPF_S_ANC_PROTOCOL:
- case BPF_S_ANC_RXHASH:
- case BPF_S_ANC_VLAN_TAG:
- case BPF_S_ANC_VLAN_TAG_PRESENT:
- case BPF_S_ANC_QUEUE:
+ case BPF_LD | BPF_W | BPF_LEN:
+ case BPF_LD | BPF_W | BPF_ABS:
+ case BPF_LD | BPF_H | BPF_ABS:
+ case BPF_LD | BPF_B | BPF_ABS:
return true;
default:
return false;
@@ -215,7 +207,7 @@ static void build_prologue(struct jit_ctx *ctx)
emit(ARM_MOV_I(r_X, 0), ctx);
/* do not leak kernel data to userspace */
- if ((first_inst != BPF_S_RET_K) && !(is_load_to_a(first_inst)))
+ if ((first_inst != (BPF_RET | BPF_K)) && !(is_load_to_a(first_inst)))
emit(ARM_MOV_I(r_A, 0), ctx);
/* stack space for the BPF_MEM words */
@@ -480,36 +472,39 @@ static int build_body(struct jit_ctx *ctx)
u32 k;
for (i = 0; i < prog->len; i++) {
+ u16 code;
+
inst = &(prog->insns[i]);
/* K as an immediate value operand */
k = inst->k;
+ code = bpf_anc_helper(inst);
/* compute offsets only in the fake pass */
if (ctx->target == NULL)
ctx->offsets[i] = ctx->idx * 4;
- switch (inst->code) {
- case BPF_S_LD_IMM:
+ switch (code) {
+ case BPF_LD | BPF_IMM:
emit_mov_i(r_A, k, ctx);
break;
- case BPF_S_LD_W_LEN:
+ case BPF_LD | BPF_W | BPF_LEN:
ctx->seen |= SEEN_SKB;
BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, len) != 4);
emit(ARM_LDR_I(r_A, r_skb,
offsetof(struct sk_buff, len)), ctx);
break;
- case BPF_S_LD_MEM:
+ case BPF_LD | BPF_MEM:
/* A = scratch[k] */
ctx->seen |= SEEN_MEM_WORD(k);
emit(ARM_LDR_I(r_A, ARM_SP, SCRATCH_OFF(k)), ctx);
break;
- case BPF_S_LD_W_ABS:
+ case BPF_LD | BPF_W | BPF_ABS:
load_order = 2;
goto load;
- case BPF_S_LD_H_ABS:
+ case BPF_LD | BPF_H | BPF_ABS:
load_order = 1;
goto load;
- case BPF_S_LD_B_ABS:
+ case BPF_LD | BPF_B | BPF_ABS:
load_order = 0;
load:
/* the interpreter will deal with the negative K */
@@ -552,31 +547,31 @@ load_common:
emit_err_ret(ARM_COND_NE, ctx);
emit(ARM_MOV_R(r_A, ARM_R0), ctx);
break;
- case BPF_S_LD_W_IND:
+ case BPF_LD | BPF_W | BPF_IND:
load_order = 2;
goto load_ind;
- case BPF_S_LD_H_IND:
+ case BPF_LD | BPF_H | BPF_IND:
load_order = 1;
goto load_ind;
- case BPF_S_LD_B_IND:
+ case BPF_LD | BPF_B | BPF_IND:
load_order = 0;
load_ind:
OP_IMM3(ARM_ADD, r_off, r_X, k, ctx);
goto load_common;
- case BPF_S_LDX_IMM:
+ case BPF_LDX | BPF_IMM:
ctx->seen |= SEEN_X;
emit_mov_i(r_X, k, ctx);
break;
- case BPF_S_LDX_W_LEN:
+ case BPF_LDX | BPF_W | BPF_LEN:
ctx->seen |= SEEN_X | SEEN_SKB;
emit(ARM_LDR_I(r_X, r_skb,
offsetof(struct sk_buff, len)), ctx);
break;
- case BPF_S_LDX_MEM:
+ case BPF_LDX | BPF_MEM:
ctx->seen |= SEEN_X | SEEN_MEM_WORD(k);
emit(ARM_LDR_I(r_X, ARM_SP, SCRATCH_OFF(k)), ctx);
break;
- case BPF_S_LDX_B_MSH:
+ case BPF_LDX | BPF_B | BPF_MSH:
/* x = ((*(frame + k)) & 0xf) << 2; */
ctx->seen |= SEEN_X | SEEN_DATA | SEEN_CALL;
/* the interpreter should deal with the negative K */
@@ -606,113 +601,113 @@ load_ind:
emit(ARM_AND_I(r_X, ARM_R0, 0x00f), ctx);
emit(ARM_LSL_I(r_X, r_X, 2), ctx);
break;
- case BPF_S_ST:
+ case BPF_ST:
ctx->seen |= SEEN_MEM_WORD(k);
emit(ARM_STR_I(r_A, ARM_SP, SCRATCH_OFF(k)), ctx);
break;
- case BPF_S_STX:
+ case BPF_STX:
update_on_xread(ctx);
ctx->seen |= SEEN_MEM_WORD(k);
emit(ARM_STR_I(r_X, ARM_SP, SCRATCH_OFF(k)), ctx);
break;
- case BPF_S_ALU_ADD_K:
+ case BPF_ALU | BPF_ADD | BPF_K:
/* A += K */
OP_IMM3(ARM_ADD, r_A, r_A, k, ctx);
break;
- case BPF_S_ALU_ADD_X:
+ case BPF_ALU | BPF_ADD | BPF_X:
update_on_xread(ctx);
emit(ARM_ADD_R(r_A, r_A, r_X), ctx);
break;
- case BPF_S_ALU_SUB_K:
+ case BPF_ALU | BPF_SUB | BPF_K:
/* A -= K */
OP_IMM3(ARM_SUB, r_A, r_A, k, ctx);
break;
- case BPF_S_ALU_SUB_X:
+ case BPF_ALU | BPF_SUB | BPF_X:
update_on_xread(ctx);
emit(ARM_SUB_R(r_A, r_A, r_X), ctx);
break;
- case BPF_S_ALU_MUL_K:
+ case BPF_ALU | BPF_MUL | BPF_K:
/* A *= K */
emit_mov_i(r_scratch, k, ctx);
emit(ARM_MUL(r_A, r_A, r_scratch), ctx);
break;
- case BPF_S_ALU_MUL_X:
+ case BPF_ALU | BPF_MUL | BPF_X:
update_on_xread(ctx);
emit(ARM_MUL(r_A, r_A, r_X), ctx);
break;
- case BPF_S_ALU_DIV_K:
+ case BPF_ALU | BPF_DIV | BPF_K:
if (k == 1)
break;
emit_mov_i(r_scratch, k, ctx);
emit_udiv(r_A, r_A, r_scratch, ctx);
break;
- case BPF_S_ALU_DIV_X:
+ case BPF_ALU | BPF_DIV | BPF_X:
update_on_xread(ctx);
emit(ARM_CMP_I(r_X, 0), ctx);
emit_err_ret(ARM_COND_EQ, ctx);
emit_udiv(r_A, r_A, r_X, ctx);
break;
- case BPF_S_ALU_OR_K:
+ case BPF_ALU | BPF_OR | BPF_K:
/* A |= K */
OP_IMM3(ARM_ORR, r_A, r_A, k, ctx);
break;
- case BPF_S_ALU_OR_X:
+ case BPF_ALU | BPF_OR | BPF_X:
update_on_xread(ctx);
emit(ARM_ORR_R(r_A, r_A, r_X), ctx);
break;
- case BPF_S_ALU_XOR_K:
+ case BPF_ALU | BPF_XOR | BPF_K:
/* A ^= K; */
OP_IMM3(ARM_EOR, r_A, r_A, k, ctx);
break;
- case BPF_S_ANC_ALU_XOR_X:
- case BPF_S_ALU_XOR_X:
+ case BPF_ANC | SKF_AD_ALU_XOR_X:
+ case BPF_ALU | BPF_XOR | BPF_X:
/* A ^= X */
update_on_xread(ctx);
emit(ARM_EOR_R(r_A, r_A, r_X), ctx);
break;
- case BPF_S_ALU_AND_K:
+ case BPF_ALU | BPF_AND | BPF_K:
/* A &= K */
OP_IMM3(ARM_AND, r_A, r_A, k, ctx);
break;
- case BPF_S_ALU_AND_X:
+ case BPF_ALU | BPF_AND | BPF_X:
update_on_xread(ctx);
emit(ARM_AND_R(r_A, r_A, r_X), ctx);
break;
- case BPF_S_ALU_LSH_K:
+ case BPF_ALU | BPF_LSH | BPF_K:
if (unlikely(k > 31))
return -1;
emit(ARM_LSL_I(r_A, r_A, k), ctx);
break;
- case BPF_S_ALU_LSH_X:
+ case BPF_ALU | BPF_LSH | BPF_X:
update_on_xread(ctx);
emit(ARM_LSL_R(r_A, r_A, r_X), ctx);
break;
- case BPF_S_ALU_RSH_K:
+ case BPF_ALU | BPF_RSH | BPF_K:
if (unlikely(k > 31))
return -1;
emit(ARM_LSR_I(r_A, r_A, k), ctx);
break;
- case BPF_S_ALU_RSH_X:
+ case BPF_ALU | BPF_RSH | BPF_X:
update_on_xread(ctx);
emit(ARM_LSR_R(r_A, r_A, r_X), ctx);
break;
- case BPF_S_ALU_NEG:
+ case BPF_ALU | BPF_NEG:
/* A = -A */
emit(ARM_RSB_I(r_A, r_A, 0), ctx);
break;
- case BPF_S_JMP_JA:
+ case BPF_JMP | BPF_JA:
/* pc += K */
emit(ARM_B(b_imm(i + k + 1, ctx)), ctx);
break;
- case BPF_S_JMP_JEQ_K:
+ case BPF_JMP | BPF_JEQ | BPF_K:
/* pc += (A == K) ? pc->jt : pc->jf */
condt = ARM_COND_EQ;
goto cmp_imm;
- case BPF_S_JMP_JGT_K:
+ case BPF_JMP | BPF_JGT | BPF_K:
/* pc += (A > K) ? pc->jt : pc->jf */
condt = ARM_COND_HI;
goto cmp_imm;
- case BPF_S_JMP_JGE_K:
+ case BPF_JMP | BPF_JGE | BPF_K:
/* pc += (A >= K) ? pc->jt : pc->jf */
condt = ARM_COND_HS;
cmp_imm:
@@ -731,22 +726,22 @@ cond_jump:
_emit(condt ^ 1, ARM_B(b_imm(i + inst->jf + 1,
ctx)), ctx);
break;
- case BPF_S_JMP_JEQ_X:
+ case BPF_JMP | BPF_JEQ | BPF_X:
/* pc += (A == X) ? pc->jt : pc->jf */
condt = ARM_COND_EQ;
goto cmp_x;
- case BPF_S_JMP_JGT_X:
+ case BPF_JMP | BPF_JGT | BPF_X:
/* pc += (A > X) ? pc->jt : pc->jf */
condt = ARM_COND_HI;
goto cmp_x;
- case BPF_S_JMP_JGE_X:
+ case BPF_JMP | BPF_JGE | BPF_X:
/* pc += (A >= X) ? pc->jt : pc->jf */
condt = ARM_COND_CS;
cmp_x:
update_on_xread(ctx);
emit(ARM_CMP_R(r_A, r_X), ctx);
goto cond_jump;
- case BPF_S_JMP_JSET_K:
+ case BPF_JMP | BPF_JSET | BPF_K:
/* pc += (A & K) ? pc->jt : pc->jf */
condt = ARM_COND_NE;
/* not set iff all zeroes iff Z==1 iff EQ */
@@ -759,16 +754,16 @@ cmp_x:
emit(ARM_TST_I(r_A, imm12), ctx);
}
goto cond_jump;
- case BPF_S_JMP_JSET_X:
+ case BPF_JMP | BPF_JSET | BPF_X:
/* pc += (A & X) ? pc->jt : pc->jf */
update_on_xread(ctx);
condt = ARM_COND_NE;
emit(ARM_TST_R(r_A, r_X), ctx);
goto cond_jump;
- case BPF_S_RET_A:
+ case BPF_RET | BPF_A:
emit(ARM_MOV_R(ARM_R0, r_A), ctx);
goto b_epilogue;
- case BPF_S_RET_K:
+ case BPF_RET | BPF_K:
if ((k == 0) && (ctx->ret0_fp_idx < 0))
ctx->ret0_fp_idx = i;
emit_mov_i(ARM_R0, k, ctx);
@@ -776,17 +771,17 @@ b_epilogue:
if (i != ctx->skf->len - 1)
emit(ARM_B(b_imm(prog->len, ctx)), ctx);
break;
- case BPF_S_MISC_TAX:
+ case BPF_MISC | BPF_TAX:
/* X = A */
ctx->seen |= SEEN_X;
emit(ARM_MOV_R(r_X, r_A), ctx);
break;
- case BPF_S_MISC_TXA:
+ case BPF_MISC | BPF_TXA:
/* A = X */
update_on_xread(ctx);
emit(ARM_MOV_R(r_A, r_X), ctx);
break;
- case BPF_S_ANC_PROTOCOL:
+ case BPF_ANC | SKF_AD_PROTOCOL:
/* A = ntohs(skb->protocol) */
ctx->seen |= SEEN_SKB;
BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
@@ -795,7 +790,7 @@ b_epilogue:
emit(ARM_LDRH_I(r_scratch, r_skb, off), ctx);
emit_swap16(r_A, r_scratch, ctx);
break;
- case BPF_S_ANC_CPU:
+ case BPF_ANC | SKF_AD_CPU:
/* r_scratch = current_thread_info() */
OP_IMM3(ARM_BIC, r_scratch, ARM_SP, THREAD_SIZE - 1, ctx);
/* A = current_thread_info()->cpu */
@@ -803,7 +798,7 @@ b_epilogue:
off = offsetof(struct thread_info, cpu);
emit(ARM_LDR_I(r_A, r_scratch, off), ctx);
break;
- case BPF_S_ANC_IFINDEX:
+ case BPF_ANC | SKF_AD_IFINDEX:
/* A = skb->dev->ifindex */
ctx->seen |= SEEN_SKB;
off = offsetof(struct sk_buff, dev);
@@ -817,30 +812,30 @@ b_epilogue:
off = offsetof(struct net_device, ifindex);
emit(ARM_LDR_I(r_A, r_scratch, off), ctx);
break;
- case BPF_S_ANC_MARK:
+ case BPF_ANC | SKF_AD_MARK:
ctx->seen |= SEEN_SKB;
BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4);
off = offsetof(struct sk_buff, mark);
emit(ARM_LDR_I(r_A, r_skb, off), ctx);
break;
- case BPF_S_ANC_RXHASH:
+ case BPF_ANC | SKF_AD_RXHASH:
ctx->seen |= SEEN_SKB;
BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, hash) != 4);
off = offsetof(struct sk_buff, hash);
emit(ARM_LDR_I(r_A, r_skb, off), ctx);
break;
- case BPF_S_ANC_VLAN_TAG:
- case BPF_S_ANC_VLAN_TAG_PRESENT:
+ case BPF_ANC | SKF_AD_VLAN_TAG:
+ case BPF_ANC | SKF_AD_VLAN_TAG_PRESENT:
ctx->seen |= SEEN_SKB;
BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_tci) != 2);
off = offsetof(struct sk_buff, vlan_tci);
emit(ARM_LDRH_I(r_A, r_skb, off), ctx);
- if (inst->code == BPF_S_ANC_VLAN_TAG)
+ if (code == (BPF_ANC | SKF_AD_VLAN_TAG))
OP_IMM3(ARM_AND, r_A, r_A, VLAN_VID_MASK, ctx);
else
OP_IMM3(ARM_AND, r_A, r_A, VLAN_TAG_PRESENT, ctx);
break;
- case BPF_S_ANC_QUEUE:
+ case BPF_ANC | SKF_AD_QUEUE:
ctx->seen |= SEEN_SKB;
BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
queue_mapping) != 2);
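
One detail worth spelling out in the vlan_tci handling above (the same shape recurs in the powerpc, s390, and sparc JITs below): a single halfword load feeds both ancillary VLAN opcodes, and only the final mask differs. An editor's plain-C statement of those semantics (constants mirror <linux/if_vlan.h> of this era):

#include <stdint.h>

#define VLAN_VID_MASK     0x0fff   /* low 12 bits: VLAN ID */
#define VLAN_TAG_PRESENT  0x1000   /* bit 12: tag-present flag */

/* SKF_AD_VLAN_TAG: A = skb->vlan_tci & VLAN_VID_MASK */
static uint32_t anc_vlan_tag(uint16_t vlan_tci)
{
	return vlan_tci & VLAN_VID_MASK;
}

/* SKF_AD_VLAN_TAG_PRESENT: A is nonzero iff the tag-present bit is set */
static uint32_t anc_vlan_tag_present(uint16_t vlan_tci)
{
	return vlan_tci & VLAN_TAG_PRESENT;
}
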
diff --git a/arch/mips/bcm47xx/sprom.c b/arch/mips/bcm47xx/sprom.c
index a8b5408dd34..da4cdb16844 100644
--- a/arch/mips/bcm47xx/sprom.c
+++ b/arch/mips/bcm47xx/sprom.c
@@ -168,6 +168,7 @@ static void nvram_read_alpha2(const char *prefix, const char *name,
static void bcm47xx_fill_sprom_r1234589(struct ssb_sprom *sprom,
const char *prefix, bool fallback)
{
+ nvram_read_u16(prefix, NULL, "devid", &sprom->dev_id, 0, fallback);
nvram_read_u8(prefix, NULL, "ledbh0", &sprom->gpio0, 0xff, fallback);
nvram_read_u8(prefix, NULL, "ledbh1", &sprom->gpio1, 0xff, fallback);
nvram_read_u8(prefix, NULL, "ledbh2", &sprom->gpio2, 0xff, fallback);
diff --git a/arch/powerpc/net/bpf_jit_64.S b/arch/powerpc/net/bpf_jit_64.S
index e76eba74d9d..8f87d921712 100644
--- a/arch/powerpc/net/bpf_jit_64.S
+++ b/arch/powerpc/net/bpf_jit_64.S
@@ -78,7 +78,7 @@ sk_load_byte_positive_offset:
blr
/*
- * BPF_S_LDX_B_MSH: ldxb 4*([offset]&0xf)
+ * BPF_LDX | BPF_B | BPF_MSH: ldxb 4*([offset]&0xf)
* r_addr is the offset value
*/
.globl sk_load_byte_msh
diff --git a/arch/powerpc/net/bpf_jit_comp.c b/arch/powerpc/net/bpf_jit_comp.c
index 808ce1cae21..6dcdadefd8d 100644
--- a/arch/powerpc/net/bpf_jit_comp.c
+++ b/arch/powerpc/net/bpf_jit_comp.c
@@ -79,19 +79,11 @@ static void bpf_jit_build_prologue(struct sk_filter *fp, u32 *image,
}
switch (filter[0].code) {
- case BPF_S_RET_K:
- case BPF_S_LD_W_LEN:
- case BPF_S_ANC_PROTOCOL:
- case BPF_S_ANC_IFINDEX:
- case BPF_S_ANC_MARK:
- case BPF_S_ANC_RXHASH:
- case BPF_S_ANC_VLAN_TAG:
- case BPF_S_ANC_VLAN_TAG_PRESENT:
- case BPF_S_ANC_CPU:
- case BPF_S_ANC_QUEUE:
- case BPF_S_LD_W_ABS:
- case BPF_S_LD_H_ABS:
- case BPF_S_LD_B_ABS:
+ case BPF_RET | BPF_K:
+ case BPF_LD | BPF_W | BPF_LEN:
+ case BPF_LD | BPF_W | BPF_ABS:
+ case BPF_LD | BPF_H | BPF_ABS:
+ case BPF_LD | BPF_B | BPF_ABS:
/* first instruction sets A register (or is RET 'constant') */
break;
default:
@@ -144,6 +136,7 @@ static int bpf_jit_build_body(struct sk_filter *fp, u32 *image,
for (i = 0; i < flen; i++) {
unsigned int K = filter[i].k;
+ u16 code = bpf_anc_helper(&filter[i]);
/*
* addrs[] maps a BPF bytecode address into a real offset from
@@ -151,35 +144,35 @@ static int bpf_jit_build_body(struct sk_filter *fp, u32 *image,
*/
addrs[i] = ctx->idx * 4;
- switch (filter[i].code) {
+ switch (code) {
/*** ALU ops ***/
- case BPF_S_ALU_ADD_X: /* A += X; */
+ case BPF_ALU | BPF_ADD | BPF_X: /* A += X; */
ctx->seen |= SEEN_XREG;
PPC_ADD(r_A, r_A, r_X);
break;
- case BPF_S_ALU_ADD_K: /* A += K; */
+ case BPF_ALU | BPF_ADD | BPF_K: /* A += K; */
if (!K)
break;
PPC_ADDI(r_A, r_A, IMM_L(K));
if (K >= 32768)
PPC_ADDIS(r_A, r_A, IMM_HA(K));
break;
- case BPF_S_ALU_SUB_X: /* A -= X; */
+ case BPF_ALU | BPF_SUB | BPF_X: /* A -= X; */
ctx->seen |= SEEN_XREG;
PPC_SUB(r_A, r_A, r_X);
break;
- case BPF_S_ALU_SUB_K: /* A -= K */
+ case BPF_ALU | BPF_SUB | BPF_K: /* A -= K */
if (!K)
break;
PPC_ADDI(r_A, r_A, IMM_L(-K));
if (K >= 32768)
PPC_ADDIS(r_A, r_A, IMM_HA(-K));
break;
- case BPF_S_ALU_MUL_X: /* A *= X; */
+ case BPF_ALU | BPF_MUL | BPF_X: /* A *= X; */
ctx->seen |= SEEN_XREG;
PPC_MUL(r_A, r_A, r_X);
break;
- case BPF_S_ALU_MUL_K: /* A *= K */
+ case BPF_ALU | BPF_MUL | BPF_K: /* A *= K */
if (K < 32768)
PPC_MULI(r_A, r_A, K);
else {
@@ -187,7 +180,7 @@ static int bpf_jit_build_body(struct sk_filter *fp, u32 *image,
PPC_MUL(r_A, r_A, r_scratch1);
}
break;
- case BPF_S_ALU_MOD_X: /* A %= X; */
+ case BPF_ALU | BPF_MOD | BPF_X: /* A %= X; */
ctx->seen |= SEEN_XREG;
PPC_CMPWI(r_X, 0);
if (ctx->pc_ret0 != -1) {
@@ -201,13 +194,13 @@ static int bpf_jit_build_body(struct sk_filter *fp, u32 *image,
PPC_MUL(r_scratch1, r_X, r_scratch1);
PPC_SUB(r_A, r_A, r_scratch1);
break;
- case BPF_S_ALU_MOD_K: /* A %= K; */
+ case BPF_ALU | BPF_MOD | BPF_K: /* A %= K; */
PPC_LI32(r_scratch2, K);
PPC_DIVWU(r_scratch1, r_A, r_scratch2);
PPC_MUL(r_scratch1, r_scratch2, r_scratch1);
PPC_SUB(r_A, r_A, r_scratch1);
break;
- case BPF_S_ALU_DIV_X: /* A /= X; */
+ case BPF_ALU | BPF_DIV | BPF_X: /* A /= X; */
ctx->seen |= SEEN_XREG;
PPC_CMPWI(r_X, 0);
if (ctx->pc_ret0 != -1) {
@@ -223,17 +216,17 @@ static int bpf_jit_build_body(struct sk_filter *fp, u32 *image,
}
PPC_DIVWU(r_A, r_A, r_X);
break;
- case BPF_S_ALU_DIV_K: /* A /= K */
+ case BPF_ALU | BPF_DIV | BPF_K: /* A /= K */
if (K == 1)
break;
PPC_LI32(r_scratch1, K);
PPC_DIVWU(r_A, r_A, r_scratch1);
break;
- case BPF_S_ALU_AND_X:
+ case BPF_ALU | BPF_AND | BPF_X:
ctx->seen |= SEEN_XREG;
PPC_AND(r_A, r_A, r_X);
break;
- case BPF_S_ALU_AND_K:
+ case BPF_ALU | BPF_AND | BPF_K:
if (!IMM_H(K))
PPC_ANDI(r_A, r_A, K);
else {
@@ -241,51 +234,51 @@ static int bpf_jit_build_body(struct sk_filter *fp, u32 *image,
PPC_AND(r_A, r_A, r_scratch1);
}
break;
- case BPF_S_ALU_OR_X:
+ case BPF_ALU | BPF_OR | BPF_X:
ctx->seen |= SEEN_XREG;
PPC_OR(r_A, r_A, r_X);
break;
- case BPF_S_ALU_OR_K:
+ case BPF_ALU | BPF_OR | BPF_K:
if (IMM_L(K))
PPC_ORI(r_A, r_A, IMM_L(K));
if (K >= 65536)
PPC_ORIS(r_A, r_A, IMM_H(K));
break;
- case BPF_S_ANC_ALU_XOR_X:
- case BPF_S_ALU_XOR_X: /* A ^= X */
+ case BPF_ANC | SKF_AD_ALU_XOR_X:
+ case BPF_ALU | BPF_XOR | BPF_X: /* A ^= X */
ctx->seen |= SEEN_XREG;
PPC_XOR(r_A, r_A, r_X);
break;
- case BPF_S_ALU_XOR_K: /* A ^= K */
+ case BPF_ALU | BPF_XOR | BPF_K: /* A ^= K */
if (IMM_L(K))
PPC_XORI(r_A, r_A, IMM_L(K));
if (K >= 65536)
PPC_XORIS(r_A, r_A, IMM_H(K));
break;
- case BPF_S_ALU_LSH_X: /* A <<= X; */
+ case BPF_ALU | BPF_LSH | BPF_X: /* A <<= X; */
ctx->seen |= SEEN_XREG;
PPC_SLW(r_A, r_A, r_X);
break;
- case BPF_S_ALU_LSH_K:
+ case BPF_ALU | BPF_LSH | BPF_K:
if (K == 0)
break;
else
PPC_SLWI(r_A, r_A, K);
break;
- case BPF_S_ALU_RSH_X: /* A >>= X; */
+ case BPF_ALU | BPF_RSH | BPF_X: /* A >>= X; */
ctx->seen |= SEEN_XREG;
PPC_SRW(r_A, r_A, r_X);
break;
- case BPF_S_ALU_RSH_K: /* A >>= K; */
+ case BPF_ALU | BPF_RSH | BPF_K: /* A >>= K; */
if (K == 0)
break;
else
PPC_SRWI(r_A, r_A, K);
break;
- case BPF_S_ALU_NEG:
+ case BPF_ALU | BPF_NEG:
PPC_NEG(r_A, r_A);
break;
- case BPF_S_RET_K:
+ case BPF_RET | BPF_K:
PPC_LI32(r_ret, K);
if (!K) {
if (ctx->pc_ret0 == -1)
@@ -312,7 +305,7 @@ static int bpf_jit_build_body(struct sk_filter *fp, u32 *image,
PPC_BLR();
}
break;
- case BPF_S_RET_A:
+ case BPF_RET | BPF_A:
PPC_MR(r_ret, r_A);
if (i != flen - 1) {
if (ctx->seen)
@@ -321,53 +314,53 @@ static int bpf_jit_build_body(struct sk_filter *fp, u32 *image,
PPC_BLR();
}
break;
- case BPF_S_MISC_TAX: /* X = A */
+ case BPF_MISC | BPF_TAX: /* X = A */
PPC_MR(r_X, r_A);
break;
- case BPF_S_MISC_TXA: /* A = X */
+ case BPF_MISC | BPF_TXA: /* A = X */
ctx->seen |= SEEN_XREG;
PPC_MR(r_A, r_X);
break;
/*** Constant loads/M[] access ***/
- case BPF_S_LD_IMM: /* A = K */
+ case BPF_LD | BPF_IMM: /* A = K */
PPC_LI32(r_A, K);
break;
- case BPF_S_LDX_IMM: /* X = K */
+ case BPF_LDX | BPF_IMM: /* X = K */
PPC_LI32(r_X, K);
break;
- case BPF_S_LD_MEM: /* A = mem[K] */
+ case BPF_LD | BPF_MEM: /* A = mem[K] */
PPC_MR(r_A, r_M + (K & 0xf));
ctx->seen |= SEEN_MEM | (1<<(K & 0xf));
break;
- case BPF_S_LDX_MEM: /* X = mem[K] */
+ case BPF_LDX | BPF_MEM: /* X = mem[K] */
PPC_MR(r_X, r_M + (K & 0xf));
ctx->seen |= SEEN_MEM | (1<<(K & 0xf));
break;
- case BPF_S_ST: /* mem[K] = A */
+ case BPF_ST: /* mem[K] = A */
PPC_MR(r_M + (K & 0xf), r_A);
ctx->seen |= SEEN_MEM | (1<<(K & 0xf));
break;
- case BPF_S_STX: /* mem[K] = X */
+ case BPF_STX: /* mem[K] = X */
PPC_MR(r_M + (K & 0xf), r_X);
ctx->seen |= SEEN_XREG | SEEN_MEM | (1<<(K & 0xf));
break;
- case BPF_S_LD_W_LEN: /* A = skb->len; */
+ case BPF_LD | BPF_W | BPF_LEN: /* A = skb->len; */
BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, len) != 4);
PPC_LWZ_OFFS(r_A, r_skb, offsetof(struct sk_buff, len));
break;
- case BPF_S_LDX_W_LEN: /* X = skb->len; */
+ case BPF_LDX | BPF_W | BPF_LEN: /* X = skb->len; */
PPC_LWZ_OFFS(r_X, r_skb, offsetof(struct sk_buff, len));
break;
/*** Ancillary info loads ***/
- case BPF_S_ANC_PROTOCOL: /* A = ntohs(skb->protocol); */
+ case BPF_ANC | SKF_AD_PROTOCOL: /* A = ntohs(skb->protocol); */
BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
protocol) != 2);
PPC_NTOHS_OFFS(r_A, r_skb, offsetof(struct sk_buff,
protocol));
break;
- case BPF_S_ANC_IFINDEX:
+ case BPF_ANC | SKF_AD_IFINDEX:
PPC_LD_OFFS(r_scratch1, r_skb, offsetof(struct sk_buff,
dev));
PPC_CMPDI(r_scratch1, 0);
@@ -384,33 +377,33 @@ static int bpf_jit_build_body(struct sk_filter *fp, u32 *image,
PPC_LWZ_OFFS(r_A, r_scratch1,
offsetof(struct net_device, ifindex));
break;
- case BPF_S_ANC_MARK:
+ case BPF_ANC | SKF_AD_MARK:
BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4);
PPC_LWZ_OFFS(r_A, r_skb, offsetof(struct sk_buff,
mark));
break;
- case BPF_S_ANC_RXHASH:
+ case BPF_ANC | SKF_AD_RXHASH:
BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, hash) != 4);
PPC_LWZ_OFFS(r_A, r_skb, offsetof(struct sk_buff,
hash));
break;
- case BPF_S_ANC_VLAN_TAG:
- case BPF_S_ANC_VLAN_TAG_PRESENT:
+ case BPF_ANC | SKF_AD_VLAN_TAG:
+ case BPF_ANC | SKF_AD_VLAN_TAG_PRESENT:
BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_tci) != 2);
PPC_LHZ_OFFS(r_A, r_skb, offsetof(struct sk_buff,
vlan_tci));
- if (filter[i].code == BPF_S_ANC_VLAN_TAG)
+ if (code == (BPF_ANC | SKF_AD_VLAN_TAG))
PPC_ANDI(r_A, r_A, VLAN_VID_MASK);
else
PPC_ANDI(r_A, r_A, VLAN_TAG_PRESENT);
break;
- case BPF_S_ANC_QUEUE:
+ case BPF_ANC | SKF_AD_QUEUE:
BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
queue_mapping) != 2);
PPC_LHZ_OFFS(r_A, r_skb, offsetof(struct sk_buff,
queue_mapping));
break;
- case BPF_S_ANC_CPU:
+ case BPF_ANC | SKF_AD_CPU:
#ifdef CONFIG_SMP
/*
* PACA ptr is r13:
@@ -426,13 +419,13 @@ static int bpf_jit_build_body(struct sk_filter *fp, u32 *image,
break;
/*** Absolute loads from packet header/data ***/
- case BPF_S_LD_W_ABS:
+ case BPF_LD | BPF_W | BPF_ABS:
func = CHOOSE_LOAD_FUNC(K, sk_load_word);
goto common_load;
- case BPF_S_LD_H_ABS:
+ case BPF_LD | BPF_H | BPF_ABS:
func = CHOOSE_LOAD_FUNC(K, sk_load_half);
goto common_load;
- case BPF_S_LD_B_ABS:
+ case BPF_LD | BPF_B | BPF_ABS:
func = CHOOSE_LOAD_FUNC(K, sk_load_byte);
common_load:
/* Load from [K]. */
@@ -449,13 +442,13 @@ static int bpf_jit_build_body(struct sk_filter *fp, u32 *image,
break;
/*** Indirect loads from packet header/data ***/
- case BPF_S_LD_W_IND:
+ case BPF_LD | BPF_W | BPF_IND:
func = sk_load_word;
goto common_load_ind;
- case BPF_S_LD_H_IND:
+ case BPF_LD | BPF_H | BPF_IND:
func = sk_load_half;
goto common_load_ind;
- case BPF_S_LD_B_IND:
+ case BPF_LD | BPF_B | BPF_IND:
func = sk_load_byte;
common_load_ind:
/*
@@ -473,31 +466,31 @@ static int bpf_jit_build_body(struct sk_filter *fp, u32 *image,
PPC_BCC(COND_LT, exit_addr);
break;
- case BPF_S_LDX_B_MSH:
+ case BPF_LDX | BPF_B | BPF_MSH:
func = CHOOSE_LOAD_FUNC(K, sk_load_byte_msh);
goto common_load;
break;
/*** Jump and branches ***/
- case BPF_S_JMP_JA:
+ case BPF_JMP | BPF_JA:
if (K != 0)
PPC_JMP(addrs[i + 1 + K]);
break;
- case BPF_S_JMP_JGT_K:
- case BPF_S_JMP_JGT_X:
+ case BPF_JMP | BPF_JGT | BPF_K:
+ case BPF_JMP | BPF_JGT | BPF_X:
true_cond = COND_GT;
goto cond_branch;
- case BPF_S_JMP_JGE_K:
- case BPF_S_JMP_JGE_X:
+ case BPF_JMP | BPF_JGE | BPF_K:
+ case BPF_JMP | BPF_JGE | BPF_X:
true_cond = COND_GE;
goto cond_branch;
- case BPF_S_JMP_JEQ_K:
- case BPF_S_JMP_JEQ_X:
+ case BPF_JMP | BPF_JEQ | BPF_K:
+ case BPF_JMP | BPF_JEQ | BPF_X:
true_cond = COND_EQ;
goto cond_branch;
- case BPF_S_JMP_JSET_K:
- case BPF_S_JMP_JSET_X:
+ case BPF_JMP | BPF_JSET | BPF_K:
+ case BPF_JMP | BPF_JSET | BPF_X:
true_cond = COND_NE;
/* Fall through */
cond_branch:
@@ -508,20 +501,20 @@ static int bpf_jit_build_body(struct sk_filter *fp, u32 *image,
break;
}
- switch (filter[i].code) {
- case BPF_S_JMP_JGT_X:
- case BPF_S_JMP_JGE_X:
- case BPF_S_JMP_JEQ_X:
+ switch (code) {
+ case BPF_JMP | BPF_JGT | BPF_X:
+ case BPF_JMP | BPF_JGE | BPF_X:
+ case BPF_JMP | BPF_JEQ | BPF_X:
ctx->seen |= SEEN_XREG;
PPC_CMPLW(r_A, r_X);
break;
- case BPF_S_JMP_JSET_X:
+ case BPF_JMP | BPF_JSET | BPF_X:
ctx->seen |= SEEN_XREG;
PPC_AND_DOT(r_scratch1, r_A, r_X);
break;
- case BPF_S_JMP_JEQ_K:
- case BPF_S_JMP_JGT_K:
- case BPF_S_JMP_JGE_K:
+ case BPF_JMP | BPF_JEQ | BPF_K:
+ case BPF_JMP | BPF_JGT | BPF_K:
+ case BPF_JMP | BPF_JGE | BPF_K:
if (K < 32768)
PPC_CMPLWI(r_A, K);
else {
@@ -529,7 +522,7 @@ static int bpf_jit_build_body(struct sk_filter *fp, u32 *image,
PPC_CMPLW(r_A, r_scratch1);
}
break;
- case BPF_S_JMP_JSET_K:
+ case BPF_JMP | BPF_JSET | BPF_K:
if (K < 32768)
/* PPC_ANDI is /only/ dot-form */
PPC_ANDI(r_scratch1, r_A, K);
diff --git a/arch/powerpc/sysdev/fsl_soc.c b/arch/powerpc/sysdev/fsl_soc.c
index 228cf91b91c..ffd1169ebaa 100644
--- a/arch/powerpc/sysdev/fsl_soc.c
+++ b/arch/powerpc/sysdev/fsl_soc.c
@@ -25,7 +25,6 @@
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/phy.h>
-#include <linux/phy_fixed.h>
#include <linux/spi/spi.h>
#include <linux/fsl_devices.h>
#include <linux/fs_enet_pd.h>
@@ -178,37 +177,6 @@ u32 get_baudrate(void)
EXPORT_SYMBOL(get_baudrate);
#endif /* CONFIG_CPM2 */
-#ifdef CONFIG_FIXED_PHY
-static int __init of_add_fixed_phys(void)
-{
- int ret;
- struct device_node *np;
- u32 *fixed_link;
- struct fixed_phy_status status = {};
-
- for_each_node_by_name(np, "ethernet") {
- fixed_link = (u32 *)of_get_property(np, "fixed-link", NULL);
- if (!fixed_link)
- continue;
-
- status.link = 1;
- status.duplex = fixed_link[1];
- status.speed = fixed_link[2];
- status.pause = fixed_link[3];
- status.asym_pause = fixed_link[4];
-
- ret = fixed_phy_add(PHY_POLL, fixed_link[0], &status);
- if (ret) {
- of_node_put(np);
- return ret;
- }
- }
-
- return 0;
-}
-arch_initcall(of_add_fixed_phys);
-#endif /* CONFIG_FIXED_PHY */
-
#if defined(CONFIG_FSL_SOC_BOOKE) || defined(CONFIG_PPC_86xx)
static __be32 __iomem *rstcr;
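
The removed arch initcall parsed the legacy five-cell "fixed-link" property and registered software PHYs at boot; that job moves to the generic fixed-link subnode binding (the form added to armada-xp-matrix.dts above), consumed by the MAC drivers themselves. A rough sketch of the driver-side pattern, assuming the of_mdio helpers (of_phy_is_fixed_link(), of_phy_register_fixed_link()) that landed in the same cycle:

#include <linux/of.h>
#include <linux/of_mdio.h>

/* editor's sketch: probe-time handling of a fixed-link subnode */
static int example_probe_fixed_link(struct device_node *np)
{
	int ret;

	if (of_phy_is_fixed_link(np)) {
		/* registers a software PHY backed by the subnode's
		 * speed / full-duplex / pause properties */
		ret = of_phy_register_fixed_link(np);
		if (ret)
			return ret;
	}
	return 0;
}
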
diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
index e9f8fa9337f..a2cbd875543 100644
--- a/arch/s390/net/bpf_jit_comp.c
+++ b/arch/s390/net/bpf_jit_comp.c
@@ -269,27 +269,17 @@ static void bpf_jit_noleaks(struct bpf_jit *jit, struct sock_filter *filter)
EMIT4(0xa7c80000);
/* Clear A if the first register does not set it. */
switch (filter[0].code) {
- case BPF_S_LD_W_ABS:
- case BPF_S_LD_H_ABS:
- case BPF_S_LD_B_ABS:
- case BPF_S_LD_W_LEN:
- case BPF_S_LD_W_IND:
- case BPF_S_LD_H_IND:
- case BPF_S_LD_B_IND:
- case BPF_S_LD_IMM:
- case BPF_S_LD_MEM:
- case BPF_S_MISC_TXA:
- case BPF_S_ANC_PROTOCOL:
- case BPF_S_ANC_PKTTYPE:
- case BPF_S_ANC_IFINDEX:
- case BPF_S_ANC_MARK:
- case BPF_S_ANC_QUEUE:
- case BPF_S_ANC_HATYPE:
- case BPF_S_ANC_RXHASH:
- case BPF_S_ANC_CPU:
- case BPF_S_ANC_VLAN_TAG:
- case BPF_S_ANC_VLAN_TAG_PRESENT:
- case BPF_S_RET_K:
+ case BPF_LD | BPF_W | BPF_ABS:
+ case BPF_LD | BPF_H | BPF_ABS:
+ case BPF_LD | BPF_B | BPF_ABS:
+ case BPF_LD | BPF_W | BPF_LEN:
+ case BPF_LD | BPF_W | BPF_IND:
+ case BPF_LD | BPF_H | BPF_IND:
+ case BPF_LD | BPF_B | BPF_IND:
+ case BPF_LD | BPF_IMM:
+ case BPF_LD | BPF_MEM:
+ case BPF_MISC | BPF_TXA:
+ case BPF_RET | BPF_K:
/* first instruction sets A register */
break;
default: /* A = 0 */
@@ -304,15 +294,18 @@ static int bpf_jit_insn(struct bpf_jit *jit, struct sock_filter *filter,
unsigned int K;
int offset;
unsigned int mask;
+ u16 code;
K = filter->k;
- switch (filter->code) {
- case BPF_S_ALU_ADD_X: /* A += X */
+ code = bpf_anc_helper(filter);
+
+ switch (code) {
+ case BPF_ALU | BPF_ADD | BPF_X: /* A += X */
jit->seen |= SEEN_XREG;
/* ar %r5,%r12 */
EMIT2(0x1a5c);
break;
- case BPF_S_ALU_ADD_K: /* A += K */
+ case BPF_ALU | BPF_ADD | BPF_K: /* A += K */
if (!K)
break;
if (K <= 16383)
@@ -325,12 +318,12 @@ static int bpf_jit_insn(struct bpf_jit *jit, struct sock_filter *filter,
/* a %r5,<d(K)>(%r13) */
EMIT4_DISP(0x5a50d000, EMIT_CONST(K));
break;
- case BPF_S_ALU_SUB_X: /* A -= X */
+ case BPF_ALU | BPF_SUB | BPF_X: /* A -= X */
jit->seen |= SEEN_XREG;
/* sr %r5,%r12 */
EMIT2(0x1b5c);
break;
- case BPF_S_ALU_SUB_K: /* A -= K */
+ case BPF_ALU | BPF_SUB | BPF_K: /* A -= K */
if (!K)
break;
if (K <= 16384)
@@ -343,12 +336,12 @@ static int bpf_jit_insn(struct bpf_jit *jit, struct sock_filter *filter,
/* s %r5,<d(K)>(%r13) */
EMIT4_DISP(0x5b50d000, EMIT_CONST(K));
break;
- case BPF_S_ALU_MUL_X: /* A *= X */
+ case BPF_ALU | BPF_MUL | BPF_X: /* A *= X */
jit->seen |= SEEN_XREG;
/* msr %r5,%r12 */
EMIT4(0xb252005c);
break;
- case BPF_S_ALU_MUL_K: /* A *= K */
+ case BPF_ALU | BPF_MUL | BPF_K: /* A *= K */
if (K <= 16383)
/* mhi %r5,K */
EMIT4_IMM(0xa75c0000, K);
@@ -359,7 +352,7 @@ static int bpf_jit_insn(struct bpf_jit *jit, struct sock_filter *filter,
/* ms %r5,<d(K)>(%r13) */
EMIT4_DISP(0x7150d000, EMIT_CONST(K));
break;
- case BPF_S_ALU_DIV_X: /* A /= X */
+ case BPF_ALU | BPF_DIV | BPF_X: /* A /= X */
jit->seen |= SEEN_XREG | SEEN_RET0;
/* ltr %r12,%r12 */
EMIT2(0x12cc);
@@ -370,7 +363,7 @@ static int bpf_jit_insn(struct bpf_jit *jit, struct sock_filter *filter,
/* dlr %r4,%r12 */
EMIT4(0xb997004c);
break;
- case BPF_S_ALU_DIV_K: /* A /= K */
+ case BPF_ALU | BPF_DIV | BPF_K: /* A /= K */
if (K == 1)
break;
/* lhi %r4,0 */
@@ -378,7 +371,7 @@ static int bpf_jit_insn(struct bpf_jit *jit, struct sock_filter *filter,
/* dl %r4,<d(K)>(%r13) */
EMIT6_DISP(0xe340d000, 0x0097, EMIT_CONST(K));
break;
- case BPF_S_ALU_MOD_X: /* A %= X */
+ case BPF_ALU | BPF_MOD | BPF_X: /* A %= X */
jit->seen |= SEEN_XREG | SEEN_RET0;
/* ltr %r12,%r12 */
EMIT2(0x12cc);
@@ -391,7 +384,7 @@ static int bpf_jit_insn(struct bpf_jit *jit, struct sock_filter *filter,
/* lr %r5,%r4 */
EMIT2(0x1854);
break;
- case BPF_S_ALU_MOD_K: /* A %= K */
+ case BPF_ALU | BPF_MOD | BPF_K: /* A %= K */
if (K == 1) {
/* lhi %r5,0 */
EMIT4(0xa7580000);
@@ -404,12 +397,12 @@ static int bpf_jit_insn(struct bpf_jit *jit, struct sock_filter *filter,
/* lr %r5,%r4 */
EMIT2(0x1854);
break;
- case BPF_S_ALU_AND_X: /* A &= X */
+ case BPF_ALU | BPF_AND | BPF_X: /* A &= X */
jit->seen |= SEEN_XREG;
/* nr %r5,%r12 */
EMIT2(0x145c);
break;
- case BPF_S_ALU_AND_K: /* A &= K */
+ case BPF_ALU | BPF_AND | BPF_K: /* A &= K */
if (test_facility(21))
/* nilf %r5,<K> */
EMIT6_IMM(0xc05b0000, K);
@@ -417,12 +410,12 @@ static int bpf_jit_insn(struct bpf_jit *jit, struct sock_filter *filter,
/* n %r5,<d(K)>(%r13) */
EMIT4_DISP(0x5450d000, EMIT_CONST(K));
break;
- case BPF_S_ALU_OR_X: /* A |= X */
+ case BPF_ALU | BPF_OR | BPF_X: /* A |= X */
jit->seen |= SEEN_XREG;
/* or %r5,%r12 */
EMIT2(0x165c);
break;
- case BPF_S_ALU_OR_K: /* A |= K */
+ case BPF_ALU | BPF_OR | BPF_K: /* A |= K */
if (test_facility(21))
/* oilf %r5,<K> */
EMIT6_IMM(0xc05d0000, K);
@@ -430,55 +423,55 @@ static int bpf_jit_insn(struct bpf_jit *jit, struct sock_filter *filter,
/* o %r5,<d(K)>(%r13) */
EMIT4_DISP(0x5650d000, EMIT_CONST(K));
break;
- case BPF_S_ANC_ALU_XOR_X: /* A ^= X; */
- case BPF_S_ALU_XOR_X:
+ case BPF_ANC | SKF_AD_ALU_XOR_X: /* A ^= X; */
+ case BPF_ALU | BPF_XOR | BPF_X:
jit->seen |= SEEN_XREG;
/* xr %r5,%r12 */
EMIT2(0x175c);
break;
- case BPF_S_ALU_XOR_K: /* A ^= K */
+ case BPF_ALU | BPF_XOR | BPF_K: /* A ^= K */
if (!K)
break;
/* x %r5,<d(K)>(%r13) */
EMIT4_DISP(0x5750d000, EMIT_CONST(K));
break;
- case BPF_S_ALU_LSH_X: /* A <<= X; */
+ case BPF_ALU | BPF_LSH | BPF_X: /* A <<= X; */
jit->seen |= SEEN_XREG;
/* sll %r5,0(%r12) */
EMIT4(0x8950c000);
break;
- case BPF_S_ALU_LSH_K: /* A <<= K */
+ case BPF_ALU | BPF_LSH | BPF_K: /* A <<= K */
if (K == 0)
break;
/* sll %r5,K */
EMIT4_DISP(0x89500000, K);
break;
- case BPF_S_ALU_RSH_X: /* A >>= X; */
+ case BPF_ALU | BPF_RSH | BPF_X: /* A >>= X; */
jit->seen |= SEEN_XREG;
/* srl %r5,0(%r12) */
EMIT4(0x8850c000);
break;
- case BPF_S_ALU_RSH_K: /* A >>= K; */
+ case BPF_ALU | BPF_RSH | BPF_K: /* A >>= K; */
if (K == 0)
break;
/* srl %r5,K */
EMIT4_DISP(0x88500000, K);
break;
- case BPF_S_ALU_NEG: /* A = -A */
+ case BPF_ALU | BPF_NEG: /* A = -A */
/* lnr %r5,%r5 */
EMIT2(0x1155);
break;
- case BPF_S_JMP_JA: /* ip += K */
+ case BPF_JMP | BPF_JA: /* ip += K */
offset = addrs[i + K] + jit->start - jit->prg;
EMIT4_PCREL(0xa7f40000, offset);
break;
- case BPF_S_JMP_JGT_K: /* ip += (A > K) ? jt : jf */
+ case BPF_JMP | BPF_JGT | BPF_K: /* ip += (A > K) ? jt : jf */
mask = 0x200000; /* jh */
goto kbranch;
- case BPF_S_JMP_JGE_K: /* ip += (A >= K) ? jt : jf */
+ case BPF_JMP | BPF_JGE | BPF_K: /* ip += (A >= K) ? jt : jf */
mask = 0xa00000; /* jhe */
goto kbranch;
- case BPF_S_JMP_JEQ_K: /* ip += (A == K) ? jt : jf */
+ case BPF_JMP | BPF_JEQ | BPF_K: /* ip += (A == K) ? jt : jf */
mask = 0x800000; /* je */
kbranch: /* Emit compare if the branch targets are different */
if (filter->jt != filter->jf) {
@@ -511,7 +504,7 @@ branch: if (filter->jt == filter->jf) {
EMIT4_PCREL(0xa7040000 | (mask ^ 0xf00000), offset);
}
break;
- case BPF_S_JMP_JSET_K: /* ip += (A & K) ? jt : jf */
+ case BPF_JMP | BPF_JSET | BPF_K: /* ip += (A & K) ? jt : jf */
mask = 0x700000; /* jnz */
/* Emit test if the branch targets are different */
if (filter->jt != filter->jf) {
@@ -525,13 +518,13 @@ branch: if (filter->jt == filter->jf) {
EMIT4_IMM(0xa7510000, K);
}
goto branch;
- case BPF_S_JMP_JGT_X: /* ip += (A > X) ? jt : jf */
+ case BPF_JMP | BPF_JGT | BPF_X: /* ip += (A > X) ? jt : jf */
mask = 0x200000; /* jh */
goto xbranch;
- case BPF_S_JMP_JGE_X: /* ip += (A >= X) ? jt : jf */
+ case BPF_JMP | BPF_JGE | BPF_X: /* ip += (A >= X) ? jt : jf */
mask = 0xa00000; /* jhe */
goto xbranch;
- case BPF_S_JMP_JEQ_X: /* ip += (A == X) ? jt : jf */
+ case BPF_JMP | BPF_JEQ | BPF_X: /* ip += (A == X) ? jt : jf */
mask = 0x800000; /* je */
xbranch: /* Emit compare if the branch targets are different */
if (filter->jt != filter->jf) {
@@ -540,7 +533,7 @@ xbranch: /* Emit compare if the branch targets are different */
EMIT2(0x195c);
}
goto branch;
- case BPF_S_JMP_JSET_X: /* ip += (A & X) ? jt : jf */
+ case BPF_JMP | BPF_JSET | BPF_X: /* ip += (A & X) ? jt : jf */
mask = 0x700000; /* jnz */
/* Emit test if the branch targets are different */
if (filter->jt != filter->jf) {
@@ -551,15 +544,15 @@ xbranch: /* Emit compare if the branch targets are different */
EMIT2(0x144c);
}
goto branch;
- case BPF_S_LD_W_ABS: /* A = *(u32 *) (skb->data+K) */
+ case BPF_LD | BPF_W | BPF_ABS: /* A = *(u32 *) (skb->data+K) */
jit->seen |= SEEN_DATAREF | SEEN_RET0 | SEEN_LOAD_WORD;
offset = jit->off_load_word;
goto load_abs;
- case BPF_S_LD_H_ABS: /* A = *(u16 *) (skb->data+K) */
+ case BPF_LD | BPF_H | BPF_ABS: /* A = *(u16 *) (skb->data+K) */
jit->seen |= SEEN_DATAREF | SEEN_RET0 | SEEN_LOAD_HALF;
offset = jit->off_load_half;
goto load_abs;
- case BPF_S_LD_B_ABS: /* A = *(u8 *) (skb->data+K) */
+ case BPF_LD | BPF_B | BPF_ABS: /* A = *(u8 *) (skb->data+K) */
jit->seen |= SEEN_DATAREF | SEEN_RET0 | SEEN_LOAD_BYTE;
offset = jit->off_load_byte;
load_abs: if ((int) K < 0)
@@ -573,19 +566,19 @@ call_fn: /* lg %r1,<d(function)>(%r13) */
/* jnz <ret0> */
EMIT4_PCREL(0xa7740000, (jit->ret0_ip - jit->prg));
break;
- case BPF_S_LD_W_IND: /* A = *(u32 *) (skb->data+K+X) */
+ case BPF_LD | BPF_W | BPF_IND: /* A = *(u32 *) (skb->data+K+X) */
jit->seen |= SEEN_DATAREF | SEEN_RET0 | SEEN_LOAD_IWORD;
offset = jit->off_load_iword;
goto call_fn;
- case BPF_S_LD_H_IND: /* A = *(u16 *) (skb->data+K+X) */
+ case BPF_LD | BPF_H | BPF_IND: /* A = *(u16 *) (skb->data+K+X) */
jit->seen |= SEEN_DATAREF | SEEN_RET0 | SEEN_LOAD_IHALF;
offset = jit->off_load_ihalf;
goto call_fn;
- case BPF_S_LD_B_IND: /* A = *(u8 *) (skb->data+K+X) */
+ case BPF_LD | BPF_B | BPF_IND: /* A = *(u8 *) (skb->data+K+X) */
jit->seen |= SEEN_DATAREF | SEEN_RET0 | SEEN_LOAD_IBYTE;
offset = jit->off_load_ibyte;
goto call_fn;
- case BPF_S_LDX_B_MSH:
+ case BPF_LDX | BPF_B | BPF_MSH:
/* X = (*(u8 *)(skb->data+K) & 0xf) << 2 */
jit->seen |= SEEN_RET0;
if ((int) K < 0) {
@@ -596,17 +589,17 @@ call_fn: /* lg %r1,<d(function)>(%r13) */
jit->seen |= SEEN_DATAREF | SEEN_LOAD_BMSH;
offset = jit->off_load_bmsh;
goto call_fn;
- case BPF_S_LD_W_LEN: /* A = skb->len; */
+ case BPF_LD | BPF_W | BPF_LEN: /* A = skb->len; */
BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, len) != 4);
/* l %r5,<d(len)>(%r2) */
EMIT4_DISP(0x58502000, offsetof(struct sk_buff, len));
break;
- case BPF_S_LDX_W_LEN: /* X = skb->len; */
+ case BPF_LDX | BPF_W | BPF_LEN: /* X = skb->len; */
jit->seen |= SEEN_XREG;
/* l %r12,<d(len)>(%r2) */
EMIT4_DISP(0x58c02000, offsetof(struct sk_buff, len));
break;
- case BPF_S_LD_IMM: /* A = K */
+ case BPF_LD | BPF_IMM: /* A = K */
if (K <= 16383)
/* lhi %r5,K */
EMIT4_IMM(0xa7580000, K);
@@ -617,7 +610,7 @@ call_fn: /* lg %r1,<d(function)>(%r13) */
/* l %r5,<d(K)>(%r13) */
EMIT4_DISP(0x5850d000, EMIT_CONST(K));
break;
- case BPF_S_LDX_IMM: /* X = K */
+ case BPF_LDX | BPF_IMM: /* X = K */
jit->seen |= SEEN_XREG;
if (K <= 16383)
/* lhi %r12,<K> */
@@ -629,29 +622,29 @@ call_fn: /* lg %r1,<d(function)>(%r13) */
/* l %r12,<d(K)>(%r13) */
EMIT4_DISP(0x58c0d000, EMIT_CONST(K));
break;
- case BPF_S_LD_MEM: /* A = mem[K] */
+ case BPF_LD | BPF_MEM: /* A = mem[K] */
jit->seen |= SEEN_MEM;
/* l %r5,<K>(%r15) */
EMIT4_DISP(0x5850f000,
(jit->seen & SEEN_DATAREF) ? 160 + K*4 : K*4);
break;
- case BPF_S_LDX_MEM: /* X = mem[K] */
+ case BPF_LDX | BPF_MEM: /* X = mem[K] */
jit->seen |= SEEN_XREG | SEEN_MEM;
/* l %r12,<K>(%r15) */
EMIT4_DISP(0x58c0f000,
(jit->seen & SEEN_DATAREF) ? 160 + K*4 : K*4);
break;
- case BPF_S_MISC_TAX: /* X = A */
+ case BPF_MISC | BPF_TAX: /* X = A */
jit->seen |= SEEN_XREG;
/* lr %r12,%r5 */
EMIT2(0x18c5);
break;
- case BPF_S_MISC_TXA: /* A = X */
+ case BPF_MISC | BPF_TXA: /* A = X */
jit->seen |= SEEN_XREG;
/* lr %r5,%r12 */
EMIT2(0x185c);
break;
- case BPF_S_RET_K:
+ case BPF_RET | BPF_K:
if (K == 0) {
jit->seen |= SEEN_RET0;
if (last)
@@ -671,33 +664,33 @@ call_fn: /* lg %r1,<d(function)>(%r13) */
EMIT4_PCREL(0xa7f40000, jit->exit_ip - jit->prg);
}
break;
- case BPF_S_RET_A:
+ case BPF_RET | BPF_A:
/* llgfr %r2,%r5 */
EMIT4(0xb9160025);
/* j <exit> */
EMIT4_PCREL(0xa7f40000, jit->exit_ip - jit->prg);
break;
- case BPF_S_ST: /* mem[K] = A */
+ case BPF_ST: /* mem[K] = A */
jit->seen |= SEEN_MEM;
/* st %r5,<K>(%r15) */
EMIT4_DISP(0x5050f000,
(jit->seen & SEEN_DATAREF) ? 160 + K*4 : K*4);
break;
- case BPF_S_STX: /* mem[K] = X : mov %ebx,off8(%rbp) */
+ case BPF_STX: /* mem[K] = X : mov %ebx,off8(%rbp) */
jit->seen |= SEEN_XREG | SEEN_MEM;
/* st %r12,<K>(%r15) */
EMIT4_DISP(0x50c0f000,
(jit->seen & SEEN_DATAREF) ? 160 + K*4 : K*4);
break;
- case BPF_S_ANC_PROTOCOL: /* A = ntohs(skb->protocol); */
+ case BPF_ANC | SKF_AD_PROTOCOL: /* A = ntohs(skb->protocol); */
BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, protocol) != 2);
/* lhi %r5,0 */
EMIT4(0xa7580000);
/* icm %r5,3,<d(protocol)>(%r2) */
EMIT4_DISP(0xbf532000, offsetof(struct sk_buff, protocol));
break;
- case BPF_S_ANC_IFINDEX: /* if (!skb->dev) return 0;
- * A = skb->dev->ifindex */
+ case BPF_ANC | SKF_AD_IFINDEX: /* if (!skb->dev) return 0;
+ * A = skb->dev->ifindex */
BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, ifindex) != 4);
jit->seen |= SEEN_RET0;
/* lg %r1,<d(dev)>(%r2) */
@@ -709,20 +702,20 @@ call_fn: /* lg %r1,<d(function)>(%r13) */
/* l %r5,<d(ifindex)>(%r1) */
EMIT4_DISP(0x58501000, offsetof(struct net_device, ifindex));
break;
- case BPF_S_ANC_MARK: /* A = skb->mark */
+ case BPF_ANC | SKF_AD_MARK: /* A = skb->mark */
BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4);
/* l %r5,<d(mark)>(%r2) */
EMIT4_DISP(0x58502000, offsetof(struct sk_buff, mark));
break;
- case BPF_S_ANC_QUEUE: /* A = skb->queue_mapping */
+ case BPF_ANC | SKF_AD_QUEUE: /* A = skb->queue_mapping */
BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, queue_mapping) != 2);
/* lhi %r5,0 */
EMIT4(0xa7580000);
/* icm %r5,3,<d(queue_mapping)>(%r2) */
EMIT4_DISP(0xbf532000, offsetof(struct sk_buff, queue_mapping));
break;
- case BPF_S_ANC_HATYPE: /* if (!skb->dev) return 0;
- * A = skb->dev->type */
+ case BPF_ANC | SKF_AD_HATYPE: /* if (!skb->dev) return 0;
+ * A = skb->dev->type */
BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, type) != 2);
jit->seen |= SEEN_RET0;
/* lg %r1,<d(dev)>(%r2) */
@@ -736,20 +729,20 @@ call_fn: /* lg %r1,<d(function)>(%r13) */
/* icm %r5,3,<d(type)>(%r1) */
EMIT4_DISP(0xbf531000, offsetof(struct net_device, type));
break;
- case BPF_S_ANC_RXHASH: /* A = skb->hash */
+ case BPF_ANC | SKF_AD_RXHASH: /* A = skb->hash */
BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, hash) != 4);
/* l %r5,<d(hash)>(%r2) */
EMIT4_DISP(0x58502000, offsetof(struct sk_buff, hash));
break;
- case BPF_S_ANC_VLAN_TAG:
- case BPF_S_ANC_VLAN_TAG_PRESENT:
+ case BPF_ANC | SKF_AD_VLAN_TAG:
+ case BPF_ANC | SKF_AD_VLAN_TAG_PRESENT:
BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_tci) != 2);
BUILD_BUG_ON(VLAN_TAG_PRESENT != 0x1000);
/* lhi %r5,0 */
EMIT4(0xa7580000);
/* icm %r5,3,<d(vlan_tci)>(%r2) */
EMIT4_DISP(0xbf532000, offsetof(struct sk_buff, vlan_tci));
- if (filter->code == BPF_S_ANC_VLAN_TAG) {
+ if (code == (BPF_ANC | SKF_AD_VLAN_TAG)) {
/* nill %r5,0xefff */
EMIT4_IMM(0xa5570000, ~VLAN_TAG_PRESENT);
} else {
@@ -759,7 +752,7 @@ call_fn: /* lg %r1,<d(function)>(%r13) */
EMIT4_DISP(0x88500000, 12);
}
break;
- case BPF_S_ANC_PKTTYPE:
+ case BPF_ANC | SKF_AD_PKTTYPE:
if (pkt_type_offset < 0)
goto out;
/* lhi %r5,0 */
@@ -769,7 +762,7 @@ call_fn: /* lg %r1,<d(function)>(%r13) */
/* srl %r5,5 */
EMIT4_DISP(0x88500000, 5);
break;
- case BPF_S_ANC_CPU: /* A = smp_processor_id() */
+ case BPF_ANC | SKF_AD_CPU: /* A = smp_processor_id() */
#ifdef CONFIG_SMP
/* l %r5,<d(cpu_nr)> */
EMIT4_DISP(0x58500000, offsetof(struct _lowcore, cpu_nr));
diff --git a/arch/sparc/include/asm/checksum_32.h b/arch/sparc/include/asm/checksum_32.h
index bdbda1453aa..04471dc6484 100644
--- a/arch/sparc/include/asm/checksum_32.h
+++ b/arch/sparc/include/asm/checksum_32.h
@@ -238,4 +238,16 @@ static inline __sum16 ip_compute_csum(const void *buff, int len)
return csum_fold(csum_partial(buff, len, 0));
}
+#define HAVE_ARCH_CSUM_ADD
+static inline __wsum csum_add(__wsum csum, __wsum addend)
+{
+ __asm__ __volatile__(
+ "addcc %0, %1, %0\n"
+ "addx %0, %%g0, %0"
+ : "=r" (csum)
+ : "r" (addend), "0" (csum));
+
+ return csum;
+}
+
#endif /* !(__SPARC_CHECKSUM_H) */
diff --git a/arch/sparc/include/asm/checksum_64.h b/arch/sparc/include/asm/checksum_64.h
index 019b9615e43..2ff81ae8f3a 100644
--- a/arch/sparc/include/asm/checksum_64.h
+++ b/arch/sparc/include/asm/checksum_64.h
@@ -164,4 +164,16 @@ static inline __sum16 ip_compute_csum(const void *buff, int len)
return csum_fold(csum_partial(buff, len, 0));
}
+#define HAVE_ARCH_CSUM_ADD
+static inline __wsum csum_add(__wsum csum, __wsum addend)
+{
+ __asm__ __volatile__(
+ "addcc %0, %1, %0\n"
+ "addx %0, %%g0, %0"
+ : "=r" (csum)
+ : "r" (addend), "0" (csum));
+
+ return csum;
+}
+
#endif /* !(__SPARC64_CHECKSUM_H) */
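
Both sparc headers gain the identical two-instruction helper: addcc produces the 32-bit sum and sets the carry flag, and addx adds that carry back into bit 0, which is exactly the end-around carry that one's-complement checksums require. A portable-C equivalent for reference (matching the generic csum_add() fallback in <net/checksum.h>):

#include <stdint.h>

/* editor's sketch: one's-complement add with end-around carry */
static uint32_t csum_add_generic(uint32_t csum, uint32_t addend)
{
	uint32_t res = csum + addend;

	/* if the add wrapped, fold the carry-out back into bit 0 */
	return res + (res < addend);
}
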
diff --git a/arch/sparc/net/bpf_jit_comp.c b/arch/sparc/net/bpf_jit_comp.c
index a82c6b2a978..892a102671a 100644
--- a/arch/sparc/net/bpf_jit_comp.c
+++ b/arch/sparc/net/bpf_jit_comp.c
@@ -83,9 +83,9 @@ static void bpf_flush_icache(void *start_, void *end_)
#define BNE (F2(0, 2) | CONDNE)
#ifdef CONFIG_SPARC64
-#define BNE_PTR (F2(0, 1) | CONDNE | (2 << 20))
+#define BE_PTR (F2(0, 1) | CONDE | (2 << 20))
#else
-#define BNE_PTR BNE
+#define BE_PTR BE
#endif
#define SETHI(K, REG) \
@@ -415,20 +415,11 @@ void bpf_jit_compile(struct sk_filter *fp)
emit_reg_move(O7, r_saved_O7);
switch (filter[0].code) {
- case BPF_S_RET_K:
- case BPF_S_LD_W_LEN:
- case BPF_S_ANC_PROTOCOL:
- case BPF_S_ANC_PKTTYPE:
- case BPF_S_ANC_IFINDEX:
- case BPF_S_ANC_MARK:
- case BPF_S_ANC_RXHASH:
- case BPF_S_ANC_VLAN_TAG:
- case BPF_S_ANC_VLAN_TAG_PRESENT:
- case BPF_S_ANC_CPU:
- case BPF_S_ANC_QUEUE:
- case BPF_S_LD_W_ABS:
- case BPF_S_LD_H_ABS:
- case BPF_S_LD_B_ABS:
+ case BPF_RET | BPF_K:
+ case BPF_LD | BPF_W | BPF_LEN:
+ case BPF_LD | BPF_W | BPF_ABS:
+ case BPF_LD | BPF_H | BPF_ABS:
+ case BPF_LD | BPF_B | BPF_ABS:
/* The first instruction sets the A register (or is
* a "RET 'constant'")
*/
@@ -445,59 +436,60 @@ void bpf_jit_compile(struct sk_filter *fp)
unsigned int t_offset;
unsigned int f_offset;
u32 t_op, f_op;
+ u16 code = bpf_anc_helper(&filter[i]);
int ilen;
- switch (filter[i].code) {
- case BPF_S_ALU_ADD_X: /* A += X; */
+ switch (code) {
+ case BPF_ALU | BPF_ADD | BPF_X: /* A += X; */
emit_alu_X(ADD);
break;
- case BPF_S_ALU_ADD_K: /* A += K; */
+ case BPF_ALU | BPF_ADD | BPF_K: /* A += K; */
emit_alu_K(ADD, K);
break;
- case BPF_S_ALU_SUB_X: /* A -= X; */
+ case BPF_ALU | BPF_SUB | BPF_X: /* A -= X; */
emit_alu_X(SUB);
break;
- case BPF_S_ALU_SUB_K: /* A -= K */
+ case BPF_ALU | BPF_SUB | BPF_K: /* A -= K */
emit_alu_K(SUB, K);
break;
- case BPF_S_ALU_AND_X: /* A &= X */
+ case BPF_ALU | BPF_AND | BPF_X: /* A &= X */
emit_alu_X(AND);
break;
- case BPF_S_ALU_AND_K: /* A &= K */
+ case BPF_ALU | BPF_AND | BPF_K: /* A &= K */
emit_alu_K(AND, K);
break;
- case BPF_S_ALU_OR_X: /* A |= X */
+ case BPF_ALU | BPF_OR | BPF_X: /* A |= X */
emit_alu_X(OR);
break;
- case BPF_S_ALU_OR_K: /* A |= K */
+ case BPF_ALU | BPF_OR | BPF_K: /* A |= K */
emit_alu_K(OR, K);
break;
- case BPF_S_ANC_ALU_XOR_X: /* A ^= X; */
- case BPF_S_ALU_XOR_X:
+ case BPF_ANC | SKF_AD_ALU_XOR_X: /* A ^= X; */
+ case BPF_ALU | BPF_XOR | BPF_X:
emit_alu_X(XOR);
break;
- case BPF_S_ALU_XOR_K: /* A ^= K */
+ case BPF_ALU | BPF_XOR | BPF_K: /* A ^= K */
emit_alu_K(XOR, K);
break;
- case BPF_S_ALU_LSH_X: /* A <<= X */
+ case BPF_ALU | BPF_LSH | BPF_X: /* A <<= X */
emit_alu_X(SLL);
break;
- case BPF_S_ALU_LSH_K: /* A <<= K */
+ case BPF_ALU | BPF_LSH | BPF_K: /* A <<= K */
emit_alu_K(SLL, K);
break;
- case BPF_S_ALU_RSH_X: /* A >>= X */
+ case BPF_ALU | BPF_RSH | BPF_X: /* A >>= X */
emit_alu_X(SRL);
break;
- case BPF_S_ALU_RSH_K: /* A >>= K */
+ case BPF_ALU | BPF_RSH | BPF_K: /* A >>= K */
emit_alu_K(SRL, K);
break;
- case BPF_S_ALU_MUL_X: /* A *= X; */
+ case BPF_ALU | BPF_MUL | BPF_X: /* A *= X; */
emit_alu_X(MUL);
break;
- case BPF_S_ALU_MUL_K: /* A *= K */
+ case BPF_ALU | BPF_MUL | BPF_K: /* A *= K */
emit_alu_K(MUL, K);
break;
- case BPF_S_ALU_DIV_K: /* A /= K with K != 0*/
+ case BPF_ALU | BPF_DIV | BPF_K: /* A /= K with K != 0*/
if (K == 1)
break;
emit_write_y(G0);
@@ -512,7 +504,7 @@ void bpf_jit_compile(struct sk_filter *fp)
#endif
emit_alu_K(DIV, K);
break;
- case BPF_S_ALU_DIV_X: /* A /= X; */
+ case BPF_ALU | BPF_DIV | BPF_X: /* A /= X; */
emit_cmpi(r_X, 0);
if (pc_ret0 > 0) {
t_offset = addrs[pc_ret0 - 1];
@@ -544,10 +536,10 @@ void bpf_jit_compile(struct sk_filter *fp)
#endif
emit_alu_X(DIV);
break;
- case BPF_S_ALU_NEG:
+ case BPF_ALU | BPF_NEG:
emit_neg();
break;
- case BPF_S_RET_K:
+ case BPF_RET | BPF_K:
if (!K) {
if (pc_ret0 == -1)
pc_ret0 = i;
@@ -556,7 +548,7 @@ void bpf_jit_compile(struct sk_filter *fp)
emit_loadimm(K, r_A);
}
/* Fallthrough */
- case BPF_S_RET_A:
+ case BPF_RET | BPF_A:
if (seen_or_pass0) {
if (i != flen - 1) {
emit_jump(cleanup_addr);
@@ -573,18 +565,18 @@ void bpf_jit_compile(struct sk_filter *fp)
emit_jmpl(r_saved_O7, 8, G0);
emit_reg_move(r_A, O0); /* delay slot */
break;
- case BPF_S_MISC_TAX:
+ case BPF_MISC | BPF_TAX:
seen |= SEEN_XREG;
emit_reg_move(r_A, r_X);
break;
- case BPF_S_MISC_TXA:
+ case BPF_MISC | BPF_TXA:
seen |= SEEN_XREG;
emit_reg_move(r_X, r_A);
break;
- case BPF_S_ANC_CPU:
+ case BPF_ANC | SKF_AD_CPU:
emit_load_cpu(r_A);
break;
- case BPF_S_ANC_PROTOCOL:
+ case BPF_ANC | SKF_AD_PROTOCOL:
emit_skb_load16(protocol, r_A);
break;
#if 0
@@ -592,38 +584,38 @@ void bpf_jit_compile(struct sk_filter *fp)
* a bit field even though we very much
* know what we are doing here.
*/
- case BPF_S_ANC_PKTTYPE:
+ case BPF_ANC | SKF_AD_PKTTYPE:
__emit_skb_load8(pkt_type, r_A);
emit_alu_K(SRL, 5);
break;
#endif
- case BPF_S_ANC_IFINDEX:
+ case BPF_ANC | SKF_AD_IFINDEX:
emit_skb_loadptr(dev, r_A);
emit_cmpi(r_A, 0);
- emit_branch(BNE_PTR, cleanup_addr + 4);
+ emit_branch(BE_PTR, cleanup_addr + 4);
emit_nop();
emit_load32(r_A, struct net_device, ifindex, r_A);
break;
- case BPF_S_ANC_MARK:
+ case BPF_ANC | SKF_AD_MARK:
emit_skb_load32(mark, r_A);
break;
- case BPF_S_ANC_QUEUE:
+ case BPF_ANC | SKF_AD_QUEUE:
emit_skb_load16(queue_mapping, r_A);
break;
- case BPF_S_ANC_HATYPE:
+ case BPF_ANC | SKF_AD_HATYPE:
emit_skb_loadptr(dev, r_A);
emit_cmpi(r_A, 0);
- emit_branch(BNE_PTR, cleanup_addr + 4);
+ emit_branch(BE_PTR, cleanup_addr + 4);
emit_nop();
emit_load16(r_A, struct net_device, type, r_A);
break;
- case BPF_S_ANC_RXHASH:
+ case BPF_ANC | SKF_AD_RXHASH:
emit_skb_load32(hash, r_A);
break;
- case BPF_S_ANC_VLAN_TAG:
- case BPF_S_ANC_VLAN_TAG_PRESENT:
+ case BPF_ANC | SKF_AD_VLAN_TAG:
+ case BPF_ANC | SKF_AD_VLAN_TAG_PRESENT:
emit_skb_load16(vlan_tci, r_A);
- if (filter[i].code == BPF_S_ANC_VLAN_TAG) {
+ if (code == (BPF_ANC | SKF_AD_VLAN_TAG)) {
emit_andi(r_A, VLAN_VID_MASK, r_A);
} else {
emit_loadimm(VLAN_TAG_PRESENT, r_TMP);
@@ -631,44 +623,44 @@ void bpf_jit_compile(struct sk_filter *fp)
}
break;
- case BPF_S_LD_IMM:
+ case BPF_LD | BPF_IMM:
emit_loadimm(K, r_A);
break;
- case BPF_S_LDX_IMM:
+ case BPF_LDX | BPF_IMM:
emit_loadimm(K, r_X);
break;
- case BPF_S_LD_MEM:
+ case BPF_LD | BPF_MEM:
emit_ldmem(K * 4, r_A);
break;
- case BPF_S_LDX_MEM:
+ case BPF_LDX | BPF_MEM:
emit_ldmem(K * 4, r_X);
break;
- case BPF_S_ST:
+ case BPF_ST:
emit_stmem(K * 4, r_A);
break;
- case BPF_S_STX:
+ case BPF_STX:
emit_stmem(K * 4, r_X);
break;
#define CHOOSE_LOAD_FUNC(K, func) \
((int)K < 0 ? ((int)K >= SKF_LL_OFF ? func##_negative_offset : func) : func##_positive_offset)
- case BPF_S_LD_W_ABS:
+ case BPF_LD | BPF_W | BPF_ABS:
func = CHOOSE_LOAD_FUNC(K, bpf_jit_load_word);
common_load: seen |= SEEN_DATAREF;
emit_loadimm(K, r_OFF);
emit_call(func);
break;
- case BPF_S_LD_H_ABS:
+ case BPF_LD | BPF_H | BPF_ABS:
func = CHOOSE_LOAD_FUNC(K, bpf_jit_load_half);
goto common_load;
- case BPF_S_LD_B_ABS:
+ case BPF_LD | BPF_B | BPF_ABS:
func = CHOOSE_LOAD_FUNC(K, bpf_jit_load_byte);
goto common_load;
- case BPF_S_LDX_B_MSH:
+ case BPF_LDX | BPF_B | BPF_MSH:
func = CHOOSE_LOAD_FUNC(K, bpf_jit_load_byte_msh);
goto common_load;
- case BPF_S_LD_W_IND:
+ case BPF_LD | BPF_W | BPF_IND:
func = bpf_jit_load_word;
common_load_ind: seen |= SEEN_DATAREF | SEEN_XREG;
if (K) {
@@ -683,13 +675,13 @@ common_load_ind: seen |= SEEN_DATAREF | SEEN_XREG;
}
emit_call(func);
break;
- case BPF_S_LD_H_IND:
+ case BPF_LD | BPF_H | BPF_IND:
func = bpf_jit_load_half;
goto common_load_ind;
- case BPF_S_LD_B_IND:
+ case BPF_LD | BPF_B | BPF_IND:
func = bpf_jit_load_byte;
goto common_load_ind;
- case BPF_S_JMP_JA:
+ case BPF_JMP | BPF_JA:
emit_jump(addrs[i + K]);
emit_nop();
break;
@@ -700,14 +692,14 @@ common_load_ind: seen |= SEEN_DATAREF | SEEN_XREG;
f_op = FOP; \
goto cond_branch
- COND_SEL(BPF_S_JMP_JGT_K, BGU, BLEU);
- COND_SEL(BPF_S_JMP_JGE_K, BGEU, BLU);
- COND_SEL(BPF_S_JMP_JEQ_K, BE, BNE);
- COND_SEL(BPF_S_JMP_JSET_K, BNE, BE);
- COND_SEL(BPF_S_JMP_JGT_X, BGU, BLEU);
- COND_SEL(BPF_S_JMP_JGE_X, BGEU, BLU);
- COND_SEL(BPF_S_JMP_JEQ_X, BE, BNE);
- COND_SEL(BPF_S_JMP_JSET_X, BNE, BE);
+ COND_SEL(BPF_JMP | BPF_JGT | BPF_K, BGU, BLEU);
+ COND_SEL(BPF_JMP | BPF_JGE | BPF_K, BGEU, BLU);
+ COND_SEL(BPF_JMP | BPF_JEQ | BPF_K, BE, BNE);
+ COND_SEL(BPF_JMP | BPF_JSET | BPF_K, BNE, BE);
+ COND_SEL(BPF_JMP | BPF_JGT | BPF_X, BGU, BLEU);
+ COND_SEL(BPF_JMP | BPF_JGE | BPF_X, BGEU, BLU);
+ COND_SEL(BPF_JMP | BPF_JEQ | BPF_X, BE, BNE);
+ COND_SEL(BPF_JMP | BPF_JSET | BPF_X, BNE, BE);
cond_branch: f_offset = addrs[i + filter[i].jf];
t_offset = addrs[i + filter[i].jt];
@@ -719,20 +711,20 @@ cond_branch: f_offset = addrs[i + filter[i].jf];
break;
}
- switch (filter[i].code) {
- case BPF_S_JMP_JGT_X:
- case BPF_S_JMP_JGE_X:
- case BPF_S_JMP_JEQ_X:
+ switch (code) {
+ case BPF_JMP | BPF_JGT | BPF_X:
+ case BPF_JMP | BPF_JGE | BPF_X:
+ case BPF_JMP | BPF_JEQ | BPF_X:
seen |= SEEN_XREG;
emit_cmp(r_A, r_X);
break;
- case BPF_S_JMP_JSET_X:
+ case BPF_JMP | BPF_JSET | BPF_X:
seen |= SEEN_XREG;
emit_btst(r_A, r_X);
break;
- case BPF_S_JMP_JEQ_K:
- case BPF_S_JMP_JGT_K:
- case BPF_S_JMP_JGE_K:
+ case BPF_JMP | BPF_JEQ | BPF_K:
+ case BPF_JMP | BPF_JGT | BPF_K:
+ case BPF_JMP | BPF_JGE | BPF_K:
if (is_simm13(K)) {
emit_cmpi(r_A, K);
} else {
@@ -740,7 +732,7 @@ cond_branch: f_offset = addrs[i + filter[i].jf];
emit_cmp(r_A, r_TMP);
}
break;
- case BPF_S_JMP_JSET_K:
+ case BPF_JMP | BPF_JSET | BPF_K:
if (is_simm13(K)) {
emit_btsti(r_A, K);
} else {
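
Besides the mechanical opcode conversion, this file also fixes an inverted branch: the SKF_AD_IFINDEX and SKF_AD_HATYPE loads must bail out with a return value of 0 when skb->dev is NULL, so the null test has to take the cleanup path on equal (the renamed BE_PTR), not on not-equal as the old BNE_PTR did. The intended semantics, as an editor's sketch with hypothetical stand-in types (the same logic the s390 JIT spells out in its comment):

#include <stdint.h>

struct net_device_lite { int ifindex; };                  /* stand-in, not the kernel type */
struct sk_buff_lite    { struct net_device_lite *dev; };  /* stand-in, not the kernel type */

static uint32_t load_ifindex(const struct sk_buff_lite *skb)
{
	if (!skb->dev)   /* BE_PTR: dev == NULL -> cleanup, filter returns 0 */
		return 0;
	return (uint32_t)skb->dev->ifindex;
}
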
diff --git a/arch/x86/include/asm/checksum_64.h b/arch/x86/include/asm/checksum_64.h
index e6fd8a026c7..cd00e177449 100644
--- a/arch/x86/include/asm/checksum_64.h
+++ b/arch/x86/include/asm/checksum_64.h
@@ -184,8 +184,15 @@ static inline unsigned add32_with_carry(unsigned a, unsigned b)
asm("addl %2,%0\n\t"
"adcl $0,%0"
: "=r" (a)
- : "0" (a), "r" (b));
+ : "0" (a), "rm" (b));
return a;
}
+#define HAVE_ARCH_CSUM_ADD
+static inline __wsum csum_add(__wsum csum, __wsum addend)
+{
+ return (__force __wsum)add32_with_carry((__force unsigned)csum,
+ (__force unsigned)addend);
+}
+
#endif /* _ASM_X86_CHECKSUM_64_H */
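
Two separate improvements here: csum_add() simply reuses the existing carry-folding helper, and the input constraint for b widens from "r" to "rm", letting the compiler feed addl a memory operand directly instead of first loading b into a register. A hedged standalone illustration of what the wider constraint permits:

/* editor's sketch: with "rm", gcc may emit addl (%rdi),%eax directly */
static inline unsigned add32_with_carry_demo(unsigned a, const unsigned *b)
{
	asm("addl %2,%0\n\t"
	    "adcl $0,%0"           /* fold the carry-out back in */
	    : "=r" (a)
	    : "0" (a), "rm" (*b)); /* "rm": register *or* memory operand */
	return a;
}
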
diff --git a/arch/x86/net/bpf_jit.S b/arch/x86/net/bpf_jit.S
index 01495755701..6440221ced0 100644
--- a/arch/x86/net/bpf_jit.S
+++ b/arch/x86/net/bpf_jit.S
@@ -12,13 +12,16 @@
/*
* Calling convention :
- * rdi : skb pointer
+ * rbx : skb pointer (callee saved)
* esi : offset of byte(s) to fetch in skb (can be scratched)
- * r8 : copy of skb->data
+ * r10 : copy of skb->data
* r9d : hlen = skb->len - skb->data_len
*/
-#define SKBDATA %r8
+#define SKBDATA %r10
#define SKF_MAX_NEG_OFF $(-0x200000) /* SKF_LL_OFF from filter.h */
+#define MAX_BPF_STACK (512 /* from filter.h */ + \
+ 32 /* space for rbx,r13,r14,r15 */ + \
+ 8 /* space for skb_copy_bits */)
sk_load_word:
.globl sk_load_word
@@ -68,53 +71,31 @@ sk_load_byte_positive_offset:
movzbl (SKBDATA,%rsi),%eax
ret
-/**
- * sk_load_byte_msh - BPF_S_LDX_B_MSH helper
- *
- * Implements BPF_S_LDX_B_MSH : ldxb 4*([offset]&0xf)
- * Must preserve A accumulator (%eax)
- * Inputs : %esi is the offset value
- */
-sk_load_byte_msh:
- .globl sk_load_byte_msh
- test %esi,%esi
- js bpf_slow_path_byte_msh_neg
-
-sk_load_byte_msh_positive_offset:
- .globl sk_load_byte_msh_positive_offset
- cmp %esi,%r9d /* if (offset >= hlen) goto bpf_slow_path_byte_msh */
- jle bpf_slow_path_byte_msh
- movzbl (SKBDATA,%rsi),%ebx
- and $15,%bl
- shl $2,%bl
- ret
-
/* rsi contains offset and can be scratched */
#define bpf_slow_path_common(LEN) \
- push %rdi; /* save skb */ \
+ mov %rbx, %rdi; /* arg1 == skb */ \
push %r9; \
push SKBDATA; \
/* rsi already has offset */ \
mov $LEN,%ecx; /* len */ \
- lea -12(%rbp),%rdx; \
+ lea - MAX_BPF_STACK + 32(%rbp),%rdx; \
call skb_copy_bits; \
test %eax,%eax; \
pop SKBDATA; \
- pop %r9; \
- pop %rdi
+ pop %r9;
bpf_slow_path_word:
bpf_slow_path_common(4)
js bpf_error
- mov -12(%rbp),%eax
+ mov - MAX_BPF_STACK + 32(%rbp),%eax
bswap %eax
ret
bpf_slow_path_half:
bpf_slow_path_common(2)
js bpf_error
- mov -12(%rbp),%ax
+ mov - MAX_BPF_STACK + 32(%rbp),%ax
rol $8,%ax
movzwl %ax,%eax
ret
@@ -122,21 +103,11 @@ bpf_slow_path_half:
bpf_slow_path_byte:
bpf_slow_path_common(1)
js bpf_error
- movzbl -12(%rbp),%eax
- ret
-
-bpf_slow_path_byte_msh:
- xchg %eax,%ebx /* dont lose A , X is about to be scratched */
- bpf_slow_path_common(1)
- js bpf_error
- movzbl -12(%rbp),%eax
- and $15,%al
- shl $2,%al
- xchg %eax,%ebx
+ movzbl - MAX_BPF_STACK + 32(%rbp),%eax
ret
#define sk_negative_common(SIZE) \
- push %rdi; /* save skb */ \
+ mov %rbx, %rdi; /* arg1 == skb */ \
push %r9; \
push SKBDATA; \
/* rsi already has offset */ \
@@ -145,10 +116,8 @@ bpf_slow_path_byte_msh:
test %rax,%rax; \
pop SKBDATA; \
pop %r9; \
- pop %rdi; \
jz bpf_error
-
bpf_slow_path_word_neg:
cmp SKF_MAX_NEG_OFF, %esi /* test range */
jl bpf_error /* offset lower -> error */
@@ -179,22 +148,12 @@ sk_load_byte_negative_offset:
movzbl (%rax), %eax
ret
-bpf_slow_path_byte_msh_neg:
- cmp SKF_MAX_NEG_OFF, %esi
- jl bpf_error
-sk_load_byte_msh_negative_offset:
- .globl sk_load_byte_msh_negative_offset
- xchg %eax,%ebx /* dont lose A , X is about to be scratched */
- sk_negative_common(1)
- movzbl (%rax),%eax
- and $15,%al
- shl $2,%al
- xchg %eax,%ebx
- ret
-
bpf_error:
# force a return 0 from jit handler
- xor %eax,%eax
- mov -8(%rbp),%rbx
+ xor %eax,%eax
+ mov - MAX_BPF_STACK(%rbp),%rbx
+ mov - MAX_BPF_STACK + 8(%rbp),%r13
+ mov - MAX_BPF_STACK + 16(%rbp),%r14
+ mov - MAX_BPF_STACK + 24(%rbp),%r15
leaveq
ret
diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
index 6d5663a599a..99bef86ed6d 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -1,6 +1,7 @@
/* bpf_jit_comp.c : BPF JIT compiler
*
* Copyright (C) 2011-2013 Eric Dumazet (eric.dumazet@gmail.com)
+ * Internal BPF Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@@ -14,28 +15,16 @@
#include <linux/if_vlan.h>
#include <linux/random.h>
-/*
- * Conventions :
- * EAX : BPF A accumulator
- * EBX : BPF X accumulator
- * RDI : pointer to skb (first argument given to JIT function)
- * RBP : frame pointer (even if CONFIG_FRAME_POINTER=n)
- * ECX,EDX,ESI : scratch registers
- * r9d : skb->len - skb->data_len (headlen)
- * r8 : skb->data
- * -8(RBP) : saved RBX value
- * -16(RBP)..-80(RBP) : BPF_MEMWORDS values
- */
int bpf_jit_enable __read_mostly;
/*
* assembly code in arch/x86/net/bpf_jit.S
*/
-extern u8 sk_load_word[], sk_load_half[], sk_load_byte[], sk_load_byte_msh[];
+extern u8 sk_load_word[], sk_load_half[], sk_load_byte[];
extern u8 sk_load_word_positive_offset[], sk_load_half_positive_offset[];
-extern u8 sk_load_byte_positive_offset[], sk_load_byte_msh_positive_offset[];
+extern u8 sk_load_byte_positive_offset[];
extern u8 sk_load_word_negative_offset[], sk_load_half_negative_offset[];
-extern u8 sk_load_byte_negative_offset[], sk_load_byte_msh_negative_offset[];
+extern u8 sk_load_byte_negative_offset[];
static inline u8 *emit_code(u8 *ptr, u32 bytes, unsigned int len)
{
@@ -56,30 +45,44 @@ static inline u8 *emit_code(u8 *ptr, u32 bytes, unsigned int len)
#define EMIT2(b1, b2) EMIT((b1) + ((b2) << 8), 2)
#define EMIT3(b1, b2, b3) EMIT((b1) + ((b2) << 8) + ((b3) << 16), 3)
#define EMIT4(b1, b2, b3, b4) EMIT((b1) + ((b2) << 8) + ((b3) << 16) + ((b4) << 24), 4)
-#define EMIT1_off32(b1, off) do { EMIT1(b1); EMIT(off, 4);} while (0)
-
-#define CLEAR_A() EMIT2(0x31, 0xc0) /* xor %eax,%eax */
-#define CLEAR_X() EMIT2(0x31, 0xdb) /* xor %ebx,%ebx */
+#define EMIT1_off32(b1, off) \
+ do {EMIT1(b1); EMIT(off, 4); } while (0)
+#define EMIT2_off32(b1, b2, off) \
+ do {EMIT2(b1, b2); EMIT(off, 4); } while (0)
+#define EMIT3_off32(b1, b2, b3, off) \
+ do {EMIT3(b1, b2, b3); EMIT(off, 4); } while (0)
+#define EMIT4_off32(b1, b2, b3, b4, off) \
+ do {EMIT4(b1, b2, b3, b4); EMIT(off, 4); } while (0)
static inline bool is_imm8(int value)
{
return value <= 127 && value >= -128;
}
-static inline bool is_near(int offset)
+static inline bool is_simm32(s64 value)
{
- return offset <= 127 && offset >= -128;
+ return value == (s64) (s32) value;
}
-#define EMIT_JMP(offset) \
-do { \
- if (offset) { \
- if (is_near(offset)) \
- EMIT2(0xeb, offset); /* jmp .+off8 */ \
- else \
- EMIT1_off32(0xe9, offset); /* jmp .+off32 */ \
- } \
-} while (0)
+/* mov dst, src */
+#define EMIT_mov(DST, SRC) \
+ do {if (DST != SRC) \
+ EMIT3(add_2mod(0x48, DST, SRC), 0x89, add_2reg(0xC0, DST, SRC)); \
+ } while (0)
+
+static int bpf_size_to_x86_bytes(int bpf_size)
+{
+ if (bpf_size == BPF_W)
+ return 4;
+ else if (bpf_size == BPF_H)
+ return 2;
+ else if (bpf_size == BPF_B)
+ return 1;
+ else if (bpf_size == BPF_DW)
+ return 4; /* imm32 */
+ else
+ return 0;
+}
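
One non-obvious entry above: BPF_DW maps to 4 because x86-64 has no 'mov m64, imm64' form; the immediate stays 32 bits and the CPU sign-extends it. As a worked example (mine, derived from the encoding tables that follow), the store *(u64 *)(rax + 8) = 1, i.e. BPF_ST | BPF_MEM | BPF_DW with off = 8, imm = 1, assembles to:

	/* 48 C7 40 08 01 00 00 00  ->  mov qword ptr [rax+8], 0x1 */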
/* list of x86 cond jumps opcodes (. + s8)
* Add 0x10 (and an extra 0x0f) to generate far jumps (. + s32)
@@ -90,27 +93,8 @@ do { \
#define X86_JNE 0x75
#define X86_JBE 0x76
#define X86_JA 0x77
-
-#define EMIT_COND_JMP(op, offset) \
-do { \
- if (is_near(offset)) \
- EMIT2(op, offset); /* jxx .+off8 */ \
- else { \
- EMIT2(0x0f, op + 0x10); \
- EMIT(offset, 4); /* jxx .+off32 */ \
- } \
-} while (0)
-
-#define COND_SEL(CODE, TOP, FOP) \
- case CODE: \
- t_op = TOP; \
- f_op = FOP; \
- goto cond_branch
-
-
-#define SEEN_DATAREF 1 /* might call external helpers */
-#define SEEN_XREG 2 /* ebx is used */
-#define SEEN_MEM 4 /* use mem[] for temporary storage */
+#define X86_JGE 0x7D
+#define X86_JG 0x7F
static inline void bpf_flush_icache(void *start, void *end)
{
@@ -125,26 +109,6 @@ static inline void bpf_flush_icache(void *start, void *end)
#define CHOOSE_LOAD_FUNC(K, func) \
((int)K < 0 ? ((int)K >= SKF_LL_OFF ? func##_negative_offset : func) : func##_positive_offset)
-/* Helper to find the offset of pkt_type in sk_buff
- * We want to make sure its still a 3bit field starting at a byte boundary.
- */
-#define PKT_TYPE_MAX 7
-static int pkt_type_offset(void)
-{
- struct sk_buff skb_probe = {
- .pkt_type = ~0,
- };
- char *ct = (char *)&skb_probe;
- unsigned int off;
-
- for (off = 0; off < sizeof(struct sk_buff); off++) {
- if (ct[off] == PKT_TYPE_MAX)
- return off;
- }
- pr_err_once("Please fix pkt_type_offset(), as pkt_type couldn't be found\n");
- return -1;
-}
-
struct bpf_binary_header {
unsigned int pages;
/* Note : for security reasons, bpf code will follow a randomly
@@ -178,583 +142,771 @@ static struct bpf_binary_header *bpf_alloc_binary(unsigned int proglen,
return header;
}
-void bpf_jit_compile(struct sk_filter *fp)
+/* pick a register outside of BPF range for JIT internal work */
+#define AUX_REG (MAX_BPF_REG + 1)
+
+/* the following table maps BPF registers to x64 registers.
+ * x64 register r12 is unused: when used as the base address register in
+ * load/store instructions it always needs an extra byte of encoding
+ */
+static const int reg2hex[] = {
+ [BPF_REG_0] = 0, /* rax */
+ [BPF_REG_1] = 7, /* rdi */
+ [BPF_REG_2] = 6, /* rsi */
+ [BPF_REG_3] = 2, /* rdx */
+ [BPF_REG_4] = 1, /* rcx */
+ [BPF_REG_5] = 0, /* r8 */
+ [BPF_REG_6] = 3, /* rbx callee saved */
+ [BPF_REG_7] = 5, /* r13 callee saved */
+ [BPF_REG_8] = 6, /* r14 callee saved */
+ [BPF_REG_9] = 7, /* r15 callee saved */
+ [BPF_REG_FP] = 5, /* rbp readonly */
+ [AUX_REG] = 3, /* r11 temp register */
+};
+
+/* is_ereg() == true if BPF register 'reg' maps to x64 r8..r15
+ * which need extra byte of encoding.
+ * rax,rcx,...,rbp have simpler encoding
+ */
+static inline bool is_ereg(u32 reg)
{
- u8 temp[64];
- u8 *prog;
- unsigned int proglen, oldproglen = 0;
- int ilen, i;
- int t_offset, f_offset;
- u8 t_op, f_op, seen = 0, pass;
- u8 *image = NULL;
- struct bpf_binary_header *header = NULL;
- u8 *func;
- int pc_ret0 = -1; /* bpf index of first RET #0 instruction (if any) */
- unsigned int cleanup_addr; /* epilogue code offset */
- unsigned int *addrs;
- const struct sock_filter *filter = fp->insns;
- int flen = fp->len;
+ if (reg == BPF_REG_5 || reg == AUX_REG ||
+ (reg >= BPF_REG_7 && reg <= BPF_REG_9))
+ return true;
+ else
+ return false;
+}
- if (!bpf_jit_enable)
- return;
+/* add modifiers if 'reg' maps to x64 registers r8..r15 */
+static inline u8 add_1mod(u8 byte, u32 reg)
+{
+ if (is_ereg(reg))
+ byte |= 1;
+ return byte;
+}
- addrs = kmalloc(flen * sizeof(*addrs), GFP_KERNEL);
- if (addrs == NULL)
- return;
+static inline u8 add_2mod(u8 byte, u32 r1, u32 r2)
+{
+ if (is_ereg(r1))
+ byte |= 1;
+ if (is_ereg(r2))
+ byte |= 4;
+ return byte;
+}
- /* Before first pass, make a rough estimation of addrs[]
- * each bpf instruction is translated to less than 64 bytes
+/* encode 'dst_reg' register into x64 opcode 'byte' */
+static inline u8 add_1reg(u8 byte, u32 dst_reg)
+{
+ return byte + reg2hex[dst_reg];
+}
+
+/* encode 'dst_reg' and 'src_reg' registers into x64 opcode 'byte' */
+static inline u8 add_2reg(u8 byte, u32 dst_reg, u32 src_reg)
+{
+ return byte + reg2hex[dst_reg] + (reg2hex[src_reg] << 3);
+}
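
To make the encoding helpers concrete, a hand-worked example (my own, not from the patch) for BPF_ALU64 | BPF_ADD | BPF_X with dst_reg = BPF_REG_6 (rbx) and src_reg = BPF_REG_7 (r13):

	u8 rex   = add_2mod(0x48, BPF_REG_6, BPF_REG_7); /* 0x4C: REX.W|REX.R, r13 is extended */
	u8 modrm = add_2reg(0xC0, BPF_REG_6, BPF_REG_7); /* 0xC0 + 3 + (5 << 3) = 0xEB         */
	/* emitted: 4C 01 EB  ->  add rbx, r13 */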
+
+struct jit_context {
+ unsigned int cleanup_addr; /* epilogue code offset */
+ bool seen_ld_abs;
+};
+
+static int do_jit(struct sk_filter *bpf_prog, int *addrs, u8 *image,
+ int oldproglen, struct jit_context *ctx)
+{
+ struct sock_filter_int *insn = bpf_prog->insnsi;
+ int insn_cnt = bpf_prog->len;
+ u8 temp[64];
+ int i;
+ int proglen = 0;
+ u8 *prog = temp;
+ int stacksize = MAX_BPF_STACK +
+ 32 /* space for rbx, r13, r14, r15 */ +
+ 8 /* space for skb_copy_bits() buffer */;
+
+ EMIT1(0x55); /* push rbp */
+ EMIT3(0x48, 0x89, 0xE5); /* mov rbp,rsp */
+
+ /* sub rsp, stacksize */
+ EMIT3_off32(0x48, 0x81, 0xEC, stacksize);
+
+	/* all classic BPF filters use R6(rbx), so save it */
+
+ /* mov qword ptr [rbp-X],rbx */
+ EMIT3_off32(0x48, 0x89, 0x9D, -stacksize);
+
+ /* sk_convert_filter() maps classic BPF register X to R7 and uses R8
+ * as temporary, so all tcpdump filters need to spill/fill R7(r13) and
+ * R8(r14). R9(r15) spill could be made conditional, but there is only
+ * one 'bpf_error' return path out of helper functions inside bpf_jit.S
+	 * The overhead of the extra spill is negligible for any filter other
+	 * than synthetic ones, so it is not worth the added complexity.
*/
- for (proglen = 0, i = 0; i < flen; i++) {
- proglen += 64;
- addrs[i] = proglen;
+
+ /* mov qword ptr [rbp-X],r13 */
+ EMIT3_off32(0x4C, 0x89, 0xAD, -stacksize + 8);
+ /* mov qword ptr [rbp-X],r14 */
+ EMIT3_off32(0x4C, 0x89, 0xB5, -stacksize + 16);
+ /* mov qword ptr [rbp-X],r15 */
+ EMIT3_off32(0x4C, 0x89, 0xBD, -stacksize + 24);
+
+ /* clear A and X registers */
+ EMIT2(0x31, 0xc0); /* xor eax, eax */
+ EMIT3(0x4D, 0x31, 0xED); /* xor r13, r13 */
+
+ if (ctx->seen_ld_abs) {
+ /* r9d : skb->len - skb->data_len (headlen)
+ * r10 : skb->data
+ */
+ if (is_imm8(offsetof(struct sk_buff, len)))
+ /* mov %r9d, off8(%rdi) */
+ EMIT4(0x44, 0x8b, 0x4f,
+ offsetof(struct sk_buff, len));
+ else
+ /* mov %r9d, off32(%rdi) */
+ EMIT3_off32(0x44, 0x8b, 0x8f,
+ offsetof(struct sk_buff, len));
+
+ if (is_imm8(offsetof(struct sk_buff, data_len)))
+ /* sub %r9d, off8(%rdi) */
+ EMIT4(0x44, 0x2b, 0x4f,
+ offsetof(struct sk_buff, data_len));
+ else
+ EMIT3_off32(0x44, 0x2b, 0x8f,
+ offsetof(struct sk_buff, data_len));
+
+ if (is_imm8(offsetof(struct sk_buff, data)))
+ /* mov %r10, off8(%rdi) */
+ EMIT4(0x4c, 0x8b, 0x57,
+ offsetof(struct sk_buff, data));
+ else
+ /* mov %r10, off32(%rdi) */
+ EMIT3_off32(0x4c, 0x8b, 0x97,
+ offsetof(struct sk_buff, data));
}
- cleanup_addr = proglen; /* epilogue address */
- for (pass = 0; pass < 10; pass++) {
- u8 seen_or_pass0 = (pass == 0) ? (SEEN_XREG | SEEN_DATAREF | SEEN_MEM) : seen;
- /* no prologue/epilogue for trivial filters (RET something) */
- proglen = 0;
- prog = temp;
+ for (i = 0; i < insn_cnt; i++, insn++) {
+ const s32 imm32 = insn->imm;
+ u32 dst_reg = insn->dst_reg;
+ u32 src_reg = insn->src_reg;
+ u8 b1 = 0, b2 = 0, b3 = 0;
+ s64 jmp_offset;
+ u8 jmp_cond;
+ int ilen;
+ u8 *func;
+
+ switch (insn->code) {
+ /* ALU */
+ case BPF_ALU | BPF_ADD | BPF_X:
+ case BPF_ALU | BPF_SUB | BPF_X:
+ case BPF_ALU | BPF_AND | BPF_X:
+ case BPF_ALU | BPF_OR | BPF_X:
+ case BPF_ALU | BPF_XOR | BPF_X:
+ case BPF_ALU64 | BPF_ADD | BPF_X:
+ case BPF_ALU64 | BPF_SUB | BPF_X:
+ case BPF_ALU64 | BPF_AND | BPF_X:
+ case BPF_ALU64 | BPF_OR | BPF_X:
+ case BPF_ALU64 | BPF_XOR | BPF_X:
+ switch (BPF_OP(insn->code)) {
+ case BPF_ADD: b2 = 0x01; break;
+ case BPF_SUB: b2 = 0x29; break;
+ case BPF_AND: b2 = 0x21; break;
+ case BPF_OR: b2 = 0x09; break;
+ case BPF_XOR: b2 = 0x31; break;
+ }
+ if (BPF_CLASS(insn->code) == BPF_ALU64)
+ EMIT1(add_2mod(0x48, dst_reg, src_reg));
+ else if (is_ereg(dst_reg) || is_ereg(src_reg))
+ EMIT1(add_2mod(0x40, dst_reg, src_reg));
+ EMIT2(b2, add_2reg(0xC0, dst_reg, src_reg));
+ break;
- if (seen_or_pass0) {
- EMIT4(0x55, 0x48, 0x89, 0xe5); /* push %rbp; mov %rsp,%rbp */
- EMIT4(0x48, 0x83, 0xec, 96); /* subq $96,%rsp */
- /* note : must save %rbx in case bpf_error is hit */
- if (seen_or_pass0 & (SEEN_XREG | SEEN_DATAREF))
- EMIT4(0x48, 0x89, 0x5d, 0xf8); /* mov %rbx, -8(%rbp) */
- if (seen_or_pass0 & SEEN_XREG)
- CLEAR_X(); /* make sure we dont leek kernel memory */
-
- /*
- * If this filter needs to access skb data,
- * loads r9 and r8 with :
- * r9 = skb->len - skb->data_len
- * r8 = skb->data
+ /* mov dst, src */
+ case BPF_ALU64 | BPF_MOV | BPF_X:
+ EMIT_mov(dst_reg, src_reg);
+ break;
+
+ /* mov32 dst, src */
+ case BPF_ALU | BPF_MOV | BPF_X:
+ if (is_ereg(dst_reg) || is_ereg(src_reg))
+ EMIT1(add_2mod(0x40, dst_reg, src_reg));
+ EMIT2(0x89, add_2reg(0xC0, dst_reg, src_reg));
+ break;
+
+ /* neg dst */
+ case BPF_ALU | BPF_NEG:
+ case BPF_ALU64 | BPF_NEG:
+ if (BPF_CLASS(insn->code) == BPF_ALU64)
+ EMIT1(add_1mod(0x48, dst_reg));
+ else if (is_ereg(dst_reg))
+ EMIT1(add_1mod(0x40, dst_reg));
+ EMIT2(0xF7, add_1reg(0xD8, dst_reg));
+ break;
+
+ case BPF_ALU | BPF_ADD | BPF_K:
+ case BPF_ALU | BPF_SUB | BPF_K:
+ case BPF_ALU | BPF_AND | BPF_K:
+ case BPF_ALU | BPF_OR | BPF_K:
+ case BPF_ALU | BPF_XOR | BPF_K:
+ case BPF_ALU64 | BPF_ADD | BPF_K:
+ case BPF_ALU64 | BPF_SUB | BPF_K:
+ case BPF_ALU64 | BPF_AND | BPF_K:
+ case BPF_ALU64 | BPF_OR | BPF_K:
+ case BPF_ALU64 | BPF_XOR | BPF_K:
+ if (BPF_CLASS(insn->code) == BPF_ALU64)
+ EMIT1(add_1mod(0x48, dst_reg));
+ else if (is_ereg(dst_reg))
+ EMIT1(add_1mod(0x40, dst_reg));
+
+ switch (BPF_OP(insn->code)) {
+ case BPF_ADD: b3 = 0xC0; break;
+ case BPF_SUB: b3 = 0xE8; break;
+ case BPF_AND: b3 = 0xE0; break;
+ case BPF_OR: b3 = 0xC8; break;
+ case BPF_XOR: b3 = 0xF0; break;
+ }
+
+ if (is_imm8(imm32))
+ EMIT3(0x83, add_1reg(b3, dst_reg), imm32);
+ else
+ EMIT2_off32(0x81, add_1reg(b3, dst_reg), imm32);
+ break;
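
A worked example of the imm8/imm32 split above (illustrative values of mine): BPF_ALU64 | BPF_ADD | BPF_K on dst_reg = BPF_REG_0 (rax) emits

	/* K = 5    : 48 83 C0 05           ->  add rax, 5    (0x83, imm8)  */
	/* K = 4096 : 48 81 C0 00 10 00 00  ->  add rax, 4096 (0x81, imm32) */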
+
+ case BPF_ALU64 | BPF_MOV | BPF_K:
+ /* optimization: if imm32 is positive,
+ * use 'mov eax, imm32' (which zero-extends imm32)
+ * to save 2 bytes
*/
- if (seen_or_pass0 & SEEN_DATAREF) {
- if (offsetof(struct sk_buff, len) <= 127)
- /* mov off8(%rdi),%r9d */
- EMIT4(0x44, 0x8b, 0x4f, offsetof(struct sk_buff, len));
- else {
- /* mov off32(%rdi),%r9d */
- EMIT3(0x44, 0x8b, 0x8f);
- EMIT(offsetof(struct sk_buff, len), 4);
- }
- if (is_imm8(offsetof(struct sk_buff, data_len)))
- /* sub off8(%rdi),%r9d */
- EMIT4(0x44, 0x2b, 0x4f, offsetof(struct sk_buff, data_len));
- else {
- EMIT3(0x44, 0x2b, 0x8f);
- EMIT(offsetof(struct sk_buff, data_len), 4);
- }
+ if (imm32 < 0) {
+ /* 'mov rax, imm32' sign extends imm32 */
+ b1 = add_1mod(0x48, dst_reg);
+ b2 = 0xC7;
+ b3 = 0xC0;
+ EMIT3_off32(b1, b2, add_1reg(b3, dst_reg), imm32);
+ break;
+ }
- if (is_imm8(offsetof(struct sk_buff, data)))
- /* mov off8(%rdi),%r8 */
- EMIT4(0x4c, 0x8b, 0x47, offsetof(struct sk_buff, data));
- else {
- /* mov off32(%rdi),%r8 */
- EMIT3(0x4c, 0x8b, 0x87);
- EMIT(offsetof(struct sk_buff, data), 4);
- }
+ case BPF_ALU | BPF_MOV | BPF_K:
+ /* mov %eax, imm32 */
+ if (is_ereg(dst_reg))
+ EMIT1(add_1mod(0x40, dst_reg));
+ EMIT1_off32(add_1reg(0xB8, dst_reg), imm32);
+ break;
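
The fall-through from the ALU64 case into this mov32 case is what implements the size optimization described above; a worked example (mine), with dst_reg = BPF_REG_0 (rax):

	/* imm32 = 5  : B8 05 00 00 00       -> mov eax, 5  (5 bytes; writing
	 *              eax zero-extends into rax)
	 * imm32 = -5 : 48 C7 C0 FB FF FF FF -> mov rax, -5 (7 bytes, imm32
	 *              sign-extended)
	 */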
+
+ /* dst %= src, dst /= src, dst %= imm32, dst /= imm32 */
+ case BPF_ALU | BPF_MOD | BPF_X:
+ case BPF_ALU | BPF_DIV | BPF_X:
+ case BPF_ALU | BPF_MOD | BPF_K:
+ case BPF_ALU | BPF_DIV | BPF_K:
+ case BPF_ALU64 | BPF_MOD | BPF_X:
+ case BPF_ALU64 | BPF_DIV | BPF_X:
+ case BPF_ALU64 | BPF_MOD | BPF_K:
+ case BPF_ALU64 | BPF_DIV | BPF_K:
+ EMIT1(0x50); /* push rax */
+ EMIT1(0x52); /* push rdx */
+
+ if (BPF_SRC(insn->code) == BPF_X)
+ /* mov r11, src_reg */
+ EMIT_mov(AUX_REG, src_reg);
+ else
+ /* mov r11, imm32 */
+ EMIT3_off32(0x49, 0xC7, 0xC3, imm32);
+
+ /* mov rax, dst_reg */
+ EMIT_mov(BPF_REG_0, dst_reg);
+
+ /* xor edx, edx
+ * equivalent to 'xor rdx, rdx', but one byte less
+ */
+ EMIT2(0x31, 0xd2);
+
+ if (BPF_SRC(insn->code) == BPF_X) {
+ /* if (src_reg == 0) return 0 */
+
+ /* cmp r11, 0 */
+ EMIT4(0x49, 0x83, 0xFB, 0x00);
+
+ /* jne .+9 (skip over pop, pop, xor and jmp) */
+ EMIT2(X86_JNE, 1 + 1 + 2 + 5);
+ EMIT1(0x5A); /* pop rdx */
+ EMIT1(0x58); /* pop rax */
+ EMIT2(0x31, 0xc0); /* xor eax, eax */
+
+ /* jmp cleanup_addr
+ * addrs[i] - 11, because there are 11 bytes
+ * after this insn: div, mov, pop, pop, mov
+ */
+ jmp_offset = ctx->cleanup_addr - (addrs[i] - 11);
+ EMIT1_off32(0xE9, jmp_offset);
}
- }
- switch (filter[0].code) {
- case BPF_S_RET_K:
- case BPF_S_LD_W_LEN:
- case BPF_S_ANC_PROTOCOL:
- case BPF_S_ANC_IFINDEX:
- case BPF_S_ANC_MARK:
- case BPF_S_ANC_RXHASH:
- case BPF_S_ANC_CPU:
- case BPF_S_ANC_VLAN_TAG:
- case BPF_S_ANC_VLAN_TAG_PRESENT:
- case BPF_S_ANC_QUEUE:
- case BPF_S_ANC_PKTTYPE:
- case BPF_S_LD_W_ABS:
- case BPF_S_LD_H_ABS:
- case BPF_S_LD_B_ABS:
- /* first instruction sets A register (or is RET 'constant') */
+ if (BPF_CLASS(insn->code) == BPF_ALU64)
+ /* div r11 */
+ EMIT3(0x49, 0xF7, 0xF3);
+ else
+ /* div r11d */
+ EMIT3(0x41, 0xF7, 0xF3);
+
+ if (BPF_OP(insn->code) == BPF_MOD)
+ /* mov r11, rdx */
+ EMIT3(0x49, 0x89, 0xD3);
+ else
+ /* mov r11, rax */
+ EMIT3(0x49, 0x89, 0xC3);
+
+ EMIT1(0x5A); /* pop rdx */
+ EMIT1(0x58); /* pop rax */
+
+ /* mov dst_reg, r11 */
+ EMIT_mov(dst_reg, AUX_REG);
break;
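
The two magic offsets in the divide-by-zero path, spelled out (my arithmetic, consistent with the comments above):

	/* jne skip      : 1 (pop rdx) + 1 (pop rax) + 2 (xor) + 5 (jmp rel32) = 9 */
	/* addrs[i] - 11 : 3 (div) + 3 (mov r11) + 1 + 1 (pops) + 3 (mov dst)  = 11 */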
- default:
- /* make sure we dont leak kernel information to user */
- CLEAR_A(); /* A = 0 */
- }
- for (i = 0; i < flen; i++) {
- unsigned int K = filter[i].k;
+ case BPF_ALU | BPF_MUL | BPF_K:
+ case BPF_ALU | BPF_MUL | BPF_X:
+ case BPF_ALU64 | BPF_MUL | BPF_K:
+ case BPF_ALU64 | BPF_MUL | BPF_X:
+ EMIT1(0x50); /* push rax */
+ EMIT1(0x52); /* push rdx */
+
+ /* mov r11, dst_reg */
+ EMIT_mov(AUX_REG, dst_reg);
+
+ if (BPF_SRC(insn->code) == BPF_X)
+ /* mov rax, src_reg */
+ EMIT_mov(BPF_REG_0, src_reg);
+ else
+ /* mov rax, imm32 */
+ EMIT3_off32(0x48, 0xC7, 0xC0, imm32);
+
+ if (BPF_CLASS(insn->code) == BPF_ALU64)
+ EMIT1(add_1mod(0x48, AUX_REG));
+ else if (is_ereg(AUX_REG))
+ EMIT1(add_1mod(0x40, AUX_REG));
+ /* mul(q) r11 */
+ EMIT2(0xF7, add_1reg(0xE0, AUX_REG));
+
+ /* mov r11, rax */
+ EMIT_mov(AUX_REG, BPF_REG_0);
+
+ EMIT1(0x5A); /* pop rdx */
+ EMIT1(0x58); /* pop rax */
+
+ /* mov dst_reg, r11 */
+ EMIT_mov(dst_reg, AUX_REG);
+ break;
- switch (filter[i].code) {
- case BPF_S_ALU_ADD_X: /* A += X; */
- seen |= SEEN_XREG;
- EMIT2(0x01, 0xd8); /* add %ebx,%eax */
- break;
- case BPF_S_ALU_ADD_K: /* A += K; */
- if (!K)
- break;
- if (is_imm8(K))
- EMIT3(0x83, 0xc0, K); /* add imm8,%eax */
- else
- EMIT1_off32(0x05, K); /* add imm32,%eax */
- break;
- case BPF_S_ALU_SUB_X: /* A -= X; */
- seen |= SEEN_XREG;
- EMIT2(0x29, 0xd8); /* sub %ebx,%eax */
- break;
- case BPF_S_ALU_SUB_K: /* A -= K */
- if (!K)
- break;
- if (is_imm8(K))
- EMIT3(0x83, 0xe8, K); /* sub imm8,%eax */
- else
- EMIT1_off32(0x2d, K); /* sub imm32,%eax */
- break;
- case BPF_S_ALU_MUL_X: /* A *= X; */
- seen |= SEEN_XREG;
- EMIT3(0x0f, 0xaf, 0xc3); /* imul %ebx,%eax */
- break;
- case BPF_S_ALU_MUL_K: /* A *= K */
- if (is_imm8(K))
- EMIT3(0x6b, 0xc0, K); /* imul imm8,%eax,%eax */
- else {
- EMIT2(0x69, 0xc0); /* imul imm32,%eax */
- EMIT(K, 4);
- }
- break;
- case BPF_S_ALU_DIV_X: /* A /= X; */
- seen |= SEEN_XREG;
- EMIT2(0x85, 0xdb); /* test %ebx,%ebx */
- if (pc_ret0 > 0) {
- /* addrs[pc_ret0 - 1] is start address of target
- * (addrs[i] - 4) is the address following this jmp
- * ("xor %edx,%edx; div %ebx" being 4 bytes long)
- */
- EMIT_COND_JMP(X86_JE, addrs[pc_ret0 - 1] -
- (addrs[i] - 4));
- } else {
- EMIT_COND_JMP(X86_JNE, 2 + 5);
- CLEAR_A();
- EMIT1_off32(0xe9, cleanup_addr - (addrs[i] - 4)); /* jmp .+off32 */
- }
- EMIT4(0x31, 0xd2, 0xf7, 0xf3); /* xor %edx,%edx; div %ebx */
- break;
- case BPF_S_ALU_MOD_X: /* A %= X; */
- seen |= SEEN_XREG;
- EMIT2(0x85, 0xdb); /* test %ebx,%ebx */
- if (pc_ret0 > 0) {
- /* addrs[pc_ret0 - 1] is start address of target
- * (addrs[i] - 6) is the address following this jmp
- * ("xor %edx,%edx; div %ebx;mov %edx,%eax" being 6 bytes long)
- */
- EMIT_COND_JMP(X86_JE, addrs[pc_ret0 - 1] -
- (addrs[i] - 6));
- } else {
- EMIT_COND_JMP(X86_JNE, 2 + 5);
- CLEAR_A();
- EMIT1_off32(0xe9, cleanup_addr - (addrs[i] - 6)); /* jmp .+off32 */
- }
- EMIT2(0x31, 0xd2); /* xor %edx,%edx */
- EMIT2(0xf7, 0xf3); /* div %ebx */
- EMIT2(0x89, 0xd0); /* mov %edx,%eax */
- break;
- case BPF_S_ALU_MOD_K: /* A %= K; */
- if (K == 1) {
- CLEAR_A();
- break;
- }
- EMIT2(0x31, 0xd2); /* xor %edx,%edx */
- EMIT1(0xb9);EMIT(K, 4); /* mov imm32,%ecx */
- EMIT2(0xf7, 0xf1); /* div %ecx */
- EMIT2(0x89, 0xd0); /* mov %edx,%eax */
- break;
- case BPF_S_ALU_DIV_K: /* A /= K */
- if (K == 1)
- break;
- EMIT2(0x31, 0xd2); /* xor %edx,%edx */
- EMIT1(0xb9);EMIT(K, 4); /* mov imm32,%ecx */
- EMIT2(0xf7, 0xf1); /* div %ecx */
- break;
- case BPF_S_ALU_AND_X:
- seen |= SEEN_XREG;
- EMIT2(0x21, 0xd8); /* and %ebx,%eax */
- break;
- case BPF_S_ALU_AND_K:
- if (K >= 0xFFFFFF00) {
- EMIT2(0x24, K & 0xFF); /* and imm8,%al */
- } else if (K >= 0xFFFF0000) {
- EMIT2(0x66, 0x25); /* and imm16,%ax */
- EMIT(K, 2);
- } else {
- EMIT1_off32(0x25, K); /* and imm32,%eax */
- }
- break;
- case BPF_S_ALU_OR_X:
- seen |= SEEN_XREG;
- EMIT2(0x09, 0xd8); /* or %ebx,%eax */
- break;
- case BPF_S_ALU_OR_K:
- if (is_imm8(K))
- EMIT3(0x83, 0xc8, K); /* or imm8,%eax */
- else
- EMIT1_off32(0x0d, K); /* or imm32,%eax */
- break;
- case BPF_S_ANC_ALU_XOR_X: /* A ^= X; */
- case BPF_S_ALU_XOR_X:
- seen |= SEEN_XREG;
- EMIT2(0x31, 0xd8); /* xor %ebx,%eax */
- break;
- case BPF_S_ALU_XOR_K: /* A ^= K; */
- if (K == 0)
- break;
- if (is_imm8(K))
- EMIT3(0x83, 0xf0, K); /* xor imm8,%eax */
- else
- EMIT1_off32(0x35, K); /* xor imm32,%eax */
- break;
- case BPF_S_ALU_LSH_X: /* A <<= X; */
- seen |= SEEN_XREG;
- EMIT4(0x89, 0xd9, 0xd3, 0xe0); /* mov %ebx,%ecx; shl %cl,%eax */
- break;
- case BPF_S_ALU_LSH_K:
- if (K == 0)
- break;
- else if (K == 1)
- EMIT2(0xd1, 0xe0); /* shl %eax */
- else
- EMIT3(0xc1, 0xe0, K);
- break;
- case BPF_S_ALU_RSH_X: /* A >>= X; */
- seen |= SEEN_XREG;
- EMIT4(0x89, 0xd9, 0xd3, 0xe8); /* mov %ebx,%ecx; shr %cl,%eax */
- break;
- case BPF_S_ALU_RSH_K: /* A >>= K; */
- if (K == 0)
- break;
- else if (K == 1)
- EMIT2(0xd1, 0xe8); /* shr %eax */
- else
- EMIT3(0xc1, 0xe8, K);
- break;
- case BPF_S_ALU_NEG:
- EMIT2(0xf7, 0xd8); /* neg %eax */
- break;
- case BPF_S_RET_K:
- if (!K) {
- if (pc_ret0 == -1)
- pc_ret0 = i;
- CLEAR_A();
- } else {
- EMIT1_off32(0xb8, K); /* mov $imm32,%eax */
- }
- /* fallinto */
- case BPF_S_RET_A:
- if (seen_or_pass0) {
- if (i != flen - 1) {
- EMIT_JMP(cleanup_addr - addrs[i]);
- break;
- }
- if (seen_or_pass0 & SEEN_XREG)
- EMIT4(0x48, 0x8b, 0x5d, 0xf8); /* mov -8(%rbp),%rbx */
- EMIT1(0xc9); /* leaveq */
- }
- EMIT1(0xc3); /* ret */
- break;
- case BPF_S_MISC_TAX: /* X = A */
- seen |= SEEN_XREG;
- EMIT2(0x89, 0xc3); /* mov %eax,%ebx */
- break;
- case BPF_S_MISC_TXA: /* A = X */
- seen |= SEEN_XREG;
- EMIT2(0x89, 0xd8); /* mov %ebx,%eax */
- break;
- case BPF_S_LD_IMM: /* A = K */
- if (!K)
- CLEAR_A();
- else
- EMIT1_off32(0xb8, K); /* mov $imm32,%eax */
- break;
- case BPF_S_LDX_IMM: /* X = K */
- seen |= SEEN_XREG;
- if (!K)
- CLEAR_X();
+ /* shifts */
+ case BPF_ALU | BPF_LSH | BPF_K:
+ case BPF_ALU | BPF_RSH | BPF_K:
+ case BPF_ALU | BPF_ARSH | BPF_K:
+ case BPF_ALU64 | BPF_LSH | BPF_K:
+ case BPF_ALU64 | BPF_RSH | BPF_K:
+ case BPF_ALU64 | BPF_ARSH | BPF_K:
+ if (BPF_CLASS(insn->code) == BPF_ALU64)
+ EMIT1(add_1mod(0x48, dst_reg));
+ else if (is_ereg(dst_reg))
+ EMIT1(add_1mod(0x40, dst_reg));
+
+ switch (BPF_OP(insn->code)) {
+ case BPF_LSH: b3 = 0xE0; break;
+ case BPF_RSH: b3 = 0xE8; break;
+ case BPF_ARSH: b3 = 0xF8; break;
+ }
+ EMIT3(0xC1, add_1reg(b3, dst_reg), imm32);
+ break;
+
+ case BPF_ALU | BPF_END | BPF_FROM_BE:
+ switch (imm32) {
+ case 16:
+ /* emit 'ror %ax, 8' to swap lower 2 bytes */
+ EMIT1(0x66);
+ if (is_ereg(dst_reg))
+ EMIT1(0x41);
+ EMIT3(0xC1, add_1reg(0xC8, dst_reg), 8);
+ break;
+ case 32:
+ /* emit 'bswap eax' to swap lower 4 bytes */
+ if (is_ereg(dst_reg))
+ EMIT2(0x41, 0x0F);
else
- EMIT1_off32(0xbb, K); /* mov $imm32,%ebx */
- break;
- case BPF_S_LD_MEM: /* A = mem[K] : mov off8(%rbp),%eax */
- seen |= SEEN_MEM;
- EMIT3(0x8b, 0x45, 0xf0 - K*4);
- break;
- case BPF_S_LDX_MEM: /* X = mem[K] : mov off8(%rbp),%ebx */
- seen |= SEEN_XREG | SEEN_MEM;
- EMIT3(0x8b, 0x5d, 0xf0 - K*4);
- break;
- case BPF_S_ST: /* mem[K] = A : mov %eax,off8(%rbp) */
- seen |= SEEN_MEM;
- EMIT3(0x89, 0x45, 0xf0 - K*4);
- break;
- case BPF_S_STX: /* mem[K] = X : mov %ebx,off8(%rbp) */
- seen |= SEEN_XREG | SEEN_MEM;
- EMIT3(0x89, 0x5d, 0xf0 - K*4);
- break;
- case BPF_S_LD_W_LEN: /* A = skb->len; */
- BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, len) != 4);
- if (is_imm8(offsetof(struct sk_buff, len)))
- /* mov off8(%rdi),%eax */
- EMIT3(0x8b, 0x47, offsetof(struct sk_buff, len));
- else {
- EMIT2(0x8b, 0x87);
- EMIT(offsetof(struct sk_buff, len), 4);
- }
- break;
- case BPF_S_LDX_W_LEN: /* X = skb->len; */
- seen |= SEEN_XREG;
- if (is_imm8(offsetof(struct sk_buff, len)))
- /* mov off8(%rdi),%ebx */
- EMIT3(0x8b, 0x5f, offsetof(struct sk_buff, len));
- else {
- EMIT2(0x8b, 0x9f);
- EMIT(offsetof(struct sk_buff, len), 4);
- }
- break;
- case BPF_S_ANC_PROTOCOL: /* A = ntohs(skb->protocol); */
- BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, protocol) != 2);
- if (is_imm8(offsetof(struct sk_buff, protocol))) {
- /* movzwl off8(%rdi),%eax */
- EMIT4(0x0f, 0xb7, 0x47, offsetof(struct sk_buff, protocol));
- } else {
- EMIT3(0x0f, 0xb7, 0x87); /* movzwl off32(%rdi),%eax */
- EMIT(offsetof(struct sk_buff, protocol), 4);
- }
- EMIT2(0x86, 0xc4); /* ntohs() : xchg %al,%ah */
- break;
- case BPF_S_ANC_IFINDEX:
- if (is_imm8(offsetof(struct sk_buff, dev))) {
- /* movq off8(%rdi),%rax */
- EMIT4(0x48, 0x8b, 0x47, offsetof(struct sk_buff, dev));
- } else {
- EMIT3(0x48, 0x8b, 0x87); /* movq off32(%rdi),%rax */
- EMIT(offsetof(struct sk_buff, dev), 4);
- }
- EMIT3(0x48, 0x85, 0xc0); /* test %rax,%rax */
- EMIT_COND_JMP(X86_JE, cleanup_addr - (addrs[i] - 6));
- BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, ifindex) != 4);
- EMIT2(0x8b, 0x80); /* mov off32(%rax),%eax */
- EMIT(offsetof(struct net_device, ifindex), 4);
- break;
- case BPF_S_ANC_MARK:
- BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4);
- if (is_imm8(offsetof(struct sk_buff, mark))) {
- /* mov off8(%rdi),%eax */
- EMIT3(0x8b, 0x47, offsetof(struct sk_buff, mark));
- } else {
- EMIT2(0x8b, 0x87);
- EMIT(offsetof(struct sk_buff, mark), 4);
- }
- break;
- case BPF_S_ANC_RXHASH:
- BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, hash) != 4);
- if (is_imm8(offsetof(struct sk_buff, hash))) {
- /* mov off8(%rdi),%eax */
- EMIT3(0x8b, 0x47, offsetof(struct sk_buff, hash));
- } else {
- EMIT2(0x8b, 0x87);
- EMIT(offsetof(struct sk_buff, hash), 4);
- }
- break;
- case BPF_S_ANC_QUEUE:
- BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, queue_mapping) != 2);
- if (is_imm8(offsetof(struct sk_buff, queue_mapping))) {
- /* movzwl off8(%rdi),%eax */
- EMIT4(0x0f, 0xb7, 0x47, offsetof(struct sk_buff, queue_mapping));
- } else {
- EMIT3(0x0f, 0xb7, 0x87); /* movzwl off32(%rdi),%eax */
- EMIT(offsetof(struct sk_buff, queue_mapping), 4);
- }
- break;
- case BPF_S_ANC_CPU:
-#ifdef CONFIG_SMP
- EMIT4(0x65, 0x8b, 0x04, 0x25); /* mov %gs:off32,%eax */
- EMIT((u32)(unsigned long)&cpu_number, 4); /* A = smp_processor_id(); */
-#else
- CLEAR_A();
-#endif
+ EMIT1(0x0F);
+ EMIT1(add_1reg(0xC8, dst_reg));
break;
- case BPF_S_ANC_VLAN_TAG:
- case BPF_S_ANC_VLAN_TAG_PRESENT:
- BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_tci) != 2);
- if (is_imm8(offsetof(struct sk_buff, vlan_tci))) {
- /* movzwl off8(%rdi),%eax */
- EMIT4(0x0f, 0xb7, 0x47, offsetof(struct sk_buff, vlan_tci));
- } else {
- EMIT3(0x0f, 0xb7, 0x87); /* movzwl off32(%rdi),%eax */
- EMIT(offsetof(struct sk_buff, vlan_tci), 4);
- }
- BUILD_BUG_ON(VLAN_TAG_PRESENT != 0x1000);
- if (filter[i].code == BPF_S_ANC_VLAN_TAG) {
- EMIT3(0x80, 0xe4, 0xef); /* and $0xef,%ah */
- } else {
- EMIT3(0xc1, 0xe8, 0x0c); /* shr $0xc,%eax */
- EMIT3(0x83, 0xe0, 0x01); /* and $0x1,%eax */
- }
- break;
- case BPF_S_ANC_PKTTYPE:
- {
- int off = pkt_type_offset();
-
- if (off < 0)
- goto out;
- if (is_imm8(off)) {
- /* movzbl off8(%rdi),%eax */
- EMIT4(0x0f, 0xb6, 0x47, off);
- } else {
- /* movbl off32(%rdi),%eax */
- EMIT3(0x0f, 0xb6, 0x87);
- EMIT(off, 4);
- }
- EMIT3(0x83, 0xe0, PKT_TYPE_MAX); /* and $0x7,%eax */
+ case 64:
+ /* emit 'bswap rax' to swap 8 bytes */
+ EMIT3(add_1mod(0x48, dst_reg), 0x0F,
+ add_1reg(0xC8, dst_reg));
break;
}
- case BPF_S_LD_W_ABS:
- func = CHOOSE_LOAD_FUNC(K, sk_load_word);
-common_load: seen |= SEEN_DATAREF;
- t_offset = func - (image + addrs[i]);
- EMIT1_off32(0xbe, K); /* mov imm32,%esi */
- EMIT1_off32(0xe8, t_offset); /* call */
- break;
- case BPF_S_LD_H_ABS:
- func = CHOOSE_LOAD_FUNC(K, sk_load_half);
- goto common_load;
- case BPF_S_LD_B_ABS:
- func = CHOOSE_LOAD_FUNC(K, sk_load_byte);
- goto common_load;
- case BPF_S_LDX_B_MSH:
- func = CHOOSE_LOAD_FUNC(K, sk_load_byte_msh);
- seen |= SEEN_DATAREF | SEEN_XREG;
- t_offset = func - (image + addrs[i]);
- EMIT1_off32(0xbe, K); /* mov imm32,%esi */
- EMIT1_off32(0xe8, t_offset); /* call sk_load_byte_msh */
- break;
- case BPF_S_LD_W_IND:
- func = sk_load_word;
-common_load_ind: seen |= SEEN_DATAREF | SEEN_XREG;
- t_offset = func - (image + addrs[i]);
- if (K) {
- if (is_imm8(K)) {
- EMIT3(0x8d, 0x73, K); /* lea imm8(%rbx), %esi */
- } else {
- EMIT2(0x8d, 0xb3); /* lea imm32(%rbx),%esi */
- EMIT(K, 4);
- }
- } else {
- EMIT2(0x89,0xde); /* mov %ebx,%esi */
- }
- EMIT1_off32(0xe8, t_offset); /* call sk_load_xxx_ind */
- break;
- case BPF_S_LD_H_IND:
- func = sk_load_half;
- goto common_load_ind;
- case BPF_S_LD_B_IND:
- func = sk_load_byte;
- goto common_load_ind;
- case BPF_S_JMP_JA:
- t_offset = addrs[i + K] - addrs[i];
- EMIT_JMP(t_offset);
- break;
- COND_SEL(BPF_S_JMP_JGT_K, X86_JA, X86_JBE);
- COND_SEL(BPF_S_JMP_JGE_K, X86_JAE, X86_JB);
- COND_SEL(BPF_S_JMP_JEQ_K, X86_JE, X86_JNE);
- COND_SEL(BPF_S_JMP_JSET_K,X86_JNE, X86_JE);
- COND_SEL(BPF_S_JMP_JGT_X, X86_JA, X86_JBE);
- COND_SEL(BPF_S_JMP_JGE_X, X86_JAE, X86_JB);
- COND_SEL(BPF_S_JMP_JEQ_X, X86_JE, X86_JNE);
- COND_SEL(BPF_S_JMP_JSET_X,X86_JNE, X86_JE);
-
-cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
- t_offset = addrs[i + filter[i].jt] - addrs[i];
-
- /* same targets, can avoid doing the test :) */
- if (filter[i].jt == filter[i].jf) {
- EMIT_JMP(t_offset);
- break;
- }
+ break;
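
Worked byte sequences for the three conversions (my examples), with dst_reg = BPF_REG_0 (rax):

	/* imm32 = 16 : 66 C1 C8 08  ->  ror ax, 8  (swap the low 2 bytes) */
	/* imm32 = 32 : 0F C8        ->  bswap eax                         */
	/* imm32 = 64 : 48 0F C8     ->  bswap rax                         */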
+
+ case BPF_ALU | BPF_END | BPF_FROM_LE:
+ break;
+
+ /* ST: *(u8*)(dst_reg + off) = imm */
+ case BPF_ST | BPF_MEM | BPF_B:
+ if (is_ereg(dst_reg))
+ EMIT2(0x41, 0xC6);
+ else
+ EMIT1(0xC6);
+ goto st;
+ case BPF_ST | BPF_MEM | BPF_H:
+ if (is_ereg(dst_reg))
+ EMIT3(0x66, 0x41, 0xC7);
+ else
+ EMIT2(0x66, 0xC7);
+ goto st;
+ case BPF_ST | BPF_MEM | BPF_W:
+ if (is_ereg(dst_reg))
+ EMIT2(0x41, 0xC7);
+ else
+ EMIT1(0xC7);
+ goto st;
+ case BPF_ST | BPF_MEM | BPF_DW:
+ EMIT2(add_1mod(0x48, dst_reg), 0xC7);
+
+st: if (is_imm8(insn->off))
+ EMIT2(add_1reg(0x40, dst_reg), insn->off);
+ else
+ EMIT1_off32(add_1reg(0x80, dst_reg), insn->off);
+
+ EMIT(imm32, bpf_size_to_x86_bytes(BPF_SIZE(insn->code)));
+ break;
+
+ /* STX: *(u8*)(dst_reg + off) = src_reg */
+ case BPF_STX | BPF_MEM | BPF_B:
+ /* emit 'mov byte ptr [rax + off], al' */
+ if (is_ereg(dst_reg) || is_ereg(src_reg) ||
+ /* have to add extra byte for x86 SIL, DIL regs */
+ src_reg == BPF_REG_1 || src_reg == BPF_REG_2)
+ EMIT2(add_2mod(0x40, dst_reg, src_reg), 0x88);
+ else
+ EMIT1(0x88);
+ goto stx;
+ case BPF_STX | BPF_MEM | BPF_H:
+ if (is_ereg(dst_reg) || is_ereg(src_reg))
+ EMIT3(0x66, add_2mod(0x40, dst_reg, src_reg), 0x89);
+ else
+ EMIT2(0x66, 0x89);
+ goto stx;
+ case BPF_STX | BPF_MEM | BPF_W:
+ if (is_ereg(dst_reg) || is_ereg(src_reg))
+ EMIT2(add_2mod(0x40, dst_reg, src_reg), 0x89);
+ else
+ EMIT1(0x89);
+ goto stx;
+ case BPF_STX | BPF_MEM | BPF_DW:
+ EMIT2(add_2mod(0x48, dst_reg, src_reg), 0x89);
+stx: if (is_imm8(insn->off))
+ EMIT2(add_2reg(0x40, dst_reg, src_reg), insn->off);
+ else
+ EMIT1_off32(add_2reg(0x80, dst_reg, src_reg),
+ insn->off);
+ break;
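
The SIL/DIL special case above deserves one worked sequence (my example): a byte store from BPF_REG_1 (rdi) into [BPF_REG_0 (rax) + 0] emits

	/* 40 88 78 00  ->  mov byte ptr [rax+0], dil                  */
	/* without the bare REX (0x40) the same ModRM would select %bh */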
+
+ /* LDX: dst_reg = *(u8*)(src_reg + off) */
+ case BPF_LDX | BPF_MEM | BPF_B:
+ /* emit 'movzx rax, byte ptr [rax + off]' */
+ EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xB6);
+ goto ldx;
+ case BPF_LDX | BPF_MEM | BPF_H:
+ /* emit 'movzx rax, word ptr [rax + off]' */
+ EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xB7);
+ goto ldx;
+ case BPF_LDX | BPF_MEM | BPF_W:
+ /* emit 'mov eax, dword ptr [rax+0x14]' */
+ if (is_ereg(dst_reg) || is_ereg(src_reg))
+ EMIT2(add_2mod(0x40, src_reg, dst_reg), 0x8B);
+ else
+ EMIT1(0x8B);
+ goto ldx;
+ case BPF_LDX | BPF_MEM | BPF_DW:
+ /* emit 'mov rax, qword ptr [rax+0x14]' */
+ EMIT2(add_2mod(0x48, src_reg, dst_reg), 0x8B);
+ldx:		/* if insn->off == 0 we could save one byte, but the
+		 * special case of x86 r13, which always needs an offset,
+		 * makes it not worth the hassle
+ */
+ if (is_imm8(insn->off))
+ EMIT2(add_2reg(0x40, src_reg, dst_reg), insn->off);
+ else
+ EMIT1_off32(add_2reg(0x80, src_reg, dst_reg),
+ insn->off);
+ break;
+
+ /* STX XADD: lock *(u32*)(dst_reg + off) += src_reg */
+ case BPF_STX | BPF_XADD | BPF_W:
+ /* emit 'lock add dword ptr [rax + off], eax' */
+ if (is_ereg(dst_reg) || is_ereg(src_reg))
+ EMIT3(0xF0, add_2mod(0x40, dst_reg, src_reg), 0x01);
+ else
+ EMIT2(0xF0, 0x01);
+ goto xadd;
+ case BPF_STX | BPF_XADD | BPF_DW:
+ EMIT3(0xF0, add_2mod(0x48, dst_reg, src_reg), 0x01);
+xadd: if (is_imm8(insn->off))
+ EMIT2(add_2reg(0x40, dst_reg, src_reg), insn->off);
+ else
+ EMIT1_off32(add_2reg(0x80, dst_reg, src_reg),
+ insn->off);
+ break;
+
+ /* call */
+ case BPF_JMP | BPF_CALL:
+ func = (u8 *) __bpf_call_base + imm32;
+ jmp_offset = func - (image + addrs[i]);
+ if (ctx->seen_ld_abs) {
+ EMIT2(0x41, 0x52); /* push %r10 */
+ EMIT2(0x41, 0x51); /* push %r9 */
+ /* need to adjust jmp offset, since
+ * pop %r9, pop %r10 take 4 bytes after call insn
+ */
+ jmp_offset += 4;
+ }
+ if (!imm32 || !is_simm32(jmp_offset)) {
+ pr_err("unsupported bpf func %d addr %p image %p\n",
+ imm32, func, image);
+ return -EINVAL;
+ }
+ EMIT1_off32(0xE8, jmp_offset);
+ if (ctx->seen_ld_abs) {
+ EMIT2(0x41, 0x59); /* pop %r9 */
+ EMIT2(0x41, 0x5A); /* pop %r10 */
+ }
+ break;
+
+ /* cond jump */
+ case BPF_JMP | BPF_JEQ | BPF_X:
+ case BPF_JMP | BPF_JNE | BPF_X:
+ case BPF_JMP | BPF_JGT | BPF_X:
+ case BPF_JMP | BPF_JGE | BPF_X:
+ case BPF_JMP | BPF_JSGT | BPF_X:
+ case BPF_JMP | BPF_JSGE | BPF_X:
+ /* cmp dst_reg, src_reg */
+ EMIT3(add_2mod(0x48, dst_reg, src_reg), 0x39,
+ add_2reg(0xC0, dst_reg, src_reg));
+ goto emit_cond_jmp;
+
+ case BPF_JMP | BPF_JSET | BPF_X:
+ /* test dst_reg, src_reg */
+ EMIT3(add_2mod(0x48, dst_reg, src_reg), 0x85,
+ add_2reg(0xC0, dst_reg, src_reg));
+ goto emit_cond_jmp;
+
+ case BPF_JMP | BPF_JSET | BPF_K:
+ /* test dst_reg, imm32 */
+ EMIT1(add_1mod(0x48, dst_reg));
+ EMIT2_off32(0xF7, add_1reg(0xC0, dst_reg), imm32);
+ goto emit_cond_jmp;
+
+ case BPF_JMP | BPF_JEQ | BPF_K:
+ case BPF_JMP | BPF_JNE | BPF_K:
+ case BPF_JMP | BPF_JGT | BPF_K:
+ case BPF_JMP | BPF_JGE | BPF_K:
+ case BPF_JMP | BPF_JSGT | BPF_K:
+ case BPF_JMP | BPF_JSGE | BPF_K:
+ /* cmp dst_reg, imm8/32 */
+ EMIT1(add_1mod(0x48, dst_reg));
+
+ if (is_imm8(imm32))
+ EMIT3(0x83, add_1reg(0xF8, dst_reg), imm32);
+ else
+ EMIT2_off32(0x81, add_1reg(0xF8, dst_reg), imm32);
+
+emit_cond_jmp: /* convert BPF opcode to x86 */
+ switch (BPF_OP(insn->code)) {
+ case BPF_JEQ:
+ jmp_cond = X86_JE;
+ break;
+ case BPF_JSET:
+ case BPF_JNE:
+ jmp_cond = X86_JNE;
+ break;
+ case BPF_JGT:
+ /* GT is unsigned '>', JA in x86 */
+ jmp_cond = X86_JA;
+ break;
+ case BPF_JGE:
+ /* GE is unsigned '>=', JAE in x86 */
+ jmp_cond = X86_JAE;
+ break;
+ case BPF_JSGT:
+ /* signed '>', GT in x86 */
+ jmp_cond = X86_JG;
+ break;
+ case BPF_JSGE:
+ /* signed '>=', GE in x86 */
+ jmp_cond = X86_JGE;
+ break;
+ default: /* to silence gcc warning */
+ return -EFAULT;
+ }
+ jmp_offset = addrs[i + insn->off] - addrs[i];
+ if (is_imm8(jmp_offset)) {
+ EMIT2(jmp_cond, jmp_offset);
+ } else if (is_simm32(jmp_offset)) {
+ EMIT2_off32(0x0F, jmp_cond + 0x10, jmp_offset);
+ } else {
+ pr_err("cond_jmp gen bug %llx\n", jmp_offset);
+ return -EFAULT;
+ }
+
+ break;
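
One subtlety worth noting (my gloss, with illustrative numbers): addrs[i] holds the offset of the *end* of instruction i, so the subtraction is already relative to the next instruction, exactly how x86 rel8/rel32 displacements are defined:

	/* addrs[i] = 40, insn->off = 2, addrs[i + 2] = 51               */
	/* jmp_offset = 51 - 40 = 11  ->  fits imm8  ->  EMIT2(jcc, 11) */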
- switch (filter[i].code) {
- case BPF_S_JMP_JGT_X:
- case BPF_S_JMP_JGE_X:
- case BPF_S_JMP_JEQ_X:
- seen |= SEEN_XREG;
- EMIT2(0x39, 0xd8); /* cmp %ebx,%eax */
- break;
- case BPF_S_JMP_JSET_X:
- seen |= SEEN_XREG;
- EMIT2(0x85, 0xd8); /* test %ebx,%eax */
- break;
- case BPF_S_JMP_JEQ_K:
- if (K == 0) {
- EMIT2(0x85, 0xc0); /* test %eax,%eax */
- break;
- }
- case BPF_S_JMP_JGT_K:
- case BPF_S_JMP_JGE_K:
- if (K <= 127)
- EMIT3(0x83, 0xf8, K); /* cmp imm8,%eax */
+ case BPF_JMP | BPF_JA:
+ jmp_offset = addrs[i + insn->off] - addrs[i];
+ if (!jmp_offset)
+ /* optimize out nop jumps */
+ break;
+emit_jmp:
+ if (is_imm8(jmp_offset)) {
+ EMIT2(0xEB, jmp_offset);
+ } else if (is_simm32(jmp_offset)) {
+ EMIT1_off32(0xE9, jmp_offset);
+ } else {
+ pr_err("jmp gen bug %llx\n", jmp_offset);
+ return -EFAULT;
+ }
+ break;
+
+ case BPF_LD | BPF_IND | BPF_W:
+ func = sk_load_word;
+ goto common_load;
+ case BPF_LD | BPF_ABS | BPF_W:
+ func = CHOOSE_LOAD_FUNC(imm32, sk_load_word);
+common_load: ctx->seen_ld_abs = true;
+ jmp_offset = func - (image + addrs[i]);
+ if (!func || !is_simm32(jmp_offset)) {
+ pr_err("unsupported bpf func %d addr %p image %p\n",
+ imm32, func, image);
+ return -EINVAL;
+ }
+ if (BPF_MODE(insn->code) == BPF_ABS) {
+ /* mov %esi, imm32 */
+ EMIT1_off32(0xBE, imm32);
+ } else {
+ /* mov %rsi, src_reg */
+ EMIT_mov(BPF_REG_2, src_reg);
+ if (imm32) {
+ if (is_imm8(imm32))
+ /* add %esi, imm8 */
+ EMIT3(0x83, 0xC6, imm32);
else
- EMIT1_off32(0x3d, K); /* cmp imm32,%eax */
- break;
- case BPF_S_JMP_JSET_K:
- if (K <= 0xFF)
- EMIT2(0xa8, K); /* test imm8,%al */
- else if (!(K & 0xFFFF00FF))
- EMIT3(0xf6, 0xc4, K >> 8); /* test imm8,%ah */
- else if (K <= 0xFFFF) {
- EMIT2(0x66, 0xa9); /* test imm16,%ax */
- EMIT(K, 2);
- } else {
- EMIT1_off32(0xa9, K); /* test imm32,%eax */
- }
- break;
+ /* add %esi, imm32 */
+ EMIT2_off32(0x81, 0xC6, imm32);
}
- if (filter[i].jt != 0) {
- if (filter[i].jf && f_offset)
- t_offset += is_near(f_offset) ? 2 : 5;
- EMIT_COND_JMP(t_op, t_offset);
- if (filter[i].jf)
- EMIT_JMP(f_offset);
- break;
- }
- EMIT_COND_JMP(f_op, f_offset);
- break;
- default:
- /* hmm, too complex filter, give up with jit compiler */
- goto out;
}
- ilen = prog - temp;
- if (image) {
- if (unlikely(proglen + ilen > oldproglen)) {
- pr_err("bpb_jit_compile fatal error\n");
- kfree(addrs);
- module_free(NULL, header);
- return;
- }
- memcpy(image + proglen, temp, ilen);
+ /* skb pointer is in R6 (%rbx), it will be copied into
+ * %rdi if skb_copy_bits() call is necessary.
+ * sk_load_* helpers also use %r10 and %r9d.
+ * See bpf_jit.S
+ */
+ EMIT1_off32(0xE8, jmp_offset); /* call */
+ break;
+
+ case BPF_LD | BPF_IND | BPF_H:
+ func = sk_load_half;
+ goto common_load;
+ case BPF_LD | BPF_ABS | BPF_H:
+ func = CHOOSE_LOAD_FUNC(imm32, sk_load_half);
+ goto common_load;
+ case BPF_LD | BPF_IND | BPF_B:
+ func = sk_load_byte;
+ goto common_load;
+ case BPF_LD | BPF_ABS | BPF_B:
+ func = CHOOSE_LOAD_FUNC(imm32, sk_load_byte);
+ goto common_load;
+
+ case BPF_JMP | BPF_EXIT:
+ if (i != insn_cnt - 1) {
+ jmp_offset = ctx->cleanup_addr - addrs[i];
+ goto emit_jmp;
}
- proglen += ilen;
- addrs[i] = proglen;
- prog = temp;
+ /* update cleanup_addr */
+ ctx->cleanup_addr = proglen;
+ /* mov rbx, qword ptr [rbp-X] */
+ EMIT3_off32(0x48, 0x8B, 0x9D, -stacksize);
+ /* mov r13, qword ptr [rbp-X] */
+ EMIT3_off32(0x4C, 0x8B, 0xAD, -stacksize + 8);
+ /* mov r14, qword ptr [rbp-X] */
+ EMIT3_off32(0x4C, 0x8B, 0xB5, -stacksize + 16);
+ /* mov r15, qword ptr [rbp-X] */
+ EMIT3_off32(0x4C, 0x8B, 0xBD, -stacksize + 24);
+
+ EMIT1(0xC9); /* leave */
+ EMIT1(0xC3); /* ret */
+ break;
+
+ default:
+			/* By design the x64 JIT should support all BPF instructions.
+			 * This error will be seen if a new instruction was added to
+			 * the interpreter but not to the JIT, or if there is junk in
+			 * the sk_filter program
+ */
+ pr_err("bpf_jit: unknown opcode %02x\n", insn->code);
+ return -EINVAL;
}
- /* last bpf instruction is always a RET :
- * use it to give the cleanup instruction(s) addr
- */
- cleanup_addr = proglen - 1; /* ret */
- if (seen_or_pass0)
- cleanup_addr -= 1; /* leaveq */
- if (seen_or_pass0 & SEEN_XREG)
- cleanup_addr -= 4; /* mov -8(%rbp),%rbx */
+ ilen = prog - temp;
+ if (image) {
+ if (unlikely(proglen + ilen > oldproglen)) {
+ pr_err("bpf_jit_compile fatal error\n");
+ return -EFAULT;
+ }
+ memcpy(image + proglen, temp, ilen);
+ }
+ proglen += ilen;
+ addrs[i] = proglen;
+ prog = temp;
+ }
+ return proglen;
+}
+
+void bpf_jit_compile(struct sk_filter *prog)
+{
+}
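
For orientation, bpf_int_jit_compile() below is a simple fixed-point iteration; a sketch of the idea (my summary, not the code itself):

	/* pass 0   : addrs[] seeded with 64 bytes/insn, a safe over-estimate,
	 *            so every jump can initially be encoded as rel32.
	 * pass 1..n: re-JIT against the previous pass's addrs[]; offsets only
	 *            shrink (rel32 jumps become rel8), so proglen never grows.
	 * stop     : once proglen == oldproglen the layout is stable; allocate
	 *            the image and run one final pass that writes the bytes.
	 */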
+
+void bpf_int_jit_compile(struct sk_filter *prog)
+{
+ struct bpf_binary_header *header = NULL;
+ int proglen, oldproglen = 0;
+ struct jit_context ctx = {};
+ u8 *image = NULL;
+ int *addrs;
+ int pass;
+ int i;
+
+ if (!bpf_jit_enable)
+ return;
+
+ if (!prog || !prog->len)
+ return;
+
+ addrs = kmalloc(prog->len * sizeof(*addrs), GFP_KERNEL);
+ if (!addrs)
+ return;
+
+ /* Before first pass, make a rough estimation of addrs[]
+ * each bpf instruction is translated to less than 64 bytes
+ */
+ for (proglen = 0, i = 0; i < prog->len; i++) {
+ proglen += 64;
+ addrs[i] = proglen;
+ }
+ ctx.cleanup_addr = proglen;
+
+ for (pass = 0; pass < 10; pass++) {
+ proglen = do_jit(prog, addrs, image, oldproglen, &ctx);
+ if (proglen <= 0) {
+ image = NULL;
+ if (header)
+ module_free(NULL, header);
+ goto out;
+ }
if (image) {
if (proglen != oldproglen)
- pr_err("bpb_jit_compile proglen=%u != oldproglen=%u\n", proglen, oldproglen);
+ pr_err("bpf_jit: proglen=%d != oldproglen=%d\n",
+ proglen, oldproglen);
break;
}
if (proglen == oldproglen) {
@@ -766,17 +918,16 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
}
if (bpf_jit_enable > 1)
- bpf_jit_dump(flen, proglen, pass, image);
+ bpf_jit_dump(prog->len, proglen, 0, image);
if (image) {
bpf_flush_icache(header, image + proglen);
set_memory_ro((unsigned long)header, header->pages);
- fp->bpf_func = (void *)image;
- fp->jited = 1;
+ prog->bpf_func = (void *)image;
+ prog->jited = 1;
}
out:
kfree(addrs);
- return;
}
static void bpf_jit_free_deferred(struct work_struct *work)