author    Daniel Vetter <daniel.vetter@ffwll.ch>    2012-05-08 13:39:59 +0200
committer Daniel Vetter <daniel.vetter@ffwll.ch>    2012-05-08 13:39:59 +0200
commit    5e13a0c5ec05d382b488a691dfb8af015b1dea1e (patch)
tree      7a06dfa1f7661f8908193f2437b32452520221d3 /arch/powerpc/net/bpf_jit_comp.c
parent    b615b57a124a4af7b68196bc2fb8acc236041fa2 (diff)
parent    4f256e8aa3eda15c11c3cec3ec5336e1fc579cbd (diff)
Merge remote-tracking branch 'airlied/drm-core-next' into drm-intel-next-queued
Backmerge of drm-next to resolve a few ugly conflicts and to get a few fixes from 3.4-rc6 (which drm-next has already merged). Note that this merge also restricts the stencil cache lra evict policy workaround to snb (as it should) - I had to frob the code anyway because the CM0_MASK_SHIFT define died in the masked-bit cleanups.

We need the backmerge to get Paulo Zanoni's infoframe regression fix for gm45 - further bugfixes from him touch the same area and would needlessly conflict.

Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Diffstat (limited to 'arch/powerpc/net/bpf_jit_comp.c')
-rw-r--r--  arch/powerpc/net/bpf_jit_comp.c | 26
1 file changed, 9 insertions(+), 17 deletions(-)
diff --git a/arch/powerpc/net/bpf_jit_comp.c b/arch/powerpc/net/bpf_jit_comp.c
index 73619d3aeb6..2dc8b148484 100644
--- a/arch/powerpc/net/bpf_jit_comp.c
+++ b/arch/powerpc/net/bpf_jit_comp.c
@@ -127,6 +127,9 @@ static void bpf_jit_build_epilogue(u32 *image, struct codegen_context *ctx)
 	PPC_BLR();
 }
 
+#define CHOOSE_LOAD_FUNC(K, func) \
+	((int)K < 0 ? ((int)K >= SKF_LL_OFF ? func##_negative_offset : func) : func##_positive_offset)
+
 /* Assemble the body code between the prologue & epilogue. */
 static int bpf_jit_build_body(struct sk_filter *fp, u32 *image,
 			      struct codegen_context *ctx,
@@ -391,21 +394,16 @@ static int bpf_jit_build_body(struct sk_filter *fp, u32 *image,
 
 			/*** Absolute loads from packet header/data ***/
 		case BPF_S_LD_W_ABS:
-			func = sk_load_word;
+			func = CHOOSE_LOAD_FUNC(K, sk_load_word);
 			goto common_load;
 		case BPF_S_LD_H_ABS:
-			func = sk_load_half;
+			func = CHOOSE_LOAD_FUNC(K, sk_load_half);
 			goto common_load;
 		case BPF_S_LD_B_ABS:
-			func = sk_load_byte;
+			func = CHOOSE_LOAD_FUNC(K, sk_load_byte);
 		common_load:
-			/*
-			 * Load from [K]. Reference with the (negative)
-			 * SKF_NET_OFF/SKF_LL_OFF offsets is unsupported.
-			 */
+			/* Load from [K]. */
 			ctx->seen |= SEEN_DATAREF;
-			if ((int)K < 0)
-				return -ENOTSUPP;
 			PPC_LI64(r_scratch1, func);
 			PPC_MTLR(r_scratch1);
 			PPC_LI32(r_addr, K);
@@ -429,7 +427,7 @@ static int bpf_jit_build_body(struct sk_filter *fp, u32 *image,
 		common_load_ind:
 			/*
 			 * Load from [X + K]. Negative offsets are tested for
-			 * in the helper functions, and result in a 'ret 0'.
+			 * in the helper functions.
 			 */
 			ctx->seen |= SEEN_DATAREF | SEEN_XREG;
 			PPC_LI64(r_scratch1, func);
@@ -443,13 +441,7 @@ static int bpf_jit_build_body(struct sk_filter *fp, u32 *image,
 			break;
 
 		case BPF_S_LDX_B_MSH:
-			/*
-			 * x86 version drops packet (RET 0) when K<0, whereas
-			 * interpreter does allow K<0 (__load_pointer, special
-			 * ancillary data). common_load returns ENOTSUPP if K<0,
-			 * so we fall back to interpreter & filter works.
-			 */
-			func = sk_load_byte_msh;
+			func = CHOOSE_LOAD_FUNC(K, sk_load_byte_msh);
 			goto common_load;
 			break;
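
For context on the change being merged here: the old common_load path returned -ENOTSUPP for any negative K, forcing such filters back to the BPF interpreter. The new CHOOSE_LOAD_FUNC macro instead picks one of three load helpers at JIT-compile time, based on the sign and range of the constant offset K. The following is a minimal, hypothetical userspace sketch of just that selection logic - the stub functions stand in for the JIT's real assembler load helpers, and only the SKF_LL_OFF value (-0x200000, from include/linux/filter.h) is taken from the kernel:

/*
 * Hypothetical sketch, NOT kernel code: how CHOOSE_LOAD_FUNC resolves
 * to a helper at compile time via token pasting.
 */
#include <stdio.h>

#define SKF_LL_OFF (-0x200000)	/* mirrors include/linux/filter.h */

/* Stand-ins for the JIT's assembler helpers. */
static void sk_load_word(void)                 { puts("generic helper: range-checked at run time"); }
static void sk_load_word_positive_offset(void) { puts("fast path: K >= 0"); }
static void sk_load_word_negative_offset(void) { puts("slow path: SKF_*_OFF load"); }

/* Same selection logic as the macro added by the patch. */
#define CHOOSE_LOAD_FUNC(K, func) \
	((int)K < 0 ? ((int)K >= SKF_LL_OFF ? func##_negative_offset : func) : func##_positive_offset)

int main(void)
{
	CHOOSE_LOAD_FUNC(14, sk_load_word)();        /* K >= 0             -> _positive_offset */
	CHOOSE_LOAD_FUNC(-4, sk_load_word)();        /* SKF_LL_OFF <= K<0  -> _negative_offset */
	CHOOSE_LOAD_FUNC(-0x300000, sk_load_word)(); /* K < SKF_LL_OFF     -> generic helper   */
	return 0;
}

The effect, per the removed comments above: the common case K >= 0 gets a fast-path helper that skips negative-offset handling entirely, while negative K (the SKF_NET_OFF/SKF_LL_OFF ranges) is handled by a dedicated helper instead of punting to the interpreter; offsets below SKF_LL_OFF fall through to the generic helper's run-time check. The _positive_offset/_negative_offset helper variants themselves presumably come from the same patch series being merged.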