|
@@ -74,32 +74,52 @@ struct jit_ctx {
|
|
|
|
|
|
int bpf_jit_enable __read_mostly;
|
|
|
|
|
|
-static u64 jit_get_skb_b(struct sk_buff *skb, unsigned offset)
|
|
|
+static inline int call_neg_helper(struct sk_buff *skb, int offset, void *ret,
|
|
|
+ unsigned int size)
|
|
|
+{
|
|
|
+ void *ptr = bpf_internal_load_pointer_neg_helper(skb, offset, size);
|
|
|
+
|
|
|
+ if (!ptr)
|
|
|
+ return -EFAULT;
|
|
|
+ memcpy(ret, ptr, size);
|
|
|
+ return 0;
|
|
|
+}
|
|
|
+
|
|
|
+static u64 jit_get_skb_b(struct sk_buff *skb, int offset)
|
|
|
{
|
|
|
u8 ret;
|
|
|
int err;
|
|
|
|
|
|
- err = skb_copy_bits(skb, offset, &ret, 1);
|
|
|
+ if (offset < 0)
|
|
|
+ err = call_neg_helper(skb, offset, &ret, 1);
|
|
|
+ else
|
|
|
+ err = skb_copy_bits(skb, offset, &ret, 1);
|
|
|
|
|
|
return (u64)err << 32 | ret;
|
|
|
}
|
|
|
|
|
|
-static u64 jit_get_skb_h(struct sk_buff *skb, unsigned offset)
|
|
|
+static u64 jit_get_skb_h(struct sk_buff *skb, int offset)
|
|
|
{
|
|
|
u16 ret;
|
|
|
int err;
|
|
|
|
|
|
- err = skb_copy_bits(skb, offset, &ret, 2);
|
|
|
+ if (offset < 0)
|
|
|
+ err = call_neg_helper(skb, offset, &ret, 2);
|
|
|
+ else
|
|
|
+ err = skb_copy_bits(skb, offset, &ret, 2);
|
|
|
|
|
|
return (u64)err << 32 | ntohs(ret);
|
|
|
}
|
|
|
|
|
|
-static u64 jit_get_skb_w(struct sk_buff *skb, unsigned offset)
|
|
|
+static u64 jit_get_skb_w(struct sk_buff *skb, int offset)
|
|
|
{
|
|
|
u32 ret;
|
|
|
int err;
|
|
|
|
|
|
- err = skb_copy_bits(skb, offset, &ret, 4);
|
|
|
+ if (offset < 0)
|
|
|
+ err = call_neg_helper(skb, offset, &ret, 4);
|
|
|
+ else
|
|
|
+ err = skb_copy_bits(skb, offset, &ret, 4);
|
|
|
|
|
|
return (u64)err << 32 | ntohl(ret);
|
|
|
}
|
|
@@ -536,9 +556,6 @@ static int build_body(struct jit_ctx *ctx)
|
|
|
case BPF_LD | BPF_B | BPF_ABS:
|
|
|
load_order = 0;
|
|
|
load:
|
|
|
- /* the interpreter will deal with the negative K */
|
|
|
- if ((int)k < 0)
|
|
|
- return -ENOTSUPP;
|
|
|
emit_mov_i(r_off, k, ctx);
|
|
|
load_common:
|
|
|
ctx->seen |= SEEN_DATA | SEEN_CALL;
|
|
@@ -553,6 +570,18 @@ load_common:
|
|
|
condt = ARM_COND_HI;
|
|
|
}
|
|
|
|
|
|
+ /*
|
|
|
+ * test for negative offset, only if we are
|
|
|
+ * currently scheduled to take the fast
|
|
|
+ * path. this will update the flags so that
|
|
|
+ * the slow-path instructions are ignored if the
|
|
|
+ * offset is negative.
|
|
|
+ *
|
|
|
+ * for load_order == 0 the HI condition will
|
|
|
+ * make loads at offset 0 take the slow path too.
|
|
|
+ */
|
|
|
+ _emit(condt, ARM_CMP_I(r_off, 0), ctx);
|
|
|
+
|
|
|
_emit(condt, ARM_ADD_R(r_scratch, r_off, r_skb_data),
|
|
|
ctx);
|
|
|
|