|
@@ -1,7 +1,7 @@
 /*
  * BPF JIT compiler for ARM64
  *
- * Copyright (C) 2014 Zi Shen Lim <zlim.lnx@gmail.com>
+ * Copyright (C) 2014-2015 Zi Shen Lim <zlim.lnx@gmail.com>
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
|
@@ -225,6 +225,17 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
 	u8 jmp_cond;
 	s32 jmp_offset;
 
+#define check_imm(bits, imm) do {				\
+	if ((((imm) > 0) && ((imm) >> (bits))) ||		\
+	    (((imm) < 0) && (~(imm) >> (bits)))) {		\
+		pr_info("[%2d] imm=%d(0x%x) out of range\n",	\
+			i, imm, imm);				\
+		return -EINVAL;					\
+	}							\
+} while (0)
+#define check_imm19(imm) check_imm(19, imm)
+#define check_imm26(imm) check_imm(26, imm)
+
 	switch (code) {
 	/* dst = src */
 	case BPF_ALU | BPF_MOV | BPF_X:
|
@@ -258,15 +269,33 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
 		break;
 	case BPF_ALU | BPF_DIV | BPF_X:
 	case BPF_ALU64 | BPF_DIV | BPF_X:
-		emit(A64_UDIV(is64, dst, dst, src), ctx);
-		break;
 	case BPF_ALU | BPF_MOD | BPF_X:
 	case BPF_ALU64 | BPF_MOD | BPF_X:
-		ctx->tmp_used = 1;
-		emit(A64_UDIV(is64, tmp, dst, src), ctx);
-		emit(A64_MUL(is64, tmp, tmp, src), ctx);
-		emit(A64_SUB(is64, dst, dst, tmp), ctx);
+	{
+		const u8 r0 = bpf2a64[BPF_REG_0];
+
+		/* if (src == 0) return 0 */
+		jmp_offset = 3; /* skip ahead to else path */
+		check_imm19(jmp_offset);
+		emit(A64_CBNZ(is64, src, jmp_offset), ctx);
+		emit(A64_MOVZ(1, r0, 0, 0), ctx);
+		jmp_offset = epilogue_offset(ctx);
+		check_imm26(jmp_offset);
+		emit(A64_B(jmp_offset), ctx);
+		/* else */
+		switch (BPF_OP(code)) {
+		case BPF_DIV:
+			emit(A64_UDIV(is64, dst, dst, src), ctx);
+			break;
+		case BPF_MOD:
+			ctx->tmp_used = 1;
+			emit(A64_UDIV(is64, tmp, dst, src), ctx);
+			emit(A64_MUL(is64, tmp, tmp, src), ctx);
+			emit(A64_SUB(is64, dst, dst, tmp), ctx);
+			break;
+		}
 		break;
+	}
 	case BPF_ALU | BPF_LSH | BPF_X:
 	case BPF_ALU64 | BPF_LSH | BPF_X:
 		emit(A64_LSLV(is64, dst, dst, src), ctx);
|
|
@@ -393,17 +422,6 @@ emit_bswap_uxt:
 		emit(A64_ASR(is64, dst, dst, imm), ctx);
 		break;
 
-#define check_imm(bits, imm) do {				\
-	if ((((imm) > 0) && ((imm) >> (bits))) ||		\
-	    (((imm) < 0) && (~(imm) >> (bits)))) {		\
-		pr_info("[%2d] imm=%d(0x%x) out of range\n",	\
-			i, imm, imm);				\
-		return -EINVAL;					\
-	}							\
-} while (0)
-#define check_imm19(imm) check_imm(19, imm)
-#define check_imm26(imm) check_imm(26, imm)
-
 	/* JUMP off */
 	case BPF_JMP | BPF_JA:
 		jmp_offset = bpf2a64_offset(i + off, i, ctx);