bpf_jit_comp.c

/*
 * BPF JIT compiler for ARM64
 *
 * Copyright (C) 2014 Zi Shen Lim <zlim.lnx@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#define pr_fmt(fmt) "bpf_jit: " fmt

#include <linux/filter.h>
#include <linux/printk.h>
#include <linux/skbuff.h>
#include <linux/slab.h>

#include <asm/byteorder.h>
#include <asm/cacheflush.h>
#include <asm/debug-monitors.h>

#include "bpf_jit.h"

int bpf_jit_enable __read_mostly;

#define TMP_REG_1 (MAX_BPF_REG + 0)
#define TMP_REG_2 (MAX_BPF_REG + 1)

/* Map BPF registers to A64 registers */
static const int bpf2a64[] = {
        /* return value from in-kernel function, and exit value from eBPF */
        [BPF_REG_0] = A64_R(7),
        /* arguments from eBPF program to in-kernel function */
        [BPF_REG_1] = A64_R(0),
        [BPF_REG_2] = A64_R(1),
        [BPF_REG_3] = A64_R(2),
        [BPF_REG_4] = A64_R(3),
        [BPF_REG_5] = A64_R(4),
        /* callee saved registers that in-kernel function will preserve */
        [BPF_REG_6] = A64_R(19),
        [BPF_REG_7] = A64_R(20),
        [BPF_REG_8] = A64_R(21),
        [BPF_REG_9] = A64_R(22),
        /* read-only frame pointer to access stack */
        [BPF_REG_FP] = A64_FP,
        /* temporary register for internal BPF JIT */
        [TMP_REG_1] = A64_R(23),
        [TMP_REG_2] = A64_R(24),
};
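
/*
 * JIT context carried across both build passes:
 *   prog            - the eBPF program being translated
 *   idx             - index of the next A64 instruction to emit
 *   tmp_used        - set when TMP_REG_1/TMP_REG_2 are needed, so the
 *                     prologue/epilogue know to save and restore them
 *   epilogue_offset - A64 index of the shared epilogue
 *   offset          - per-BPF-instruction A64 offsets, filled in pass 1
 *   image           - JITed image, or NULL during the sizing pass
 */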
struct jit_ctx {
        const struct bpf_prog *prog;
        int idx;
        int tmp_used;
        int epilogue_offset;
        int *offset;
        u32 *image;
};
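
/*
 * Emit one A64 instruction. During the first (sizing) pass ctx->image is
 * NULL, so only ctx->idx advances; the second pass also writes the
 * instruction into the image in little-endian order.
 */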
static inline void emit(const u32 insn, struct jit_ctx *ctx)
{
        if (ctx->image != NULL)
                ctx->image[ctx->idx] = cpu_to_le32(insn);

        ctx->idx++;
}
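
/*
 * Load a 64-bit immediate: MOVZ writes the low 16 bits and clears the rest,
 * then one MOVK patches in each remaining non-zero 16-bit chunk. For
 * example, val = 0x12340000ffff becomes:
 *         movz reg, #0xffff
 *         movk reg, #0x1234, lsl #32
 */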
static inline void emit_a64_mov_i64(const int reg, const u64 val,
                                    struct jit_ctx *ctx)
{
        u64 tmp = val;
        int shift = 0;

        emit(A64_MOVZ(1, reg, tmp & 0xffff, shift), ctx);
        tmp >>= 16;
        shift += 16;
        while (tmp) {
                if (tmp & 0xffff)
                        emit(A64_MOVK(1, reg, tmp & 0xffff, shift), ctx);
                tmp >>= 16;
                shift += 16;
        }
}
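
/*
 * Load a 32-bit immediate in at most two instructions: a negative value
 * (bit 31 set) starts from MOVN, a non-negative one from MOVZ, with a
 * MOVK fixing up the other half-word when needed.
 */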
static inline void emit_a64_mov_i(const int is64, const int reg,
                                  const s32 val, struct jit_ctx *ctx)
{
        u16 hi = val >> 16;
        u16 lo = val & 0xffff;

        if (hi & 0x8000) {
                if (hi == 0xffff) {
                        emit(A64_MOVN(is64, reg, (u16)~lo, 0), ctx);
                } else {
                        emit(A64_MOVN(is64, reg, (u16)~hi, 16), ctx);
                        emit(A64_MOVK(is64, reg, lo, 0), ctx);
                }
        } else {
                emit(A64_MOVZ(is64, reg, lo, 0), ctx);
                if (hi)
                        emit(A64_MOVK(is64, reg, hi, 16), ctx);
        }
}
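
/*
 * Branch offset, in A64 instructions, from BPF instruction bpf_from to
 * bpf_to. ctx->offset[] is filled during the first pass with the A64 index
 * reached after emitting each BPF instruction.
 */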
static inline int bpf2a64_offset(int bpf_to, int bpf_from,
                                 const struct jit_ctx *ctx)
{
        int to = ctx->offset[bpf_to];
        /* -1 to account for the Branch instruction */
        int from = ctx->offset[bpf_from] - 1;

        return to - from;
}
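
/*
 * Fill the unused tail of the allocated image with AARCH64_BREAK_FAULT
 * (a BRK instruction) so that stray execution traps instead of running
 * whatever happened to be in memory.
 */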
static void jit_fill_hole(void *area, unsigned int size)
{
        u32 *ptr;

        /* We are guaranteed to have aligned memory. */
        for (ptr = area; size >= sizeof(u32); size -= sizeof(u32))
                *ptr++ = cpu_to_le32(AARCH64_BREAK_FAULT);
}
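
/*
 * Branch offset, in A64 instructions, from the current emit position to
 * the shared epilogue.
 */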
static inline int epilogue_offset(const struct jit_ctx *ctx)
{
        int to = ctx->epilogue_offset;
        int from = ctx->idx;

        return to - from;
}

/* Stack must be multiples of 16B */
#define STACK_ALIGN(sz) (((sz) + 15) & ~15)
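
/*
 * Prologue: save the callee-saved registers backing BPF_REG_6..BPF_REG_9
 * (plus the JIT temporaries when they are used), reserve the BPF stack
 * together with a small scratch buffer for skb_copy_bits(), and point the
 * BPF frame pointer at the new stack pointer. Registers A and X are
 * cleared for the benefit of programs converted from classic BPF.
 */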
static void build_prologue(struct jit_ctx *ctx)
{
        const u8 r6 = bpf2a64[BPF_REG_6];
        const u8 r7 = bpf2a64[BPF_REG_7];
        const u8 r8 = bpf2a64[BPF_REG_8];
        const u8 r9 = bpf2a64[BPF_REG_9];
        const u8 fp = bpf2a64[BPF_REG_FP];
        const u8 ra = bpf2a64[BPF_REG_A];
        const u8 rx = bpf2a64[BPF_REG_X];
        const u8 tmp1 = bpf2a64[TMP_REG_1];
        const u8 tmp2 = bpf2a64[TMP_REG_2];
        int stack_size = MAX_BPF_STACK;

        stack_size += 4; /* extra for skb_copy_bits buffer */
        stack_size = STACK_ALIGN(stack_size);

        /* Save callee-saved registers */
        emit(A64_PUSH(r6, r7, A64_SP), ctx);
        emit(A64_PUSH(r8, r9, A64_SP), ctx);
        if (ctx->tmp_used)
                emit(A64_PUSH(tmp1, tmp2, A64_SP), ctx);

        /* Set up BPF stack */
        emit(A64_SUB_I(1, A64_SP, A64_SP, stack_size), ctx);

        /* Set up frame pointer */
        emit(A64_MOV(1, fp, A64_SP), ctx);

        /* Clear registers A and X */
        emit_a64_mov_i64(ra, 0, ctx);
        emit_a64_mov_i64(rx, 0, ctx);
}
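
/*
 * Epilogue: release the BPF stack, restore the callee-saved registers in
 * reverse order of the prologue, move the eBPF return value (BPF_REG_0)
 * into x0 and return to the caller.
 */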
static void build_epilogue(struct jit_ctx *ctx)
{
        const u8 r0 = bpf2a64[BPF_REG_0];
        const u8 r6 = bpf2a64[BPF_REG_6];
        const u8 r7 = bpf2a64[BPF_REG_7];
        const u8 r8 = bpf2a64[BPF_REG_8];
        const u8 r9 = bpf2a64[BPF_REG_9];
        const u8 fp = bpf2a64[BPF_REG_FP];
        const u8 tmp1 = bpf2a64[TMP_REG_1];
        const u8 tmp2 = bpf2a64[TMP_REG_2];
        int stack_size = MAX_BPF_STACK;

        stack_size += 4; /* extra for skb_copy_bits buffer */
        stack_size = STACK_ALIGN(stack_size);

        /* We're done with BPF stack */
        emit(A64_ADD_I(1, A64_SP, A64_SP, stack_size), ctx);

        /* Restore callee-saved registers */
        if (ctx->tmp_used)
                emit(A64_POP(tmp1, tmp2, A64_SP), ctx);
        emit(A64_POP(r8, r9, A64_SP), ctx);
        emit(A64_POP(r6, r7, A64_SP), ctx);

        /* Restore frame pointer */
        emit(A64_MOV(1, fp, A64_SP), ctx);

        /* Set return value */
        emit(A64_MOV(1, A64_R(0), r0), ctx);

        emit(A64_RET(A64_LR), ctx);
}

/* JITs an eBPF instruction.
 * Returns:
 * 0  - successfully JITed an 8-byte eBPF instruction.
 * >0 - successfully JITed a 16-byte eBPF instruction.
 * <0 - failed to JIT.
 */
static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
{
        const u8 code = insn->code;
        const u8 dst = bpf2a64[insn->dst_reg];
        const u8 src = bpf2a64[insn->src_reg];
        const u8 tmp = bpf2a64[TMP_REG_1];
        const u8 tmp2 = bpf2a64[TMP_REG_2];
        const s16 off = insn->off;
        const s32 imm = insn->imm;
        const int i = insn - ctx->prog->insnsi;
        const bool is64 = BPF_CLASS(code) == BPF_ALU64;
        u8 jmp_cond;
        s32 jmp_offset;

        switch (code) {
        /* dst = src */
        case BPF_ALU | BPF_MOV | BPF_X:
        case BPF_ALU64 | BPF_MOV | BPF_X:
                emit(A64_MOV(is64, dst, src), ctx);
                break;
        /* dst = dst OP src */
        case BPF_ALU | BPF_ADD | BPF_X:
        case BPF_ALU64 | BPF_ADD | BPF_X:
                emit(A64_ADD(is64, dst, dst, src), ctx);
                break;
        case BPF_ALU | BPF_SUB | BPF_X:
        case BPF_ALU64 | BPF_SUB | BPF_X:
                emit(A64_SUB(is64, dst, dst, src), ctx);
                break;
        case BPF_ALU | BPF_AND | BPF_X:
        case BPF_ALU64 | BPF_AND | BPF_X:
                emit(A64_AND(is64, dst, dst, src), ctx);
                break;
        case BPF_ALU | BPF_OR | BPF_X:
        case BPF_ALU64 | BPF_OR | BPF_X:
                emit(A64_ORR(is64, dst, dst, src), ctx);
                break;
        case BPF_ALU | BPF_XOR | BPF_X:
        case BPF_ALU64 | BPF_XOR | BPF_X:
                emit(A64_EOR(is64, dst, dst, src), ctx);
                break;
        case BPF_ALU | BPF_MUL | BPF_X:
        case BPF_ALU64 | BPF_MUL | BPF_X:
                emit(A64_MUL(is64, dst, dst, src), ctx);
                break;
        case BPF_ALU | BPF_DIV | BPF_X:
        case BPF_ALU64 | BPF_DIV | BPF_X:
                emit(A64_UDIV(is64, dst, dst, src), ctx);
                break;
        case BPF_ALU | BPF_MOD | BPF_X:
        case BPF_ALU64 | BPF_MOD | BPF_X:
                ctx->tmp_used = 1;
                emit(A64_UDIV(is64, tmp, dst, src), ctx);
                emit(A64_MUL(is64, tmp, tmp, src), ctx);
                emit(A64_SUB(is64, dst, dst, tmp), ctx);
                break;
        case BPF_ALU | BPF_LSH | BPF_X:
        case BPF_ALU64 | BPF_LSH | BPF_X:
                emit(A64_LSLV(is64, dst, dst, src), ctx);
                break;
        case BPF_ALU | BPF_RSH | BPF_X:
        case BPF_ALU64 | BPF_RSH | BPF_X:
                emit(A64_LSRV(is64, dst, dst, src), ctx);
                break;
        case BPF_ALU | BPF_ARSH | BPF_X:
        case BPF_ALU64 | BPF_ARSH | BPF_X:
                emit(A64_ASRV(is64, dst, dst, src), ctx);
                break;
        /* dst = -dst */
        case BPF_ALU | BPF_NEG:
        case BPF_ALU64 | BPF_NEG:
                emit(A64_NEG(is64, dst, dst), ctx);
                break;
        /* dst = BSWAP##imm(dst) */
        case BPF_ALU | BPF_END | BPF_FROM_LE:
        case BPF_ALU | BPF_END | BPF_FROM_BE:
#ifdef CONFIG_CPU_BIG_ENDIAN
                if (BPF_SRC(code) == BPF_FROM_BE)
                        goto emit_bswap_uxt;
#else /* !CONFIG_CPU_BIG_ENDIAN */
                if (BPF_SRC(code) == BPF_FROM_LE)
                        goto emit_bswap_uxt;
#endif
                switch (imm) {
                case 16:
                        emit(A64_REV16(is64, dst, dst), ctx);
                        /* zero-extend 16 bits into 64 bits */
                        emit(A64_UXTH(is64, dst, dst), ctx);
                        break;
                case 32:
                        emit(A64_REV32(is64, dst, dst), ctx);
                        /* upper 32 bits already cleared */
                        break;
                case 64:
                        emit(A64_REV64(dst, dst), ctx);
                        break;
                }
                break;
emit_bswap_uxt:
                switch (imm) {
                case 16:
                        /* zero-extend 16 bits into 64 bits */
                        emit(A64_UXTH(is64, dst, dst), ctx);
                        break;
                case 32:
                        /* zero-extend 32 bits into 64 bits */
                        emit(A64_UXTW(is64, dst, dst), ctx);
                        break;
                case 64:
                        /* nop */
                        break;
                }
                break;
        /* dst = imm */
        case BPF_ALU | BPF_MOV | BPF_K:
        case BPF_ALU64 | BPF_MOV | BPF_K:
                emit_a64_mov_i(is64, dst, imm, ctx);
                break;
        /* dst = dst OP imm */
        case BPF_ALU | BPF_ADD | BPF_K:
        case BPF_ALU64 | BPF_ADD | BPF_K:
                ctx->tmp_used = 1;
                emit_a64_mov_i(is64, tmp, imm, ctx);
                emit(A64_ADD(is64, dst, dst, tmp), ctx);
                break;
        case BPF_ALU | BPF_SUB | BPF_K:
        case BPF_ALU64 | BPF_SUB | BPF_K:
                ctx->tmp_used = 1;
                emit_a64_mov_i(is64, tmp, imm, ctx);
                emit(A64_SUB(is64, dst, dst, tmp), ctx);
                break;
        case BPF_ALU | BPF_AND | BPF_K:
        case BPF_ALU64 | BPF_AND | BPF_K:
                ctx->tmp_used = 1;
                emit_a64_mov_i(is64, tmp, imm, ctx);
                emit(A64_AND(is64, dst, dst, tmp), ctx);
                break;
        case BPF_ALU | BPF_OR | BPF_K:
        case BPF_ALU64 | BPF_OR | BPF_K:
                ctx->tmp_used = 1;
                emit_a64_mov_i(is64, tmp, imm, ctx);
                emit(A64_ORR(is64, dst, dst, tmp), ctx);
                break;
        case BPF_ALU | BPF_XOR | BPF_K:
        case BPF_ALU64 | BPF_XOR | BPF_K:
                ctx->tmp_used = 1;
                emit_a64_mov_i(is64, tmp, imm, ctx);
                emit(A64_EOR(is64, dst, dst, tmp), ctx);
                break;
        case BPF_ALU | BPF_MUL | BPF_K:
        case BPF_ALU64 | BPF_MUL | BPF_K:
                ctx->tmp_used = 1;
                emit_a64_mov_i(is64, tmp, imm, ctx);
                emit(A64_MUL(is64, dst, dst, tmp), ctx);
                break;
        case BPF_ALU | BPF_DIV | BPF_K:
        case BPF_ALU64 | BPF_DIV | BPF_K:
                ctx->tmp_used = 1;
                emit_a64_mov_i(is64, tmp, imm, ctx);
                emit(A64_UDIV(is64, dst, dst, tmp), ctx);
                break;
        case BPF_ALU | BPF_MOD | BPF_K:
        case BPF_ALU64 | BPF_MOD | BPF_K:
                ctx->tmp_used = 1;
                emit_a64_mov_i(is64, tmp2, imm, ctx);
                emit(A64_UDIV(is64, tmp, dst, tmp2), ctx);
                emit(A64_MUL(is64, tmp, tmp, tmp2), ctx);
                emit(A64_SUB(is64, dst, dst, tmp), ctx);
                break;
        case BPF_ALU | BPF_LSH | BPF_K:
        case BPF_ALU64 | BPF_LSH | BPF_K:
                emit(A64_LSL(is64, dst, dst, imm), ctx);
                break;
        case BPF_ALU | BPF_RSH | BPF_K:
        case BPF_ALU64 | BPF_RSH | BPF_K:
                emit(A64_LSR(is64, dst, dst, imm), ctx);
                break;
        case BPF_ALU | BPF_ARSH | BPF_K:
        case BPF_ALU64 | BPF_ARSH | BPF_K:
                emit(A64_ASR(is64, dst, dst, imm), ctx);
                break;

#define check_imm(bits, imm) do {                               \
        if ((((imm) > 0) && ((imm) >> (bits))) ||               \
            (((imm) < 0) && (~(imm) >> (bits)))) {              \
                pr_info("[%2d] imm=%d(0x%x) out of range\n",    \
                        i, imm, imm);                           \
                return -EINVAL;                                 \
        }                                                       \
} while (0)
#define check_imm19(imm) check_imm(19, imm)
#define check_imm26(imm) check_imm(26, imm)

        /* JUMP off */
        case BPF_JMP | BPF_JA:
                jmp_offset = bpf2a64_offset(i + off, i, ctx);
                check_imm26(jmp_offset);
                emit(A64_B(jmp_offset), ctx);
                break;
        /* IF (dst COND src) JUMP off */
        case BPF_JMP | BPF_JEQ | BPF_X:
        case BPF_JMP | BPF_JGT | BPF_X:
        case BPF_JMP | BPF_JGE | BPF_X:
        case BPF_JMP | BPF_JNE | BPF_X:
        case BPF_JMP | BPF_JSGT | BPF_X:
        case BPF_JMP | BPF_JSGE | BPF_X:
                emit(A64_CMP(1, dst, src), ctx);
emit_cond_jmp:
                jmp_offset = bpf2a64_offset(i + off, i, ctx);
                check_imm19(jmp_offset);
                switch (BPF_OP(code)) {
                case BPF_JEQ:
                        jmp_cond = A64_COND_EQ;
                        break;
                case BPF_JGT:
                        jmp_cond = A64_COND_HI;
                        break;
                case BPF_JGE:
                        jmp_cond = A64_COND_CS;
                        break;
                case BPF_JNE:
                        jmp_cond = A64_COND_NE;
                        break;
                case BPF_JSGT:
                        jmp_cond = A64_COND_GT;
                        break;
                case BPF_JSGE:
                        jmp_cond = A64_COND_GE;
                        break;
                default:
                        return -EFAULT;
                }
                emit(A64_B_(jmp_cond, jmp_offset), ctx);
                break;
        case BPF_JMP | BPF_JSET | BPF_X:
                emit(A64_TST(1, dst, src), ctx);
                goto emit_cond_jmp;
        /* IF (dst COND imm) JUMP off */
        case BPF_JMP | BPF_JEQ | BPF_K:
        case BPF_JMP | BPF_JGT | BPF_K:
        case BPF_JMP | BPF_JGE | BPF_K:
        case BPF_JMP | BPF_JNE | BPF_K:
        case BPF_JMP | BPF_JSGT | BPF_K:
        case BPF_JMP | BPF_JSGE | BPF_K:
                ctx->tmp_used = 1;
                emit_a64_mov_i(1, tmp, imm, ctx);
                emit(A64_CMP(1, dst, tmp), ctx);
                goto emit_cond_jmp;
        case BPF_JMP | BPF_JSET | BPF_K:
                ctx->tmp_used = 1;
                emit_a64_mov_i(1, tmp, imm, ctx);
                emit(A64_TST(1, dst, tmp), ctx);
                goto emit_cond_jmp;
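
        /*
         * Helper calls: the eBPF core encodes the target as an offset from
         * __bpf_call_base, and BPF_REG_1..BPF_REG_5 already live in x0-x4,
         * the AArch64 argument registers, so no argument shuffling is needed.
         */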
        /* function call */
        case BPF_JMP | BPF_CALL:
        {
                const u8 r0 = bpf2a64[BPF_REG_0];
                const u64 func = (u64)__bpf_call_base + imm;

                ctx->tmp_used = 1;
                emit_a64_mov_i64(tmp, func, ctx);
                emit(A64_PUSH(A64_FP, A64_LR, A64_SP), ctx);
                emit(A64_MOV(1, A64_FP, A64_SP), ctx);
                emit(A64_BLR(tmp), ctx);
                emit(A64_MOV(1, r0, A64_R(0)), ctx);
                emit(A64_POP(A64_FP, A64_LR, A64_SP), ctx);
                break;
        }
        /* function return */
        case BPF_JMP | BPF_EXIT:
                /* Optimization: when last instruction is EXIT,
                   simply fallthrough to epilogue. */
                if (i == ctx->prog->len - 1)
                        break;
                jmp_offset = epilogue_offset(ctx);
                check_imm26(jmp_offset);
                emit(A64_B(jmp_offset), ctx);
                break;

        /* dst = imm64 */
        case BPF_LD | BPF_IMM | BPF_DW:
        {
                const struct bpf_insn insn1 = insn[1];
                u64 imm64;

                if (insn1.code != 0 || insn1.src_reg != 0 ||
                    insn1.dst_reg != 0 || insn1.off != 0) {
                        /* Note: verifier in BPF core must catch invalid
                         * instructions.
                         */
                        pr_err_once("Invalid BPF_LD_IMM64 instruction\n");
                        return -EINVAL;
                }

                imm64 = (u64)insn1.imm << 32 | (u32)imm;
                emit_a64_mov_i64(dst, imm64, ctx);

                return 1;
        }

        /* LDX: dst = *(size *)(src + off) */
        case BPF_LDX | BPF_MEM | BPF_W:
        case BPF_LDX | BPF_MEM | BPF_H:
        case BPF_LDX | BPF_MEM | BPF_B:
        case BPF_LDX | BPF_MEM | BPF_DW:
                ctx->tmp_used = 1;
                emit_a64_mov_i(1, tmp, off, ctx);
                switch (BPF_SIZE(code)) {
                case BPF_W:
                        emit(A64_LDR32(dst, src, tmp), ctx);
                        break;
                case BPF_H:
                        emit(A64_LDRH(dst, src, tmp), ctx);
                        break;
                case BPF_B:
                        emit(A64_LDRB(dst, src, tmp), ctx);
                        break;
                case BPF_DW:
                        emit(A64_LDR64(dst, src, tmp), ctx);
                        break;
                }
                break;

        /* ST: *(size *)(dst + off) = imm */
        case BPF_ST | BPF_MEM | BPF_W:
        case BPF_ST | BPF_MEM | BPF_H:
        case BPF_ST | BPF_MEM | BPF_B:
        case BPF_ST | BPF_MEM | BPF_DW:
                goto notyet;

        /* STX: *(size *)(dst + off) = src */
        case BPF_STX | BPF_MEM | BPF_W:
        case BPF_STX | BPF_MEM | BPF_H:
        case BPF_STX | BPF_MEM | BPF_B:
        case BPF_STX | BPF_MEM | BPF_DW:
                ctx->tmp_used = 1;
                emit_a64_mov_i(1, tmp, off, ctx);
                switch (BPF_SIZE(code)) {
                case BPF_W:
                        emit(A64_STR32(src, dst, tmp), ctx);
                        break;
                case BPF_H:
                        emit(A64_STRH(src, dst, tmp), ctx);
                        break;
                case BPF_B:
                        emit(A64_STRB(src, dst, tmp), ctx);
                        break;
                case BPF_DW:
                        emit(A64_STR64(src, dst, tmp), ctx);
                        break;
                }
                break;
        /* STX XADD: lock *(u32 *)(dst + off) += src */
        case BPF_STX | BPF_XADD | BPF_W:
        /* STX XADD: lock *(u64 *)(dst + off) += src */
        case BPF_STX | BPF_XADD | BPF_DW:
                goto notyet;
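
        /*
         * LD_ABS/LD_IND are implemented by calling bpf_load_pointer() with
         * the skb, the offset and a small buffer on the BPF stack; a NULL
         * return branches straight to the epilogue so the program returns 0,
         * otherwise the value is loaded and, for 16/32-bit sizes,
         * byte-swapped on little-endian kernels to yield the ntoh result.
         */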
        /* R0 = ntohx(*(size *)(((struct sk_buff *)R6)->data + imm)) */
        case BPF_LD | BPF_ABS | BPF_W:
        case BPF_LD | BPF_ABS | BPF_H:
        case BPF_LD | BPF_ABS | BPF_B:
        /* R0 = ntohx(*(size *)(((struct sk_buff *)R6)->data + src + imm)) */
        case BPF_LD | BPF_IND | BPF_W:
        case BPF_LD | BPF_IND | BPF_H:
        case BPF_LD | BPF_IND | BPF_B:
        {
                const u8 r0 = bpf2a64[BPF_REG_0]; /* r0 = return value */
                const u8 r6 = bpf2a64[BPF_REG_6]; /* r6 = pointer to sk_buff */
                const u8 fp = bpf2a64[BPF_REG_FP];
                const u8 r1 = bpf2a64[BPF_REG_1]; /* r1: struct sk_buff *skb */
                const u8 r2 = bpf2a64[BPF_REG_2]; /* r2: int k */
                const u8 r3 = bpf2a64[BPF_REG_3]; /* r3: unsigned int size */
                const u8 r4 = bpf2a64[BPF_REG_4]; /* r4: void *buffer */
                const u8 r5 = bpf2a64[BPF_REG_5]; /* r5: void *(*func)(...) */
                int size;

                emit(A64_MOV(1, r1, r6), ctx);
                emit_a64_mov_i(0, r2, imm, ctx);
                if (BPF_MODE(code) == BPF_IND)
                        emit(A64_ADD(0, r2, r2, src), ctx);
                switch (BPF_SIZE(code)) {
                case BPF_W:
                        size = 4;
                        break;
                case BPF_H:
                        size = 2;
                        break;
                case BPF_B:
                        size = 1;
                        break;
                default:
                        return -EINVAL;
                }
                emit_a64_mov_i64(r3, size, ctx);
                emit(A64_ADD_I(1, r4, fp, MAX_BPF_STACK), ctx);
                emit_a64_mov_i64(r5, (unsigned long)bpf_load_pointer, ctx);
                emit(A64_PUSH(A64_FP, A64_LR, A64_SP), ctx);
                emit(A64_MOV(1, A64_FP, A64_SP), ctx);
                emit(A64_BLR(r5), ctx);
                emit(A64_MOV(1, r0, A64_R(0)), ctx);
                emit(A64_POP(A64_FP, A64_LR, A64_SP), ctx);
                jmp_offset = epilogue_offset(ctx);
                check_imm19(jmp_offset);
                emit(A64_CBZ(1, r0, jmp_offset), ctx);
                emit(A64_MOV(1, r5, r0), ctx);
                switch (BPF_SIZE(code)) {
                case BPF_W:
                        emit(A64_LDR32(r0, r5, A64_ZR), ctx);
#ifndef CONFIG_CPU_BIG_ENDIAN
                        emit(A64_REV32(0, r0, r0), ctx);
#endif
                        break;
                case BPF_H:
                        emit(A64_LDRH(r0, r5, A64_ZR), ctx);
#ifndef CONFIG_CPU_BIG_ENDIAN
                        emit(A64_REV16(0, r0, r0), ctx);
#endif
                        break;
                case BPF_B:
                        emit(A64_LDRB(r0, r5, A64_ZR), ctx);
                        break;
                }
                break;
        }

notyet:
                pr_info_once("*** NOT YET: opcode %02x ***\n", code);
                return -EFAULT;

        default:
                pr_err_once("unknown opcode %02x\n", code);
                return -EINVAL;
        }

        return 0;
}
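
/*
 * Translate the whole program. During the first pass (ctx->image == NULL)
 * this also records, for every BPF instruction, the A64 index reached after
 * it, which bpf2a64_offset() later uses to resolve branches. A positive
 * return from build_insn() marks a 16-byte instruction whose second half
 * must be skipped.
 */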
static int build_body(struct jit_ctx *ctx)
{
        const struct bpf_prog *prog = ctx->prog;
        int i;

        for (i = 0; i < prog->len; i++) {
                const struct bpf_insn *insn = &prog->insnsi[i];
                int ret;

                ret = build_insn(insn, ctx);

                if (ctx->image == NULL)
                        ctx->offset[i] = ctx->idx;
                if (ret > 0) {
                        i++;
                        continue;
                }
                if (ret)
                        return ret;
        }

        return 0;
}

static inline void bpf_flush_icache(void *start, void *end)
{
        flush_icache_range((unsigned long)start, (unsigned long)end);
}

void bpf_jit_compile(struct bpf_prog *prog)
{
        /* Nothing to do here. We support Internal BPF. */
}
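
/*
 * Two-pass JIT: the first pass runs with a NULL image to size the program,
 * fill ctx.offset[] and discover whether the JIT temporaries are needed;
 * the image is then allocated and the prologue, body and epilogue are
 * emitted for real. The instruction cache is flushed and the image made
 * read-only before prog->bpf_func is switched over to it.
 */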
void bpf_int_jit_compile(struct bpf_prog *prog)
{
        struct bpf_binary_header *header;
        struct jit_ctx ctx;
        int image_size;
        u8 *image_ptr;

        if (!bpf_jit_enable)
                return;

        if (!prog || !prog->len)
                return;

        memset(&ctx, 0, sizeof(ctx));
        ctx.prog = prog;

        ctx.offset = kcalloc(prog->len, sizeof(int), GFP_KERNEL);
        if (ctx.offset == NULL)
                return;

        /* 1. Initial fake pass to compute ctx->idx. */

        /* Fake pass to fill in ctx->offset and ctx->tmp_used. */
        if (build_body(&ctx))
                goto out;

        build_prologue(&ctx);

        ctx.epilogue_offset = ctx.idx;
        build_epilogue(&ctx);

        /* Now we know the actual image size. */
        image_size = sizeof(u32) * ctx.idx;
        header = bpf_jit_binary_alloc(image_size, &image_ptr,
                                      sizeof(u32), jit_fill_hole);
        if (header == NULL)
                goto out;

        /* 2. Now, the actual pass. */

        ctx.image = (u32 *)image_ptr;
        ctx.idx = 0;

        build_prologue(&ctx);

        if (build_body(&ctx)) {
                bpf_jit_binary_free(header);
                goto out;
        }

        build_epilogue(&ctx);

        /* And we're done. */
        if (bpf_jit_enable > 1)
                bpf_jit_dump(prog->len, image_size, 2, ctx.image);

        bpf_flush_icache(ctx.image, ctx.image + ctx.idx);

        set_memory_ro((unsigned long)header, header->pages);
        prog->bpf_func = (void *)ctx.image;
        prog->jited = true;
out:
        kfree(ctx.offset);
}
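
/*
 * Free a JITed program: make the image pages writable again, release the
 * binary header, then free the bpf_prog itself. Non-JITed programs only
 * need the latter.
 */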
void bpf_jit_free(struct bpf_prog *prog)
{
        unsigned long addr = (unsigned long)prog->bpf_func & PAGE_MASK;
        struct bpf_binary_header *header = (void *)addr;

        if (!prog->jited)
                goto free_filter;

        set_memory_rw(addr, header->pages);
        bpf_jit_binary_free(header);

free_filter:
        bpf_prog_unlock_free(prog);
}