bpf_jit_32.c

/*
 * Just-In-Time compiler for BPF filters on 32bit ARM
 *
 * Copyright (c) 2011 Mircea Gherzan <mgherzan@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; version 2 of the License.
 */

#include <linux/bitops.h>
#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/filter.h>
#include <linux/netdevice.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/if_vlan.h>

#include <asm/cacheflush.h>
#include <asm/hwcap.h>
#include <asm/opcodes.h>

#include "bpf_jit_32.h"

/*
 * ABI:
 *
 * r0	scratch register
 * r4	BPF register A
 * r5	BPF register X
 * r6	pointer to the skb
 * r7	skb->data
 * r8	skb_headlen(skb)
 */

#define r_scratch	ARM_R0
/* r1-r3 are (also) used for the unaligned loads on the non-ARMv7 slowpath */
#define r_off		ARM_R1
#define r_A		ARM_R4
#define r_X		ARM_R5
#define r_skb		ARM_R6
#define r_skb_data	ARM_R7
#define r_skb_hl	ARM_R8

#define SCRATCH_SP_OFFSET	0
#define SCRATCH_OFF(k)		(SCRATCH_SP_OFFSET + 4 * (k))

#define SEEN_MEM		((1 << BPF_MEMWORDS) - 1)
#define SEEN_MEM_WORD(k)	(1 << (k))
#define SEEN_X			(1 << BPF_MEMWORDS)
#define SEEN_CALL		(1 << (BPF_MEMWORDS + 1))
#define SEEN_SKB		(1 << (BPF_MEMWORDS + 2))
#define SEEN_DATA		(1 << (BPF_MEMWORDS + 3))
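
/*
 * ctx->seen is a usage bitmap (sketch): bit k in [0, BPF_MEMWORDS) is set
 * by SEEN_MEM_WORD(k) when scratch word M[k] is touched, and SEEN_MEM
 * masks that whole group; the bits above it record whether the filter
 * needs X, makes a helper call, or dereferences the skb and its data, so
 * the prologue and epilogue only save and set up what is actually used.
 */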

#define FLAG_NEED_X_RESET	(1 << 0)
#define FLAG_IMM_OVERFLOW	(1 << 1)

struct jit_ctx {
	const struct bpf_prog *skf;
	unsigned idx;
	unsigned prologue_bytes;
	int ret0_fp_idx;
	u32 seen;
	u32 flags;
	u32 *offsets;
	u32 *target;
#if __LINUX_ARM_ARCH__ < 7
	u16 epilogue_bytes;
	u16 imm_count;
	u32 *imms;
#endif
};

int bpf_jit_enable __read_mostly;

static inline int call_neg_helper(struct sk_buff *skb, int offset, void *ret,
				  unsigned int size)
{
	void *ptr = bpf_internal_load_pointer_neg_helper(skb, offset, size);

	if (!ptr)
		return -EFAULT;
	memcpy(ret, ptr, size);
	return 0;
}

static u64 jit_get_skb_b(struct sk_buff *skb, int offset)
{
	u8 ret;
	int err;

	if (offset < 0)
		err = call_neg_helper(skb, offset, &ret, 1);
	else
		err = skb_copy_bits(skb, offset, &ret, 1);

	return (u64)err << 32 | ret;
}

static u64 jit_get_skb_h(struct sk_buff *skb, int offset)
{
	u16 ret;
	int err;

	if (offset < 0)
		err = call_neg_helper(skb, offset, &ret, 2);
	else
		err = skb_copy_bits(skb, offset, &ret, 2);

	return (u64)err << 32 | ntohs(ret);
}

static u64 jit_get_skb_w(struct sk_buff *skb, int offset)
{
	u32 ret;
	int err;

	if (offset < 0)
		err = call_neg_helper(skb, offset, &ret, 4);
	else
		err = skb_copy_bits(skb, offset, &ret, 4);

	return (u64)err << 32 | ntohl(ret);
}
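
/*
 * A note on the return convention of the three helpers above: the error
 * code is packed into the upper 32 bits of the u64 and the loaded,
 * byte-swapped value into the lower 32 bits. Assuming the usual
 * little-endian EABI assignment of a u64 return to the r0/r1 pair (low
 * word in r0, high word in r1), the JITed slow path can test r1 for an
 * error and, when it is zero, move r0 straight into r_A; this is exactly
 * what the load_common code in build_body() does.
 */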

/*
 * Wrapper that handles both OABI and EABI and ensures Thumb2 interworking
 * (where the assembly routines like __aeabi_uidiv could cause problems).
 */
static u32 jit_udiv(u32 dividend, u32 divisor)
{
	return dividend / divisor;
}

static inline void _emit(int cond, u32 inst, struct jit_ctx *ctx)
{
	inst |= (cond << 28);
	inst = __opcode_to_mem_arm(inst);

	if (ctx->target != NULL)
		ctx->target[ctx->idx] = inst;

	ctx->idx++;
}

/*
 * Emit an instruction that will be executed unconditionally.
 */
static inline void emit(u32 inst, struct jit_ctx *ctx)
{
	_emit(ARM_COND_AL, inst, ctx);
}

static u16 saved_regs(struct jit_ctx *ctx)
{
	u16 ret = 0;

	if ((ctx->skf->len > 1) ||
	    (ctx->skf->insns[0].code == (BPF_RET | BPF_A)))
		ret |= 1 << r_A;

#ifdef CONFIG_FRAME_POINTER
	ret |= (1 << ARM_FP) | (1 << ARM_IP) | (1 << ARM_LR) | (1 << ARM_PC);
#else
	if (ctx->seen & SEEN_CALL)
		ret |= 1 << ARM_LR;
#endif
	if (ctx->seen & (SEEN_DATA | SEEN_SKB))
		ret |= 1 << r_skb;
	if (ctx->seen & SEEN_DATA)
		ret |= (1 << r_skb_data) | (1 << r_skb_hl);
	if (ctx->seen & SEEN_X)
		ret |= 1 << r_X;

	return ret;
}

static inline int mem_words_used(struct jit_ctx *ctx)
{
	/* yes, we do waste some stack space IF there are "holes" in the set */
	return fls(ctx->seen & SEEN_MEM);
}
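
/*
 * Example: a filter that only touches M[3] leaves ctx->seen & SEEN_MEM
 * == 0b1000, so fls() returns 4 and the prologue reserves four stack
 * words even though M[0..2] go unused; those are the "holes" mentioned
 * above. In exchange, SCRATCH_OFF(k) can address every scratch word at
 * a fixed offset from the stack pointer.
 */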

static inline bool is_load_to_a(u16 inst)
{
	switch (inst) {
	case BPF_LD | BPF_W | BPF_LEN:
	case BPF_LD | BPF_W | BPF_ABS:
	case BPF_LD | BPF_H | BPF_ABS:
	case BPF_LD | BPF_B | BPF_ABS:
		return true;
	default:
		return false;
	}
}

static void jit_fill_hole(void *area, unsigned int size)
{
	u32 *ptr;
	/* We are guaranteed to have aligned memory. */
	for (ptr = area; size >= sizeof(u32); size -= sizeof(u32))
		*ptr++ = __opcode_to_mem_arm(ARM_INST_UDF);
}

static void build_prologue(struct jit_ctx *ctx)
{
	u16 reg_set = saved_regs(ctx);
	u16 first_inst = ctx->skf->insns[0].code;
	u16 off;

#ifdef CONFIG_FRAME_POINTER
	emit(ARM_MOV_R(ARM_IP, ARM_SP), ctx);
	emit(ARM_PUSH(reg_set), ctx);
	emit(ARM_SUB_I(ARM_FP, ARM_IP, 4), ctx);
#else
	if (reg_set)
		emit(ARM_PUSH(reg_set), ctx);
#endif

	if (ctx->seen & (SEEN_DATA | SEEN_SKB))
		emit(ARM_MOV_R(r_skb, ARM_R0), ctx);

	if (ctx->seen & SEEN_DATA) {
		off = offsetof(struct sk_buff, data);
		emit(ARM_LDR_I(r_skb_data, r_skb, off), ctx);
		/* headlen = len - data_len */
		off = offsetof(struct sk_buff, len);
		emit(ARM_LDR_I(r_skb_hl, r_skb, off), ctx);
		off = offsetof(struct sk_buff, data_len);
		emit(ARM_LDR_I(r_scratch, r_skb, off), ctx);
		emit(ARM_SUB_R(r_skb_hl, r_skb_hl, r_scratch), ctx);
	}

	if (ctx->flags & FLAG_NEED_X_RESET)
		emit(ARM_MOV_I(r_X, 0), ctx);

	/* do not leak kernel data to userspace */
	if ((first_inst != (BPF_RET | BPF_K)) && !(is_load_to_a(first_inst)))
		emit(ARM_MOV_I(r_A, 0), ctx);

	/* stack space for the BPF_MEM words */
	if (ctx->seen & SEEN_MEM)
		emit(ARM_SUB_I(ARM_SP, ARM_SP, mem_words_used(ctx) * 4), ctx);
}

static void build_epilogue(struct jit_ctx *ctx)
{
	u16 reg_set = saved_regs(ctx);

	if (ctx->seen & SEEN_MEM)
		emit(ARM_ADD_I(ARM_SP, ARM_SP, mem_words_used(ctx) * 4), ctx);

	reg_set &= ~(1 << ARM_LR);

#ifdef CONFIG_FRAME_POINTER
	/* the first instruction of the prologue was: mov ip, sp */
	reg_set &= ~(1 << ARM_IP);
	reg_set |= (1 << ARM_SP);
	emit(ARM_LDM(ARM_SP, reg_set), ctx);
#else
	if (reg_set) {
		if (ctx->seen & SEEN_CALL)
			reg_set |= 1 << ARM_PC;
		emit(ARM_POP(reg_set), ctx);
	}

	if (!(ctx->seen & SEEN_CALL))
		emit(ARM_BX(ARM_LR), ctx);
#endif
}

static int16_t imm8m(u32 x)
{
	u32 rot;

	for (rot = 0; rot < 16; rot++)
		if ((x & ~ror32(0xff, 2 * rot)) == 0)
			return rol32(x, 2 * rot) | (rot << 8);

	return -1;
}
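
/*
 * Worked example: the classic ARM data-processing immediate is an 8-bit
 * value rotated right by twice a 4-bit rotate field. For x = 0x00ff0000
 * the loop succeeds at rot = 8, since ror32(0xff, 16) = 0x00ff0000, and
 * returns rol32(x, 16) | (8 << 8) = 0x8ff; this is the same encoded
 * constant the emit_swap16() comment below refers to. A value such as
 * 0x00ff00ff spans more than 8 circularly contiguous bits, fits no
 * rotation, and yields -1, so callers fall back to emit_mov_i_no8m().
 */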

#if __LINUX_ARM_ARCH__ < 7

static u16 imm_offset(u32 k, struct jit_ctx *ctx)
{
	unsigned i = 0, offset;
	u16 imm;

	/* on the "fake" run we just count them (duplicates included) */
	if (ctx->target == NULL) {
		ctx->imm_count++;
		return 0;
	}

	while ((i < ctx->imm_count) && ctx->imms[i]) {
		if (ctx->imms[i] == k)
			break;
		i++;
	}

	if (ctx->imms[i] == 0)
		ctx->imms[i] = k;

	/* constants go just after the epilogue */
	offset = ctx->offsets[ctx->skf->len];
	offset += ctx->prologue_bytes;
	offset += ctx->epilogue_bytes;
	offset += i * 4;

	ctx->target[offset / 4] = k;

	/* PC in ARM mode == address of the instruction + 8 */
	imm = offset - (8 + ctx->idx * 4);

	if (imm & ~0xfff) {
		/*
		 * literal pool is too far, signal it into flags. we
		 * can only detect it on the second pass unfortunately.
		 */
		ctx->flags |= FLAG_IMM_OVERFLOW;
		return 0;
	}

	return imm;
}

#endif /* __LINUX_ARM_ARCH__ */
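
/*
 * Resulting image layout on pre-ARMv7 (a sketch):
 *
 *   [ prologue | filter body | epilogue | imm[0] imm[1] ... ]
 *
 * Every constant is reached with a single pc-relative ldr whose
 * immediate field is 12 bits wide, hence the ~0xfff check above: a
 * constant more than 4095 bytes away from its load cannot be encoded,
 * so FLAG_IMM_OVERFLOW makes the JIT bail out to the interpreter.
 */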

/*
 * Move an immediate that's not an imm8m to a core register.
 */
static inline void emit_mov_i_no8m(int rd, u32 val, struct jit_ctx *ctx)
{
#if __LINUX_ARM_ARCH__ < 7
	emit(ARM_LDR_I(rd, ARM_PC, imm_offset(val, ctx)), ctx);
#else
	emit(ARM_MOVW(rd, val & 0xffff), ctx);
	if (val > 0xffff)
		emit(ARM_MOVT(rd, val >> 16), ctx);
#endif
}

static inline void emit_mov_i(int rd, u32 val, struct jit_ctx *ctx)
{
	int imm12 = imm8m(val);

	if (imm12 >= 0)
		emit(ARM_MOV_I(rd, imm12), ctx);
	else
		emit_mov_i_no8m(rd, val, ctx);
}

#if __LINUX_ARM_ARCH__ < 6

static void emit_load_be32(u8 cond, u8 r_res, u8 r_addr, struct jit_ctx *ctx)
{
	_emit(cond, ARM_LDRB_I(ARM_R3, r_addr, 1), ctx);
	_emit(cond, ARM_LDRB_I(ARM_R1, r_addr, 0), ctx);
	_emit(cond, ARM_LDRB_I(ARM_R2, r_addr, 3), ctx);
	_emit(cond, ARM_LSL_I(ARM_R3, ARM_R3, 16), ctx);
	_emit(cond, ARM_LDRB_I(ARM_R0, r_addr, 2), ctx);
	_emit(cond, ARM_ORR_S(ARM_R3, ARM_R3, ARM_R1, SRTYPE_LSL, 24), ctx);
	_emit(cond, ARM_ORR_R(ARM_R3, ARM_R3, ARM_R2), ctx);
	_emit(cond, ARM_ORR_S(r_res, ARM_R3, ARM_R0, SRTYPE_LSL, 8), ctx);
}
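
/*
 * Spelled out, the eight instructions above compute (with b0..b3 being
 * the bytes at r_addr + 0..3):
 *
 *   r_res = (b0 << 24) | (b1 << 16) | (b2 << 8) | b3;
 *
 * that is, a big-endian (network order) word assembled byte by byte,
 * which never assumes a word-aligned address; this is the slowpath that
 * the "r1-r3" comment near the register defines refers to.
 */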

static void emit_load_be16(u8 cond, u8 r_res, u8 r_addr, struct jit_ctx *ctx)
{
	_emit(cond, ARM_LDRB_I(ARM_R1, r_addr, 0), ctx);
	_emit(cond, ARM_LDRB_I(ARM_R2, r_addr, 1), ctx);
	_emit(cond, ARM_ORR_S(r_res, ARM_R2, ARM_R1, SRTYPE_LSL, 8), ctx);
}

static inline void emit_swap16(u8 r_dst, u8 r_src, struct jit_ctx *ctx)
{
	/* r_dst = (r_src << 8) | (r_src >> 8) */
	emit(ARM_LSL_I(ARM_R1, r_src, 8), ctx);
	emit(ARM_ORR_S(r_dst, ARM_R1, r_src, SRTYPE_LSR, 8), ctx);

	/*
	 * we need to mask out the bits set in r_dst[23:16] due to
	 * the first shift instruction.
	 *
	 * note that 0x8ff is the encoded immediate 0x00ff0000.
	 */
	emit(ARM_BIC_I(r_dst, r_dst, 0x8ff), ctx);
}

#else  /* ARMv6+ */

static void emit_load_be32(u8 cond, u8 r_res, u8 r_addr, struct jit_ctx *ctx)
{
	_emit(cond, ARM_LDR_I(r_res, r_addr, 0), ctx);
#ifdef __LITTLE_ENDIAN
	_emit(cond, ARM_REV(r_res, r_res), ctx);
#endif
}

static void emit_load_be16(u8 cond, u8 r_res, u8 r_addr, struct jit_ctx *ctx)
{
	_emit(cond, ARM_LDRH_I(r_res, r_addr, 0), ctx);
#ifdef __LITTLE_ENDIAN
	_emit(cond, ARM_REV16(r_res, r_res), ctx);
#endif
}

static inline void emit_swap16(u8 r_dst __maybe_unused,
			       u8 r_src __maybe_unused,
			       struct jit_ctx *ctx __maybe_unused)
{
#ifdef __LITTLE_ENDIAN
	emit(ARM_REV16(r_dst, r_src), ctx);
#endif
}

#endif /* __LINUX_ARM_ARCH__ < 6 */

/* Compute the immediate value for a PC-relative branch. */
static inline u32 b_imm(unsigned tgt, struct jit_ctx *ctx)
{
	u32 imm;

	if (ctx->target == NULL)
		return 0;
	/*
	 * BPF allows only forward jumps and the offset of the target is
	 * still the one computed during the first pass.
	 */
	imm = ctx->offsets[tgt] + ctx->prologue_bytes - (ctx->idx * 4 + 8);

	return imm >> 2;
}
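
/*
 * Worked example with made-up numbers: on the second pass the prologue
 * is emitted first, so ctx->idx * 4 is the branch's byte offset within
 * the whole image, while ctx->offsets[] holds body-relative offsets
 * recorded during the first pass. With prologue_bytes = 0x10, a branch
 * emitted at ctx->idx == 8 (image byte 0x20) and a target at
 * offsets[tgt] == 0x40:
 *
 *   imm = 0x40 + 0x10 - (8 * 4 + 8) = 0x28 bytes, i.e. 10 words,
 *
 * where the extra 8 accounts for the ARM-mode PC reading two
 * instructions ahead of the branch being executed.
 */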

#define OP_IMM3(op, r1, r2, imm_val, ctx)				\
	do {								\
		imm12 = imm8m(imm_val);					\
		if (imm12 < 0) {					\
			emit_mov_i_no8m(r_scratch, imm_val, ctx);	\
			emit(op ## _R((r1), (r2), r_scratch), ctx);	\
		} else {						\
			emit(op ## _I((r1), (r2), imm12), ctx);		\
		}							\
	} while (0)

static inline void emit_err_ret(u8 cond, struct jit_ctx *ctx)
{
	if (ctx->ret0_fp_idx >= 0) {
		_emit(cond, ARM_B(b_imm(ctx->ret0_fp_idx, ctx)), ctx);
		/* NOP to keep the size constant between passes */
		emit(ARM_MOV_R(ARM_R0, ARM_R0), ctx);
	} else {
		_emit(cond, ARM_MOV_I(ARM_R0, 0), ctx);
		_emit(cond, ARM_B(b_imm(ctx->skf->len, ctx)), ctx);
	}
}

static inline void emit_blx_r(u8 tgt_reg, struct jit_ctx *ctx)
{
#if __LINUX_ARM_ARCH__ < 5
	emit(ARM_MOV_R(ARM_LR, ARM_PC), ctx);

	if (elf_hwcap & HWCAP_THUMB)
		emit(ARM_BX(tgt_reg), ctx);
	else
		emit(ARM_MOV_R(ARM_PC, tgt_reg), ctx);
#else
	emit(ARM_BLX_R(tgt_reg), ctx);
#endif
}

static inline void emit_udiv(u8 rd, u8 rm, u8 rn, struct jit_ctx *ctx)
{
#if __LINUX_ARM_ARCH__ == 7
	if (elf_hwcap & HWCAP_IDIVA) {
		emit(ARM_UDIV(rd, rm, rn), ctx);
		return;
	}
#endif

	/*
	 * For BPF_ALU | BPF_DIV | BPF_K instructions, rm is ARM_R4
	 * (r_A) and rn is ARM_R0 (r_scratch) so load rn first into
	 * ARM_R1 to avoid accidentally overwriting ARM_R0 with rm
	 * before using it as a source for ARM_R1.
	 *
	 * For BPF_ALU | BPF_DIV | BPF_X rm is ARM_R4 (r_A) and rn is
	 * ARM_R5 (r_X) so there is no particular register overlap
	 * issue.
	 */
	if (rn != ARM_R1)
		emit(ARM_MOV_R(ARM_R1, rn), ctx);
	if (rm != ARM_R0)
		emit(ARM_MOV_R(ARM_R0, rm), ctx);

	ctx->seen |= SEEN_CALL;
	emit_mov_i(ARM_R3, (u32)jit_udiv, ctx);
	emit_blx_r(ARM_R3, ctx);

	if (rd != ARM_R0)
		emit(ARM_MOV_R(rd, ARM_R0), ctx);
}

static inline void update_on_xread(struct jit_ctx *ctx)
{
	if (!(ctx->seen & SEEN_X))
		ctx->flags |= FLAG_NEED_X_RESET;

	ctx->seen |= SEEN_X;
}
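
/*
 * Rationale: if the first use of X is a read before any write, the
 * FLAG_NEED_X_RESET flag makes build_prologue() emit "mov r_X, #0";
 * together with the zeroing of r_A there, this keeps stale kernel
 * register contents from becoming visible to the filter.
 */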

static int build_body(struct jit_ctx *ctx)
{
	void *load_func[] = {jit_get_skb_b, jit_get_skb_h, jit_get_skb_w};
	const struct bpf_prog *prog = ctx->skf;
	const struct sock_filter *inst;
	unsigned i, load_order, off, condt;
	int imm12;
	u32 k;

	for (i = 0; i < prog->len; i++) {
		u16 code;

		inst = &(prog->insns[i]);
		/* K as an immediate value operand */
		k = inst->k;
		code = bpf_anc_helper(inst);

		/* compute offsets only in the fake pass */
		if (ctx->target == NULL)
			ctx->offsets[i] = ctx->idx * 4;

		switch (code) {
		case BPF_LD | BPF_IMM:
			emit_mov_i(r_A, k, ctx);
			break;
		case BPF_LD | BPF_W | BPF_LEN:
			ctx->seen |= SEEN_SKB;
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, len) != 4);
			emit(ARM_LDR_I(r_A, r_skb,
				       offsetof(struct sk_buff, len)), ctx);
			break;
		case BPF_LD | BPF_MEM:
			/* A = scratch[k] */
			ctx->seen |= SEEN_MEM_WORD(k);
			emit(ARM_LDR_I(r_A, ARM_SP, SCRATCH_OFF(k)), ctx);
			break;
		case BPF_LD | BPF_W | BPF_ABS:
			load_order = 2;
			goto load;
		case BPF_LD | BPF_H | BPF_ABS:
			load_order = 1;
			goto load;
		case BPF_LD | BPF_B | BPF_ABS:
			load_order = 0;
load:
			emit_mov_i(r_off, k, ctx);
load_common:
			ctx->seen |= SEEN_DATA | SEEN_CALL;

			if (load_order > 0) {
				emit(ARM_SUB_I(r_scratch, r_skb_hl,
					       1 << load_order), ctx);
				emit(ARM_CMP_R(r_scratch, r_off), ctx);
				condt = ARM_COND_GE;
			} else {
				emit(ARM_CMP_R(r_skb_hl, r_off), ctx);
				condt = ARM_COND_HI;
			}

			/*
			 * test for negative offset, only if we are
			 * currently scheduled to take the fast
			 * path. this will update the flags so that
			 * the slowpath instructions are ignored if the
			 * offset is negative.
			 *
			 * for load_order == 0 the HI condition will
			 * make loads at offset 0 take the slow path too.
			 */
			_emit(condt, ARM_CMP_I(r_off, 0), ctx);

			_emit(condt, ARM_ADD_R(r_scratch, r_off, r_skb_data),
			      ctx);

			if (load_order == 0)
				_emit(condt, ARM_LDRB_I(r_A, r_scratch, 0),
				      ctx);
			else if (load_order == 1)
				emit_load_be16(condt, r_A, r_scratch, ctx);
			else if (load_order == 2)
				emit_load_be32(condt, r_A, r_scratch, ctx);

			_emit(condt, ARM_B(b_imm(i + 1, ctx)), ctx);

			/* the slowpath */
			emit_mov_i(ARM_R3, (u32)load_func[load_order], ctx);
			emit(ARM_MOV_R(ARM_R0, r_skb), ctx);
			/* the offset is already in R1 */
			emit_blx_r(ARM_R3, ctx);
			/* check the result of skb_copy_bits */
			emit(ARM_CMP_I(ARM_R1, 0), ctx);
			emit_err_ret(ARM_COND_NE, ctx);
			emit(ARM_MOV_R(r_A, ARM_R0), ctx);
			break;
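
		/*
		 * Shape of the code emitted by the absolute-load cases
		 * above, sketched for the byte load (illustrative only):
		 *
		 *   mov    r1, #K           @ r_off
		 *   cmp    r8, r1           @ skb_headlen vs. offset
		 *   cmphi  r1, #0           @ fast path only for 0 < K < hl
		 *   addhi  r0, r1, r7       @ r_scratch = skb->data + K
		 *   ldrbhi r4, [r0]         @ r_A = *(u8 *)r_scratch
		 *   bhi    <next insn>
		 *   ...                     @ else: call jit_get_skb_b()
		 */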
		case BPF_LD | BPF_W | BPF_IND:
			load_order = 2;
			goto load_ind;
		case BPF_LD | BPF_H | BPF_IND:
			load_order = 1;
			goto load_ind;
		case BPF_LD | BPF_B | BPF_IND:
			load_order = 0;
load_ind:
			OP_IMM3(ARM_ADD, r_off, r_X, k, ctx);
			goto load_common;
		case BPF_LDX | BPF_IMM:
			ctx->seen |= SEEN_X;
			emit_mov_i(r_X, k, ctx);
			break;
		case BPF_LDX | BPF_W | BPF_LEN:
			ctx->seen |= SEEN_X | SEEN_SKB;
			emit(ARM_LDR_I(r_X, r_skb,
				       offsetof(struct sk_buff, len)), ctx);
			break;
		case BPF_LDX | BPF_MEM:
			ctx->seen |= SEEN_X | SEEN_MEM_WORD(k);
			emit(ARM_LDR_I(r_X, ARM_SP, SCRATCH_OFF(k)), ctx);
			break;
		case BPF_LDX | BPF_B | BPF_MSH:
			/* x = ((*(frame + k)) & 0xf) << 2; */
			ctx->seen |= SEEN_X | SEEN_DATA | SEEN_CALL;
			/* the interpreter should deal with the negative K */
			if ((int)k < 0)
				return -1;
			/* offset in r1: we might have to take the slow path */
			emit_mov_i(r_off, k, ctx);
			emit(ARM_CMP_R(r_skb_hl, r_off), ctx);
			/* load in r0: common with the slowpath */
			_emit(ARM_COND_HI, ARM_LDRB_R(ARM_R0, r_skb_data,
						      ARM_R1), ctx);
			/*
			 * emit_mov_i() might generate one or two instructions,
			 * the same holds for emit_blx_r()
			 */
			_emit(ARM_COND_HI, ARM_B(b_imm(i + 1, ctx) - 2), ctx);
			emit(ARM_MOV_R(ARM_R0, r_skb), ctx);
			/* r_off is r1 */
			emit_mov_i(ARM_R3, (u32)jit_get_skb_b, ctx);
			emit_blx_r(ARM_R3, ctx);
			/* check the return value of skb_copy_bits */
			emit(ARM_CMP_I(ARM_R1, 0), ctx);
			emit_err_ret(ARM_COND_NE, ctx);
			emit(ARM_AND_I(r_X, ARM_R0, 0x00f), ctx);
			emit(ARM_LSL_I(r_X, r_X, 2), ctx);
			break;
		case BPF_ST:
			ctx->seen |= SEEN_MEM_WORD(k);
			emit(ARM_STR_I(r_A, ARM_SP, SCRATCH_OFF(k)), ctx);
			break;
		case BPF_STX:
			update_on_xread(ctx);
			ctx->seen |= SEEN_MEM_WORD(k);
			emit(ARM_STR_I(r_X, ARM_SP, SCRATCH_OFF(k)), ctx);
			break;
		case BPF_ALU | BPF_ADD | BPF_K:
			/* A += K */
			OP_IMM3(ARM_ADD, r_A, r_A, k, ctx);
			break;
		case BPF_ALU | BPF_ADD | BPF_X:
			update_on_xread(ctx);
			emit(ARM_ADD_R(r_A, r_A, r_X), ctx);
			break;
		case BPF_ALU | BPF_SUB | BPF_K:
			/* A -= K */
			OP_IMM3(ARM_SUB, r_A, r_A, k, ctx);
			break;
		case BPF_ALU | BPF_SUB | BPF_X:
			update_on_xread(ctx);
			emit(ARM_SUB_R(r_A, r_A, r_X), ctx);
			break;
		case BPF_ALU | BPF_MUL | BPF_K:
			/* A *= K */
			emit_mov_i(r_scratch, k, ctx);
			emit(ARM_MUL(r_A, r_A, r_scratch), ctx);
			break;
		case BPF_ALU | BPF_MUL | BPF_X:
			update_on_xread(ctx);
			emit(ARM_MUL(r_A, r_A, r_X), ctx);
			break;
		case BPF_ALU | BPF_DIV | BPF_K:
			if (k == 1)
				break;
			emit_mov_i(r_scratch, k, ctx);
			emit_udiv(r_A, r_A, r_scratch, ctx);
			break;
		case BPF_ALU | BPF_DIV | BPF_X:
			update_on_xread(ctx);
			emit(ARM_CMP_I(r_X, 0), ctx);
			emit_err_ret(ARM_COND_EQ, ctx);
			emit_udiv(r_A, r_A, r_X, ctx);
			break;
		case BPF_ALU | BPF_OR | BPF_K:
			/* A |= K */
			OP_IMM3(ARM_ORR, r_A, r_A, k, ctx);
			break;
		case BPF_ALU | BPF_OR | BPF_X:
			update_on_xread(ctx);
			emit(ARM_ORR_R(r_A, r_A, r_X), ctx);
			break;
		case BPF_ALU | BPF_XOR | BPF_K:
			/* A ^= K */
			OP_IMM3(ARM_EOR, r_A, r_A, k, ctx);
			break;
		case BPF_ANC | SKF_AD_ALU_XOR_X:
		case BPF_ALU | BPF_XOR | BPF_X:
			/* A ^= X */
			update_on_xread(ctx);
			emit(ARM_EOR_R(r_A, r_A, r_X), ctx);
			break;
		case BPF_ALU | BPF_AND | BPF_K:
			/* A &= K */
			OP_IMM3(ARM_AND, r_A, r_A, k, ctx);
			break;
		case BPF_ALU | BPF_AND | BPF_X:
			update_on_xread(ctx);
			emit(ARM_AND_R(r_A, r_A, r_X), ctx);
			break;
		case BPF_ALU | BPF_LSH | BPF_K:
			if (unlikely(k > 31))
				return -1;
			emit(ARM_LSL_I(r_A, r_A, k), ctx);
			break;
		case BPF_ALU | BPF_LSH | BPF_X:
			update_on_xread(ctx);
			emit(ARM_LSL_R(r_A, r_A, r_X), ctx);
			break;
		case BPF_ALU | BPF_RSH | BPF_K:
			if (unlikely(k > 31))
				return -1;
			emit(ARM_LSR_I(r_A, r_A, k), ctx);
			break;
		case BPF_ALU | BPF_RSH | BPF_X:
			update_on_xread(ctx);
			emit(ARM_LSR_R(r_A, r_A, r_X), ctx);
			break;
		case BPF_ALU | BPF_NEG:
			/* A = -A */
			emit(ARM_RSB_I(r_A, r_A, 0), ctx);
			break;
		case BPF_JMP | BPF_JA:
			/* pc += K */
			emit(ARM_B(b_imm(i + k + 1, ctx)), ctx);
			break;
		case BPF_JMP | BPF_JEQ | BPF_K:
			/* pc += (A == K) ? pc->jt : pc->jf */
			condt = ARM_COND_EQ;
			goto cmp_imm;
		case BPF_JMP | BPF_JGT | BPF_K:
			/* pc += (A > K) ? pc->jt : pc->jf */
			condt = ARM_COND_HI;
			goto cmp_imm;
		case BPF_JMP | BPF_JGE | BPF_K:
			/* pc += (A >= K) ? pc->jt : pc->jf */
			condt = ARM_COND_HS;
cmp_imm:
			imm12 = imm8m(k);
			if (imm12 < 0) {
				emit_mov_i_no8m(r_scratch, k, ctx);
				emit(ARM_CMP_R(r_A, r_scratch), ctx);
			} else {
				emit(ARM_CMP_I(r_A, imm12), ctx);
			}
cond_jump:
			if (inst->jt)
				_emit(condt, ARM_B(b_imm(i + inst->jt + 1,
						   ctx)), ctx);
			if (inst->jf)
				_emit(condt ^ 1, ARM_B(b_imm(i + inst->jf + 1,
							     ctx)), ctx);
			break;
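
		/*
		 * Note on "condt ^ 1" above: the ARM condition codes come
		 * in inverse pairs (EQ/NE, CS/CC, HI/LS, GE/LT, ...), so
		 * flipping bit 0 of the code negates the condition; the
		 * jt branch is taken on condt, the jf branch on its
		 * opposite.
		 */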
		case BPF_JMP | BPF_JEQ | BPF_X:
			/* pc += (A == X) ? pc->jt : pc->jf */
			condt = ARM_COND_EQ;
			goto cmp_x;
		case BPF_JMP | BPF_JGT | BPF_X:
			/* pc += (A > X) ? pc->jt : pc->jf */
			condt = ARM_COND_HI;
			goto cmp_x;
		case BPF_JMP | BPF_JGE | BPF_X:
			/* pc += (A >= X) ? pc->jt : pc->jf */
			condt = ARM_COND_CS;
cmp_x:
			update_on_xread(ctx);
			emit(ARM_CMP_R(r_A, r_X), ctx);
			goto cond_jump;
		case BPF_JMP | BPF_JSET | BPF_K:
			/* pc += (A & K) ? pc->jt : pc->jf */
			condt = ARM_COND_NE;
			/* not set iff all zeroes iff Z==1 iff EQ */
			imm12 = imm8m(k);
			if (imm12 < 0) {
				emit_mov_i_no8m(r_scratch, k, ctx);
				emit(ARM_TST_R(r_A, r_scratch), ctx);
			} else {
				emit(ARM_TST_I(r_A, imm12), ctx);
			}
			goto cond_jump;
		case BPF_JMP | BPF_JSET | BPF_X:
			/* pc += (A & X) ? pc->jt : pc->jf */
			update_on_xread(ctx);
			condt = ARM_COND_NE;
			emit(ARM_TST_R(r_A, r_X), ctx);
			goto cond_jump;
		case BPF_RET | BPF_A:
			emit(ARM_MOV_R(ARM_R0, r_A), ctx);
			goto b_epilogue;
		case BPF_RET | BPF_K:
			if ((k == 0) && (ctx->ret0_fp_idx < 0))
				ctx->ret0_fp_idx = i;
			emit_mov_i(ARM_R0, k, ctx);
b_epilogue:
			if (i != ctx->skf->len - 1)
				emit(ARM_B(b_imm(prog->len, ctx)), ctx);
			break;
		case BPF_MISC | BPF_TAX:
			/* X = A */
			ctx->seen |= SEEN_X;
			emit(ARM_MOV_R(r_X, r_A), ctx);
			break;
		case BPF_MISC | BPF_TXA:
			/* A = X */
			update_on_xread(ctx);
			emit(ARM_MOV_R(r_A, r_X), ctx);
			break;
		case BPF_ANC | SKF_AD_PROTOCOL:
			/* A = ntohs(skb->protocol) */
			ctx->seen |= SEEN_SKB;
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
						  protocol) != 2);
			off = offsetof(struct sk_buff, protocol);
			emit(ARM_LDRH_I(r_scratch, r_skb, off), ctx);
			emit_swap16(r_A, r_scratch, ctx);
			break;
		case BPF_ANC | SKF_AD_CPU:
			/* r_scratch = current_thread_info() */
			OP_IMM3(ARM_BIC, r_scratch, ARM_SP, THREAD_SIZE - 1, ctx);
			/* A = current_thread_info()->cpu */
			BUILD_BUG_ON(FIELD_SIZEOF(struct thread_info, cpu) != 4);
			off = offsetof(struct thread_info, cpu);
			emit(ARM_LDR_I(r_A, r_scratch, off), ctx);
			break;
		case BPF_ANC | SKF_AD_IFINDEX:
		case BPF_ANC | SKF_AD_HATYPE:
			/* A = skb->dev->ifindex */
			/* A = skb->dev->type */
			ctx->seen |= SEEN_SKB;
			off = offsetof(struct sk_buff, dev);
			emit(ARM_LDR_I(r_scratch, r_skb, off), ctx);

			emit(ARM_CMP_I(r_scratch, 0), ctx);
			emit_err_ret(ARM_COND_EQ, ctx);

			BUILD_BUG_ON(FIELD_SIZEOF(struct net_device,
						  ifindex) != 4);
			BUILD_BUG_ON(FIELD_SIZEOF(struct net_device,
						  type) != 2);

			if (code == (BPF_ANC | SKF_AD_IFINDEX)) {
				off = offsetof(struct net_device, ifindex);
				emit(ARM_LDR_I(r_A, r_scratch, off), ctx);
			} else {
				/*
				 * offset of field "type" in "struct
				 * net_device" is above what can be
				 * used in the ldrh rd, [rn, #imm]
				 * instruction, so load the offset in
				 * a register and use ldrh rd, [rn, rm]
				 */
				off = offsetof(struct net_device, type);
				emit_mov_i(ARM_R3, off, ctx);
				emit(ARM_LDRH_R(r_A, r_scratch, ARM_R3), ctx);
			}
			break;
		case BPF_ANC | SKF_AD_MARK:
			ctx->seen |= SEEN_SKB;
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4);
			off = offsetof(struct sk_buff, mark);
			emit(ARM_LDR_I(r_A, r_skb, off), ctx);
			break;
		case BPF_ANC | SKF_AD_RXHASH:
			ctx->seen |= SEEN_SKB;
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, hash) != 4);
			off = offsetof(struct sk_buff, hash);
			emit(ARM_LDR_I(r_A, r_skb, off), ctx);
			break;
		case BPF_ANC | SKF_AD_VLAN_TAG:
		case BPF_ANC | SKF_AD_VLAN_TAG_PRESENT:
			ctx->seen |= SEEN_SKB;
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_tci) != 2);
			off = offsetof(struct sk_buff, vlan_tci);
			emit(ARM_LDRH_I(r_A, r_skb, off), ctx);
			if (code == (BPF_ANC | SKF_AD_VLAN_TAG))
				OP_IMM3(ARM_AND, r_A, r_A, ~VLAN_TAG_PRESENT, ctx);
			else {
				OP_IMM3(ARM_LSR, r_A, r_A, 12, ctx);
				OP_IMM3(ARM_AND, r_A, r_A, 0x1, ctx);
			}
			break;
		case BPF_ANC | SKF_AD_PKTTYPE:
			ctx->seen |= SEEN_SKB;
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
						  __pkt_type_offset[0]) != 1);
			off = PKT_TYPE_OFFSET();
			emit(ARM_LDRB_I(r_A, r_skb, off), ctx);
			emit(ARM_AND_I(r_A, r_A, PKT_TYPE_MAX), ctx);
#ifdef __BIG_ENDIAN_BITFIELD
			emit(ARM_LSR_I(r_A, r_A, 5), ctx);
#endif
			break;
		case BPF_ANC | SKF_AD_QUEUE:
			ctx->seen |= SEEN_SKB;
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
						  queue_mapping) != 2);
			BUILD_BUG_ON(offsetof(struct sk_buff,
					      queue_mapping) > 0xff);
			off = offsetof(struct sk_buff, queue_mapping);
			emit(ARM_LDRH_I(r_A, r_skb, off), ctx);
			break;
		case BPF_ANC | SKF_AD_PAY_OFFSET:
			ctx->seen |= SEEN_SKB | SEEN_CALL;

			emit(ARM_MOV_R(ARM_R0, r_skb), ctx);
			emit_mov_i(ARM_R3, (unsigned int)skb_get_poff, ctx);
			emit_blx_r(ARM_R3, ctx);
			emit(ARM_MOV_R(r_A, ARM_R0), ctx);
			break;
		case BPF_LDX | BPF_W | BPF_ABS:
			/*
			 * load a 32bit word from struct seccomp_data.
			 * seccomp_check_filter() will already have checked
			 * that k is 32bit aligned and lies within the
			 * struct seccomp_data.
			 */
			ctx->seen |= SEEN_SKB;
			emit(ARM_LDR_I(r_A, r_skb, k), ctx);
			break;
		default:
			return -1;
		}

		if (ctx->flags & FLAG_IMM_OVERFLOW)
			/*
			 * this instruction generated an overflow when
			 * trying to access the literal pool, so
			 * delegate this filter to the kernel interpreter.
			 */
			return -1;
	}

	/* compute offsets only during the first pass */
	if (ctx->target == NULL)
		ctx->offsets[i] = ctx->idx * 4;

	return 0;
}
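
/*
 * The compiler below works in two passes: build_body() is first run
 * with ctx.target == NULL, so nothing is written; this fake pass sizes
 * the body, records per-instruction offsets and fills ctx.seen. The
 * prologue and epilogue are then built to match what the filter
 * actually uses, the binary image is allocated, and a second pass
 * emits the real instructions at the recorded offsets.
 */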
void bpf_jit_compile(struct bpf_prog *fp)
{
	struct bpf_binary_header *header;
	struct jit_ctx ctx;
	unsigned tmp_idx;
	unsigned alloc_size;
	u8 *target_ptr;

	if (!bpf_jit_enable)
		return;

	memset(&ctx, 0, sizeof(ctx));
	ctx.skf		= fp;
	ctx.ret0_fp_idx = -1;

	ctx.offsets = kzalloc(4 * (ctx.skf->len + 1), GFP_KERNEL);
	if (ctx.offsets == NULL)
		return;

	/* fake pass to fill in the ctx->seen */
	if (unlikely(build_body(&ctx)))
		goto out;

	tmp_idx = ctx.idx;
	build_prologue(&ctx);
	ctx.prologue_bytes = (ctx.idx - tmp_idx) * 4;

#if __LINUX_ARM_ARCH__ < 7
	tmp_idx = ctx.idx;
	build_epilogue(&ctx);
	ctx.epilogue_bytes = (ctx.idx - tmp_idx) * 4;

	ctx.idx += ctx.imm_count;
	if (ctx.imm_count) {
		ctx.imms = kzalloc(4 * ctx.imm_count, GFP_KERNEL);
		if (ctx.imms == NULL)
			goto out;
	}
#else
	/* there's nothing after the epilogue on ARMv7 */
	build_epilogue(&ctx);
#endif
	alloc_size = 4 * ctx.idx;
	header = bpf_jit_binary_alloc(alloc_size, &target_ptr,
				      4, jit_fill_hole);
	if (header == NULL)
		goto out;

	ctx.target = (u32 *) target_ptr;
	ctx.idx = 0;

	build_prologue(&ctx);
	if (build_body(&ctx) < 0) {
#if __LINUX_ARM_ARCH__ < 7
		if (ctx.imm_count)
			kfree(ctx.imms);
#endif
		bpf_jit_binary_free(header);
		goto out;
	}
	build_epilogue(&ctx);

	flush_icache_range((u32)ctx.target, (u32)(ctx.target + ctx.idx));

#if __LINUX_ARM_ARCH__ < 7
	if (ctx.imm_count)
		kfree(ctx.imms);
#endif

	if (bpf_jit_enable > 1)
		/* there are 2 passes here */
		bpf_jit_dump(fp->len, alloc_size, 2, ctx.target);

	set_memory_ro((unsigned long)header, header->pages);
	fp->bpf_func = (void *)ctx.target;
	fp->jited = true;
out:
	kfree(ctx.offsets);
	return;
}

void bpf_jit_free(struct bpf_prog *fp)
{
	unsigned long addr = (unsigned long)fp->bpf_func & PAGE_MASK;
	struct bpf_binary_header *header = (void *)addr;

	if (!fp->jited)
		goto free_filter;

	set_memory_rw(addr, header->pages);
	bpf_jit_binary_free(header);

free_filter:
	bpf_prog_unlock_free(fp);
}