bpf_jit_32.c

/*
 * Just-In-Time compiler for BPF filters on 32bit ARM
 *
 * Copyright (c) 2011 Mircea Gherzan <mgherzan@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; version 2 of the License.
 */

#include <linux/bitops.h>
#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/filter.h>
#include <linux/netdevice.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/if_vlan.h>

#include <asm/cacheflush.h>
#include <asm/hwcap.h>
#include <asm/opcodes.h>

#include "bpf_jit_32.h"

/*
 * ABI:
 *
 * r0	scratch register
 * r4	BPF register A
 * r5	BPF register X
 * r6	pointer to the skb
 * r7	skb->data
 * r8	skb_headlen(skb)
 */

#define r_scratch	ARM_R0
/* r1-r3 are (also) used for the unaligned loads on the non-ARMv7 slowpath */
#define r_off		ARM_R1
#define r_A		ARM_R4
#define r_X		ARM_R5
#define r_skb		ARM_R6
#define r_skb_data	ARM_R7
#define r_skb_hl	ARM_R8

#define SCRATCH_SP_OFFSET	0
#define SCRATCH_OFF(k)		(SCRATCH_SP_OFFSET + 4 * (k))

#define SEEN_MEM		((1 << BPF_MEMWORDS) - 1)
#define SEEN_MEM_WORD(k)	(1 << (k))
#define SEEN_X			(1 << BPF_MEMWORDS)
#define SEEN_CALL		(1 << (BPF_MEMWORDS + 1))
#define SEEN_SKB		(1 << (BPF_MEMWORDS + 2))
#define SEEN_DATA		(1 << (BPF_MEMWORDS + 3))

#define FLAG_NEED_X_RESET	(1 << 0)
#define FLAG_IMM_OVERFLOW	(1 << 1)

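/*
 * Layout of the "seen" bitmap, for illustration: classic BPF has
 * BPF_MEMWORDS == 16 scratch words, so bits 0-15 track the individual
 * M[] slots (SEEN_MEM_WORD(k)), while SEEN_X, SEEN_CALL, SEEN_SKB and
 * SEEN_DATA occupy bits 16-19.  The prologue and epilogue consult this
 * mask so that registers and stack space are only set up for features
 * the filter actually uses.
 */
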
struct jit_ctx {
        const struct bpf_prog *skf;
        unsigned idx;
        unsigned prologue_bytes;
        int ret0_fp_idx;
        u32 seen;
        u32 flags;
        u32 *offsets;
        u32 *target;
#if __LINUX_ARM_ARCH__ < 7
        u16 epilogue_bytes;
        u16 imm_count;
        u32 *imms;
#endif
};

int bpf_jit_enable __read_mostly;

static inline int call_neg_helper(struct sk_buff *skb, int offset, void *ret,
                unsigned int size)
{
        void *ptr = bpf_internal_load_pointer_neg_helper(skb, offset, size);

        if (!ptr)
                return -EFAULT;
        memcpy(ret, ptr, size);
        return 0;
}

static u64 jit_get_skb_b(struct sk_buff *skb, int offset)
{
        u8 ret;
        int err;

        if (offset < 0)
                err = call_neg_helper(skb, offset, &ret, 1);
        else
                err = skb_copy_bits(skb, offset, &ret, 1);

        return (u64)err << 32 | ret;
}

static u64 jit_get_skb_h(struct sk_buff *skb, int offset)
{
        u16 ret;
        int err;

        if (offset < 0)
                err = call_neg_helper(skb, offset, &ret, 2);
        else
                err = skb_copy_bits(skb, offset, &ret, 2);

        return (u64)err << 32 | ntohs(ret);
}

static u64 jit_get_skb_w(struct sk_buff *skb, int offset)
{
        u32 ret;
        int err;

        if (offset < 0)
                err = call_neg_helper(skb, offset, &ret, 4);
        else
                err = skb_copy_bits(skb, offset, &ret, 4);

        return (u64)err << 32 | ntohl(ret);
}

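/*
 * The helpers above pack the error code into the upper 32 bits of the
 * returned u64 and the loaded (byte-swapped) value into the lower 32 bits.
 * Under the 32-bit ARM calling convention a u64 is returned in r0/r1, so,
 * assuming a little-endian kernel, the emitted slow path can test r1 for a
 * non-zero error and pick up the value from r0, which is exactly what the
 * code generated in build_body() does.
 */
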
/*
 * Wrapper that handles both OABI and EABI and ensures Thumb2 interworking
 * (where the assembly routines like __aeabi_uidiv could cause problems).
 */
static u32 jit_udiv(u32 dividend, u32 divisor)
{
        return dividend / divisor;
}

static inline void _emit(int cond, u32 inst, struct jit_ctx *ctx)
{
        inst |= (cond << 28);
        inst = __opcode_to_mem_arm(inst);

        if (ctx->target != NULL)
                ctx->target[ctx->idx] = inst;

        ctx->idx++;
}

/*
 * Emit an instruction that will be executed unconditionally.
 */
static inline void emit(u32 inst, struct jit_ctx *ctx)
{
        _emit(ARM_COND_AL, inst, ctx);
}

static u16 saved_regs(struct jit_ctx *ctx)
{
        u16 ret = 0;

        if ((ctx->skf->len > 1) ||
            (ctx->skf->insns[0].code == (BPF_RET | BPF_A)))
                ret |= 1 << r_A;

#ifdef CONFIG_FRAME_POINTER
        ret |= (1 << ARM_FP) | (1 << ARM_IP) | (1 << ARM_LR) | (1 << ARM_PC);
#else
        if (ctx->seen & SEEN_CALL)
                ret |= 1 << ARM_LR;
#endif

        if (ctx->seen & (SEEN_DATA | SEEN_SKB))
                ret |= 1 << r_skb;
        if (ctx->seen & SEEN_DATA)
                ret |= (1 << r_skb_data) | (1 << r_skb_hl);
        if (ctx->seen & SEEN_X)
                ret |= 1 << r_X;

        return ret;
}

static inline int mem_words_used(struct jit_ctx *ctx)
{
        /* yes, we do waste some stack space IF there are "holes" in the set */
        return fls(ctx->seen & SEEN_MEM);
}

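/*
 * Illustration: fls() returns the one-based index of the highest scratch
 * word that was seen, so a filter that only touches M[3] still gets four
 * words (16 bytes) of stack reserved even though M[0]-M[2] are never used.
 * That is the "holes" waste mentioned above.
 */
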
static inline bool is_load_to_a(u16 inst)
{
        switch (inst) {
        case BPF_LD | BPF_W | BPF_LEN:
        case BPF_LD | BPF_W | BPF_ABS:
        case BPF_LD | BPF_H | BPF_ABS:
        case BPF_LD | BPF_B | BPF_ABS:
                return true;
        default:
                return false;
        }
}

static void jit_fill_hole(void *area, unsigned int size)
{
        u32 *ptr;
        /* We are guaranteed to have aligned memory. */
        for (ptr = area; size >= sizeof(u32); size -= sizeof(u32))
                *ptr++ = __opcode_to_mem_arm(ARM_INST_UDF);
}

static void build_prologue(struct jit_ctx *ctx)
{
        u16 reg_set = saved_regs(ctx);
        u16 first_inst = ctx->skf->insns[0].code;
        u16 off;

#ifdef CONFIG_FRAME_POINTER
        emit(ARM_MOV_R(ARM_IP, ARM_SP), ctx);
        emit(ARM_PUSH(reg_set), ctx);
        emit(ARM_SUB_I(ARM_FP, ARM_IP, 4), ctx);
#else
        if (reg_set)
                emit(ARM_PUSH(reg_set), ctx);
#endif

        if (ctx->seen & (SEEN_DATA | SEEN_SKB))
                emit(ARM_MOV_R(r_skb, ARM_R0), ctx);

        if (ctx->seen & SEEN_DATA) {
                off = offsetof(struct sk_buff, data);
                emit(ARM_LDR_I(r_skb_data, r_skb, off), ctx);
                /* headlen = len - data_len */
                off = offsetof(struct sk_buff, len);
                emit(ARM_LDR_I(r_skb_hl, r_skb, off), ctx);
                off = offsetof(struct sk_buff, data_len);
                emit(ARM_LDR_I(r_scratch, r_skb, off), ctx);
                emit(ARM_SUB_R(r_skb_hl, r_skb_hl, r_scratch), ctx);
        }

        if (ctx->flags & FLAG_NEED_X_RESET)
                emit(ARM_MOV_I(r_X, 0), ctx);

        /* do not leak kernel data to userspace */
        if ((first_inst != (BPF_RET | BPF_K)) && !(is_load_to_a(first_inst)))
                emit(ARM_MOV_I(r_A, 0), ctx);

        /* stack space for the BPF_MEM words */
        if (ctx->seen & SEEN_MEM)
                emit(ARM_SUB_I(ARM_SP, ARM_SP, mem_words_used(ctx) * 4), ctx);
}

static void build_epilogue(struct jit_ctx *ctx)
{
        u16 reg_set = saved_regs(ctx);

        if (ctx->seen & SEEN_MEM)
                emit(ARM_ADD_I(ARM_SP, ARM_SP, mem_words_used(ctx) * 4), ctx);

        reg_set &= ~(1 << ARM_LR);

#ifdef CONFIG_FRAME_POINTER
        /* the first instruction of the prologue was: mov ip, sp */
        reg_set &= ~(1 << ARM_IP);
        reg_set |= (1 << ARM_SP);
        emit(ARM_LDM(ARM_SP, reg_set), ctx);
#else
        if (reg_set) {
                if (ctx->seen & SEEN_CALL)
                        reg_set |= 1 << ARM_PC;
                emit(ARM_POP(reg_set), ctx);
        }

        if (!(ctx->seen & SEEN_CALL))
                emit(ARM_BX(ARM_LR), ctx);
#endif
}

static int16_t imm8m(u32 x)
{
        u32 rot;

        for (rot = 0; rot < 16; rot++)
                if ((x & ~ror32(0xff, 2 * rot)) == 0)
                        return rol32(x, 2 * rot) | (rot << 8);

        return -1;
}

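/*
 * Worked example: imm8m() searches for the ARM "rotated immediate" encoding,
 * an 8-bit value rotated right by an even amount.  For x = 0x00ff0000 the
 * loop finds rot = 8 (a rotation by 16 bits) and returns
 * 0xff | (8 << 8) = 0x8ff, the same encoded immediate that emit_swap16()
 * below uses for the 0x00ff0000 mask.  Values that cannot be encoded this
 * way yield -1 and are loaded via emit_mov_i_no8m() instead.
 */
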
#if __LINUX_ARM_ARCH__ < 7

static u16 imm_offset(u32 k, struct jit_ctx *ctx)
{
        unsigned i = 0, offset;
        u16 imm;

        /* on the "fake" run we just count them (duplicates included) */
        if (ctx->target == NULL) {
                ctx->imm_count++;
                return 0;
        }

        while ((i < ctx->imm_count) && ctx->imms[i]) {
                if (ctx->imms[i] == k)
                        break;
                i++;
        }

        if (ctx->imms[i] == 0)
                ctx->imms[i] = k;

        /* constants go just after the epilogue */
        offset = ctx->offsets[ctx->skf->len];
        offset += ctx->prologue_bytes;
        offset += ctx->epilogue_bytes;
        offset += i * 4;

        ctx->target[offset / 4] = k;

        /* PC in ARM mode == address of the instruction + 8 */
        imm = offset - (8 + ctx->idx * 4);

        if (imm & ~0xfff) {
                /*
                 * literal pool is too far, signal it into flags. we
                 * can only detect it on the second pass unfortunately.
                 */
                ctx->flags |= FLAG_IMM_OVERFLOW;
                return 0;
        }

        return imm;
}

#endif /* __LINUX_ARM_ARCH__ */

/*
 * Move an immediate that's not an imm8m to a core register.
 */
static inline void emit_mov_i_no8m(int rd, u32 val, struct jit_ctx *ctx)
{
#if __LINUX_ARM_ARCH__ < 7
        emit(ARM_LDR_I(rd, ARM_PC, imm_offset(val, ctx)), ctx);
#else
        emit(ARM_MOVW(rd, val & 0xffff), ctx);
        if (val > 0xffff)
                emit(ARM_MOVT(rd, val >> 16), ctx);
#endif
}

static inline void emit_mov_i(int rd, u32 val, struct jit_ctx *ctx)
{
        int imm12 = imm8m(val);

        if (imm12 >= 0)
                emit(ARM_MOV_I(rd, imm12), ctx);
        else
                emit_mov_i_no8m(rd, val, ctx);
}

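/*
 * In other words, emit_mov_i() has three code paths: an encodable constant
 * becomes a single MOV with a rotated immediate; on ARMv7 anything else
 * becomes MOVW (plus MOVT when the upper halfword is non-zero); on older
 * cores the constant is placed in the literal pool after the epilogue and
 * fetched with a PC-relative LDR, which is what imm_offset() above arranges.
 */
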
#if __LINUX_ARM_ARCH__ < 6

static void emit_load_be32(u8 cond, u8 r_res, u8 r_addr, struct jit_ctx *ctx)
{
        _emit(cond, ARM_LDRB_I(ARM_R3, r_addr, 1), ctx);
        _emit(cond, ARM_LDRB_I(ARM_R1, r_addr, 0), ctx);
        _emit(cond, ARM_LDRB_I(ARM_R2, r_addr, 3), ctx);
        _emit(cond, ARM_LSL_I(ARM_R3, ARM_R3, 16), ctx);
        _emit(cond, ARM_LDRB_I(ARM_R0, r_addr, 2), ctx);
        _emit(cond, ARM_ORR_S(ARM_R3, ARM_R3, ARM_R1, SRTYPE_LSL, 24), ctx);
        _emit(cond, ARM_ORR_R(ARM_R3, ARM_R3, ARM_R2), ctx);
        _emit(cond, ARM_ORR_S(r_res, ARM_R3, ARM_R0, SRTYPE_LSL, 8), ctx);
}

static void emit_load_be16(u8 cond, u8 r_res, u8 r_addr, struct jit_ctx *ctx)
{
        _emit(cond, ARM_LDRB_I(ARM_R1, r_addr, 0), ctx);
        _emit(cond, ARM_LDRB_I(ARM_R2, r_addr, 1), ctx);
        _emit(cond, ARM_ORR_S(r_res, ARM_R2, ARM_R1, SRTYPE_LSL, 8), ctx);
}

static inline void emit_swap16(u8 r_dst, u8 r_src, struct jit_ctx *ctx)
{
        /* r_dst = (r_src << 8) | (r_src >> 8) */
        emit(ARM_LSL_I(ARM_R1, r_src, 8), ctx);
        emit(ARM_ORR_S(r_dst, ARM_R1, r_src, SRTYPE_LSR, 8), ctx);

        /*
         * we need to mask out the bits set in r_dst[23:16] due to
         * the first shift instruction.
         *
         * note that 0x8ff is the encoded immediate 0x00ff0000.
         */
        emit(ARM_BIC_I(r_dst, r_dst, 0x8ff), ctx);
}

#else /* ARMv6+ */

static void emit_load_be32(u8 cond, u8 r_res, u8 r_addr, struct jit_ctx *ctx)
{
        _emit(cond, ARM_LDR_I(r_res, r_addr, 0), ctx);
#ifdef __LITTLE_ENDIAN
        _emit(cond, ARM_REV(r_res, r_res), ctx);
#endif
}

static void emit_load_be16(u8 cond, u8 r_res, u8 r_addr, struct jit_ctx *ctx)
{
        _emit(cond, ARM_LDRH_I(r_res, r_addr, 0), ctx);
#ifdef __LITTLE_ENDIAN
        _emit(cond, ARM_REV16(r_res, r_res), ctx);
#endif
}

static inline void emit_swap16(u8 r_dst __maybe_unused,
                               u8 r_src __maybe_unused,
                               struct jit_ctx *ctx __maybe_unused)
{
#ifdef __LITTLE_ENDIAN
        emit(ARM_REV16(r_dst, r_src), ctx);
#endif
}

#endif /* __LINUX_ARM_ARCH__ < 6 */

/* Compute the immediate value for a PC-relative branch. */
static inline u32 b_imm(unsigned tgt, struct jit_ctx *ctx)
{
        u32 imm;

        if (ctx->target == NULL)
                return 0;
        /*
         * BPF allows only forward jumps and the offset of the target is
         * still the one computed during the first pass.
         */
        imm = ctx->offsets[tgt] + ctx->prologue_bytes - (ctx->idx * 4 + 8);

        return imm >> 2;
}

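/*
 * Illustration: the subtraction of (ctx->idx * 4 + 8) accounts for the ARM
 * convention that PC reads as the address of the current instruction plus 8,
 * and the final ">> 2" converts the byte distance into the word offset that
 * the B instruction encodes.  If the target starts 24 bytes past the branch
 * instruction, the emitted offset is (24 - 8) >> 2 = 4 words.
 */
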
#define OP_IMM3(op, r1, r2, imm_val, ctx)                               \
        do {                                                            \
                imm12 = imm8m(imm_val);                                 \
                if (imm12 < 0) {                                        \
                        emit_mov_i_no8m(r_scratch, imm_val, ctx);       \
                        emit(op ## _R((r1), (r2), r_scratch), ctx);     \
                } else {                                                \
                        emit(op ## _I((r1), (r2), imm12), ctx);         \
                }                                                       \
        } while (0)

static inline void emit_err_ret(u8 cond, struct jit_ctx *ctx)
{
        if (ctx->ret0_fp_idx >= 0) {
                _emit(cond, ARM_B(b_imm(ctx->ret0_fp_idx, ctx)), ctx);
                /* NOP to keep the size constant between passes */
                emit(ARM_MOV_R(ARM_R0, ARM_R0), ctx);
        } else {
                _emit(cond, ARM_MOV_I(ARM_R0, 0), ctx);
                _emit(cond, ARM_B(b_imm(ctx->skf->len, ctx)), ctx);
        }
}

static inline void emit_blx_r(u8 tgt_reg, struct jit_ctx *ctx)
{
#if __LINUX_ARM_ARCH__ < 5
        emit(ARM_MOV_R(ARM_LR, ARM_PC), ctx);

        if (elf_hwcap & HWCAP_THUMB)
                emit(ARM_BX(tgt_reg), ctx);
        else
                emit(ARM_MOV_R(ARM_PC, tgt_reg), ctx);
#else
        emit(ARM_BLX_R(tgt_reg), ctx);
#endif
}

static inline void emit_udiv(u8 rd, u8 rm, u8 rn, struct jit_ctx *ctx)
{
#if __LINUX_ARM_ARCH__ == 7
        if (elf_hwcap & HWCAP_IDIVA) {
                emit(ARM_UDIV(rd, rm, rn), ctx);
                return;
        }
#endif

        /*
         * For BPF_ALU | BPF_DIV | BPF_K instructions, rm is ARM_R4
         * (r_A) and rn is ARM_R0 (r_scratch) so load rn first into
         * ARM_R1 to avoid accidentally overwriting ARM_R0 with rm
         * before using it as a source for ARM_R1.
         *
         * For BPF_ALU | BPF_DIV | BPF_X rm is ARM_R4 (r_A) and rn is
         * ARM_R5 (r_X) so there are no particular register overlap
         * issues.
         */
        if (rn != ARM_R1)
                emit(ARM_MOV_R(ARM_R1, rn), ctx);
        if (rm != ARM_R0)
                emit(ARM_MOV_R(ARM_R0, rm), ctx);

        ctx->seen |= SEEN_CALL;
        emit_mov_i(ARM_R3, (u32)jit_udiv, ctx);
        emit_blx_r(ARM_R3, ctx);

        if (rd != ARM_R0)
                emit(ARM_MOV_R(rd, ARM_R0), ctx);
}

static inline void update_on_xread(struct jit_ctx *ctx)
{
        if (!(ctx->seen & SEEN_X))
                ctx->flags |= FLAG_NEED_X_RESET;

        ctx->seen |= SEEN_X;
}

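/*
 * update_on_xread() is called wherever X is read.  If nothing has marked X
 * as seen by that point (i.e. no earlier instruction loaded it),
 * FLAG_NEED_X_RESET is set so that build_prologue() emits a "mov r_X, #0"
 * and the filter never observes an uninitialized register.
 */
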
static int build_body(struct jit_ctx *ctx)
{
        void *load_func[] = {jit_get_skb_b, jit_get_skb_h, jit_get_skb_w};
        const struct bpf_prog *prog = ctx->skf;
        const struct sock_filter *inst;
        unsigned i, load_order, off, condt;
        int imm12;
        u32 k;

        for (i = 0; i < prog->len; i++) {
                u16 code;

                inst = &(prog->insns[i]);
                /* K as an immediate value operand */
                k = inst->k;
                code = bpf_anc_helper(inst);

                /* compute offsets only in the fake pass */
                if (ctx->target == NULL)
                        ctx->offsets[i] = ctx->idx * 4;

                switch (code) {
                case BPF_LD | BPF_IMM:
                        emit_mov_i(r_A, k, ctx);
                        break;
                case BPF_LD | BPF_W | BPF_LEN:
                        ctx->seen |= SEEN_SKB;
                        BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, len) != 4);
                        emit(ARM_LDR_I(r_A, r_skb,
                                       offsetof(struct sk_buff, len)), ctx);
                        break;
                case BPF_LD | BPF_MEM:
                        /* A = scratch[k] */
                        ctx->seen |= SEEN_MEM_WORD(k);
                        emit(ARM_LDR_I(r_A, ARM_SP, SCRATCH_OFF(k)), ctx);
                        break;
                case BPF_LD | BPF_W | BPF_ABS:
                        load_order = 2;
                        goto load;
                case BPF_LD | BPF_H | BPF_ABS:
                        load_order = 1;
                        goto load;
                case BPF_LD | BPF_B | BPF_ABS:
                        load_order = 0;
load:
                        emit_mov_i(r_off, k, ctx);
load_common:
                        ctx->seen |= SEEN_DATA | SEEN_CALL;

                        if (load_order > 0) {
                                emit(ARM_SUB_I(r_scratch, r_skb_hl,
                                               1 << load_order), ctx);
                                emit(ARM_CMP_R(r_scratch, r_off), ctx);
                                condt = ARM_COND_GE;
                        } else {
                                emit(ARM_CMP_R(r_skb_hl, r_off), ctx);
                                condt = ARM_COND_HI;
                        }

                        /*
                         * test for negative offset, only if we are
                         * currently scheduled to take the fast
                         * path. this will update the flags so that
                         * the slowpath instructions are ignored if the
                         * offset is negative.
                         *
                         * for load_order == 0 the HI condition will
                         * make loads at offset 0 take the slow path too.
                         */
                        _emit(condt, ARM_CMP_I(r_off, 0), ctx);

                        _emit(condt, ARM_ADD_R(r_scratch, r_off, r_skb_data),
                              ctx);

                        if (load_order == 0)
                                _emit(condt, ARM_LDRB_I(r_A, r_scratch, 0),
                                      ctx);
                        else if (load_order == 1)
                                emit_load_be16(condt, r_A, r_scratch, ctx);
                        else if (load_order == 2)
                                emit_load_be32(condt, r_A, r_scratch, ctx);

                        _emit(condt, ARM_B(b_imm(i + 1, ctx)), ctx);

                        /* the slowpath */
                        emit_mov_i(ARM_R3, (u32)load_func[load_order], ctx);
                        emit(ARM_MOV_R(ARM_R0, r_skb), ctx);
                        /* the offset is already in R1 */
                        emit_blx_r(ARM_R3, ctx);
                        /* check the result of skb_copy_bits */
                        emit(ARM_CMP_I(ARM_R1, 0), ctx);
                        emit_err_ret(ARM_COND_NE, ctx);
                        emit(ARM_MOV_R(r_A, ARM_R0), ctx);
                        break;
                case BPF_LD | BPF_W | BPF_IND:
                        load_order = 2;
                        goto load_ind;
                case BPF_LD | BPF_H | BPF_IND:
                        load_order = 1;
                        goto load_ind;
                case BPF_LD | BPF_B | BPF_IND:
                        load_order = 0;
load_ind:
                        update_on_xread(ctx);
                        OP_IMM3(ARM_ADD, r_off, r_X, k, ctx);
                        goto load_common;
                case BPF_LDX | BPF_IMM:
                        ctx->seen |= SEEN_X;
                        emit_mov_i(r_X, k, ctx);
                        break;
                case BPF_LDX | BPF_W | BPF_LEN:
                        ctx->seen |= SEEN_X | SEEN_SKB;
                        emit(ARM_LDR_I(r_X, r_skb,
                                       offsetof(struct sk_buff, len)), ctx);
                        break;
                case BPF_LDX | BPF_MEM:
                        ctx->seen |= SEEN_X | SEEN_MEM_WORD(k);
                        emit(ARM_LDR_I(r_X, ARM_SP, SCRATCH_OFF(k)), ctx);
                        break;
                case BPF_LDX | BPF_B | BPF_MSH:
                        /* x = ((*(frame + k)) & 0xf) << 2; */
                        ctx->seen |= SEEN_X | SEEN_DATA | SEEN_CALL;
                        /* the interpreter should deal with the negative K */
                        if ((int)k < 0)
                                return -1;
                        /* offset in r1: we might have to take the slow path */
                        emit_mov_i(r_off, k, ctx);
                        emit(ARM_CMP_R(r_skb_hl, r_off), ctx);
                        /* load in r0: common with the slowpath */
                        _emit(ARM_COND_HI, ARM_LDRB_R(ARM_R0, r_skb_data,
                                                      ARM_R1), ctx);
                        /*
                         * emit_mov_i() might generate one or two instructions,
                         * the same holds for emit_blx_r()
                         */
                        _emit(ARM_COND_HI, ARM_B(b_imm(i + 1, ctx) - 2), ctx);
                        emit(ARM_MOV_R(ARM_R0, r_skb), ctx);
                        /* r_off is r1 */
                        emit_mov_i(ARM_R3, (u32)jit_get_skb_b, ctx);
                        emit_blx_r(ARM_R3, ctx);
                        /* check the return value of skb_copy_bits */
                        emit(ARM_CMP_I(ARM_R1, 0), ctx);
                        emit_err_ret(ARM_COND_NE, ctx);
                        emit(ARM_AND_I(r_X, ARM_R0, 0x00f), ctx);
                        emit(ARM_LSL_I(r_X, r_X, 2), ctx);
                        break;
                case BPF_ST:
                        ctx->seen |= SEEN_MEM_WORD(k);
                        emit(ARM_STR_I(r_A, ARM_SP, SCRATCH_OFF(k)), ctx);
                        break;
                case BPF_STX:
                        update_on_xread(ctx);
                        ctx->seen |= SEEN_MEM_WORD(k);
                        emit(ARM_STR_I(r_X, ARM_SP, SCRATCH_OFF(k)), ctx);
                        break;
                case BPF_ALU | BPF_ADD | BPF_K:
                        /* A += K */
                        OP_IMM3(ARM_ADD, r_A, r_A, k, ctx);
                        break;
                case BPF_ALU | BPF_ADD | BPF_X:
                        update_on_xread(ctx);
                        emit(ARM_ADD_R(r_A, r_A, r_X), ctx);
                        break;
                case BPF_ALU | BPF_SUB | BPF_K:
                        /* A -= K */
                        OP_IMM3(ARM_SUB, r_A, r_A, k, ctx);
                        break;
                case BPF_ALU | BPF_SUB | BPF_X:
                        update_on_xread(ctx);
                        emit(ARM_SUB_R(r_A, r_A, r_X), ctx);
                        break;
                case BPF_ALU | BPF_MUL | BPF_K:
                        /* A *= K */
                        emit_mov_i(r_scratch, k, ctx);
                        emit(ARM_MUL(r_A, r_A, r_scratch), ctx);
                        break;
                case BPF_ALU | BPF_MUL | BPF_X:
                        update_on_xread(ctx);
                        emit(ARM_MUL(r_A, r_A, r_X), ctx);
                        break;
                case BPF_ALU | BPF_DIV | BPF_K:
                        if (k == 1)
                                break;
                        emit_mov_i(r_scratch, k, ctx);
                        emit_udiv(r_A, r_A, r_scratch, ctx);
                        break;
                case BPF_ALU | BPF_DIV | BPF_X:
                        update_on_xread(ctx);
                        emit(ARM_CMP_I(r_X, 0), ctx);
                        emit_err_ret(ARM_COND_EQ, ctx);
                        emit_udiv(r_A, r_A, r_X, ctx);
                        break;
                case BPF_ALU | BPF_OR | BPF_K:
                        /* A |= K */
                        OP_IMM3(ARM_ORR, r_A, r_A, k, ctx);
                        break;
                case BPF_ALU | BPF_OR | BPF_X:
                        update_on_xread(ctx);
                        emit(ARM_ORR_R(r_A, r_A, r_X), ctx);
                        break;
                case BPF_ALU | BPF_XOR | BPF_K:
                        /* A ^= K; */
                        OP_IMM3(ARM_EOR, r_A, r_A, k, ctx);
                        break;
                case BPF_ANC | SKF_AD_ALU_XOR_X:
                case BPF_ALU | BPF_XOR | BPF_X:
                        /* A ^= X */
                        update_on_xread(ctx);
                        emit(ARM_EOR_R(r_A, r_A, r_X), ctx);
                        break;
                case BPF_ALU | BPF_AND | BPF_K:
                        /* A &= K */
                        OP_IMM3(ARM_AND, r_A, r_A, k, ctx);
                        break;
                case BPF_ALU | BPF_AND | BPF_X:
                        update_on_xread(ctx);
                        emit(ARM_AND_R(r_A, r_A, r_X), ctx);
                        break;
                case BPF_ALU | BPF_LSH | BPF_K:
                        if (unlikely(k > 31))
                                return -1;
                        emit(ARM_LSL_I(r_A, r_A, k), ctx);
                        break;
                case BPF_ALU | BPF_LSH | BPF_X:
                        update_on_xread(ctx);
                        emit(ARM_LSL_R(r_A, r_A, r_X), ctx);
                        break;
                case BPF_ALU | BPF_RSH | BPF_K:
                        if (unlikely(k > 31))
                                return -1;
                        emit(ARM_LSR_I(r_A, r_A, k), ctx);
                        break;
                case BPF_ALU | BPF_RSH | BPF_X:
                        update_on_xread(ctx);
                        emit(ARM_LSR_R(r_A, r_A, r_X), ctx);
                        break;
                case BPF_ALU | BPF_NEG:
                        /* A = -A */
                        emit(ARM_RSB_I(r_A, r_A, 0), ctx);
                        break;
                case BPF_JMP | BPF_JA:
                        /* pc += K */
                        emit(ARM_B(b_imm(i + k + 1, ctx)), ctx);
                        break;
                case BPF_JMP | BPF_JEQ | BPF_K:
                        /* pc += (A == K) ? pc->jt : pc->jf */
                        condt = ARM_COND_EQ;
                        goto cmp_imm;
                case BPF_JMP | BPF_JGT | BPF_K:
                        /* pc += (A > K) ? pc->jt : pc->jf */
                        condt = ARM_COND_HI;
                        goto cmp_imm;
                case BPF_JMP | BPF_JGE | BPF_K:
                        /* pc += (A >= K) ? pc->jt : pc->jf */
                        condt = ARM_COND_HS;
cmp_imm:
                        imm12 = imm8m(k);
                        if (imm12 < 0) {
                                emit_mov_i_no8m(r_scratch, k, ctx);
                                emit(ARM_CMP_R(r_A, r_scratch), ctx);
                        } else {
                                emit(ARM_CMP_I(r_A, imm12), ctx);
                        }
cond_jump:
                        if (inst->jt)
                                _emit(condt, ARM_B(b_imm(i + inst->jt + 1,
                                                   ctx)), ctx);
                        if (inst->jf)
                                _emit(condt ^ 1, ARM_B(b_imm(i + inst->jf + 1,
                                                             ctx)), ctx);
                        break;
                case BPF_JMP | BPF_JEQ | BPF_X:
                        /* pc += (A == X) ? pc->jt : pc->jf */
                        condt = ARM_COND_EQ;
                        goto cmp_x;
                case BPF_JMP | BPF_JGT | BPF_X:
                        /* pc += (A > X) ? pc->jt : pc->jf */
                        condt = ARM_COND_HI;
                        goto cmp_x;
                case BPF_JMP | BPF_JGE | BPF_X:
                        /* pc += (A >= X) ? pc->jt : pc->jf */
                        condt = ARM_COND_CS;
cmp_x:
                        update_on_xread(ctx);
                        emit(ARM_CMP_R(r_A, r_X), ctx);
                        goto cond_jump;
                case BPF_JMP | BPF_JSET | BPF_K:
                        /* pc += (A & K) ? pc->jt : pc->jf */
                        condt = ARM_COND_NE;
                        /* not set iff all zeroes iff Z==1 iff EQ */
                        imm12 = imm8m(k);
                        if (imm12 < 0) {
                                emit_mov_i_no8m(r_scratch, k, ctx);
                                emit(ARM_TST_R(r_A, r_scratch), ctx);
                        } else {
                                emit(ARM_TST_I(r_A, imm12), ctx);
                        }
                        goto cond_jump;
                case BPF_JMP | BPF_JSET | BPF_X:
                        /* pc += (A & X) ? pc->jt : pc->jf */
                        update_on_xread(ctx);
                        condt = ARM_COND_NE;
                        emit(ARM_TST_R(r_A, r_X), ctx);
                        goto cond_jump;
                case BPF_RET | BPF_A:
                        emit(ARM_MOV_R(ARM_R0, r_A), ctx);
                        goto b_epilogue;
                case BPF_RET | BPF_K:
                        if ((k == 0) && (ctx->ret0_fp_idx < 0))
                                ctx->ret0_fp_idx = i;
                        emit_mov_i(ARM_R0, k, ctx);
b_epilogue:
                        if (i != ctx->skf->len - 1)
                                emit(ARM_B(b_imm(prog->len, ctx)), ctx);
                        break;
                case BPF_MISC | BPF_TAX:
                        /* X = A */
                        ctx->seen |= SEEN_X;
                        emit(ARM_MOV_R(r_X, r_A), ctx);
                        break;
                case BPF_MISC | BPF_TXA:
                        /* A = X */
                        update_on_xread(ctx);
                        emit(ARM_MOV_R(r_A, r_X), ctx);
                        break;
                case BPF_ANC | SKF_AD_PROTOCOL:
                        /* A = ntohs(skb->protocol) */
                        ctx->seen |= SEEN_SKB;
                        BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
                                                  protocol) != 2);
                        off = offsetof(struct sk_buff, protocol);
                        emit(ARM_LDRH_I(r_scratch, r_skb, off), ctx);
                        emit_swap16(r_A, r_scratch, ctx);
                        break;
                case BPF_ANC | SKF_AD_CPU:
                        /* r_scratch = current_thread_info() */
                        OP_IMM3(ARM_BIC, r_scratch, ARM_SP, THREAD_SIZE - 1, ctx);
                        /* A = current_thread_info()->cpu */
                        BUILD_BUG_ON(FIELD_SIZEOF(struct thread_info, cpu) != 4);
                        off = offsetof(struct thread_info, cpu);
                        emit(ARM_LDR_I(r_A, r_scratch, off), ctx);
                        break;
                case BPF_ANC | SKF_AD_IFINDEX:
                case BPF_ANC | SKF_AD_HATYPE:
                        /* A = skb->dev->ifindex */
                        /* A = skb->dev->type */
                        ctx->seen |= SEEN_SKB;
                        off = offsetof(struct sk_buff, dev);
                        emit(ARM_LDR_I(r_scratch, r_skb, off), ctx);

                        emit(ARM_CMP_I(r_scratch, 0), ctx);
                        emit_err_ret(ARM_COND_EQ, ctx);

                        BUILD_BUG_ON(FIELD_SIZEOF(struct net_device,
                                                  ifindex) != 4);
                        BUILD_BUG_ON(FIELD_SIZEOF(struct net_device,
                                                  type) != 2);

                        if (code == (BPF_ANC | SKF_AD_IFINDEX)) {
                                off = offsetof(struct net_device, ifindex);
                                emit(ARM_LDR_I(r_A, r_scratch, off), ctx);
                        } else {
                                /*
                                 * offset of field "type" in "struct
                                 * net_device" is above what can be
                                 * used in the ldrh rd, [rn, #imm]
                                 * instruction, so load the offset in
                                 * a register and use ldrh rd, [rn, rm]
                                 */
                                off = offsetof(struct net_device, type);
                                emit_mov_i(ARM_R3, off, ctx);
                                emit(ARM_LDRH_R(r_A, r_scratch, ARM_R3), ctx);
                        }
                        break;
                case BPF_ANC | SKF_AD_MARK:
                        ctx->seen |= SEEN_SKB;
                        BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4);
                        off = offsetof(struct sk_buff, mark);
                        emit(ARM_LDR_I(r_A, r_skb, off), ctx);
                        break;
                case BPF_ANC | SKF_AD_RXHASH:
                        ctx->seen |= SEEN_SKB;
                        BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, hash) != 4);
                        off = offsetof(struct sk_buff, hash);
                        emit(ARM_LDR_I(r_A, r_skb, off), ctx);
                        break;
                case BPF_ANC | SKF_AD_VLAN_TAG:
                case BPF_ANC | SKF_AD_VLAN_TAG_PRESENT:
                        ctx->seen |= SEEN_SKB;
                        BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_tci) != 2);
                        off = offsetof(struct sk_buff, vlan_tci);
                        emit(ARM_LDRH_I(r_A, r_skb, off), ctx);
                        if (code == (BPF_ANC | SKF_AD_VLAN_TAG))
                                OP_IMM3(ARM_AND, r_A, r_A, ~VLAN_TAG_PRESENT, ctx);
                        else {
                                OP_IMM3(ARM_LSR, r_A, r_A, 12, ctx);
                                OP_IMM3(ARM_AND, r_A, r_A, 0x1, ctx);
                        }
                        break;
                case BPF_ANC | SKF_AD_PKTTYPE:
                        ctx->seen |= SEEN_SKB;
                        BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
                                                  __pkt_type_offset[0]) != 1);
                        off = PKT_TYPE_OFFSET();
                        emit(ARM_LDRB_I(r_A, r_skb, off), ctx);
                        emit(ARM_AND_I(r_A, r_A, PKT_TYPE_MAX), ctx);
#ifdef __BIG_ENDIAN_BITFIELD
                        emit(ARM_LSR_I(r_A, r_A, 5), ctx);
#endif
                        break;
                case BPF_ANC | SKF_AD_QUEUE:
                        ctx->seen |= SEEN_SKB;
                        BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
                                                  queue_mapping) != 2);
                        BUILD_BUG_ON(offsetof(struct sk_buff,
                                              queue_mapping) > 0xff);
                        off = offsetof(struct sk_buff, queue_mapping);
                        emit(ARM_LDRH_I(r_A, r_skb, off), ctx);
                        break;
                case BPF_ANC | SKF_AD_PAY_OFFSET:
                        ctx->seen |= SEEN_SKB | SEEN_CALL;

                        emit(ARM_MOV_R(ARM_R0, r_skb), ctx);
                        emit_mov_i(ARM_R3, (unsigned int)skb_get_poff, ctx);
                        emit_blx_r(ARM_R3, ctx);
                        emit(ARM_MOV_R(r_A, ARM_R0), ctx);
                        break;
                case BPF_LDX | BPF_W | BPF_ABS:
                        /*
                         * load a 32bit word from struct seccomp_data.
                         * seccomp_check_filter() will already have checked
                         * that k is 32bit aligned and lies within the
                         * struct seccomp_data.
                         */
                        ctx->seen |= SEEN_SKB;
                        emit(ARM_LDR_I(r_A, r_skb, k), ctx);
                        break;
                default:
                        return -1;
                }
                if (ctx->flags & FLAG_IMM_OVERFLOW)
                        /*
                         * this instruction generated an overflow when
                         * trying to access the literal pool, so
                         * delegate this filter to the kernel interpreter.
                         */
                        return -1;
        }

        /* compute offsets only during the first pass */
        if (ctx->target == NULL)
                ctx->offsets[i] = ctx->idx * 4;

        return 0;
}

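/*
 * bpf_jit_compile() below runs the code generator twice.  The first pass
 * works with ctx.target == NULL, so _emit() only advances ctx.idx; this
 * sizes the image, fills ctx.offsets[] with the start of every BPF
 * instruction and collects the "seen" mask.  After the buffer is allocated,
 * the second pass emits the real instructions, using the recorded offsets
 * for branch targets.
 */
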
void bpf_jit_compile(struct bpf_prog *fp)
{
        struct bpf_binary_header *header;
        struct jit_ctx ctx;
        unsigned tmp_idx;
        unsigned alloc_size;
        u8 *target_ptr;

        if (!bpf_jit_enable)
                return;

        memset(&ctx, 0, sizeof(ctx));
        ctx.skf = fp;
        ctx.ret0_fp_idx = -1;

        ctx.offsets = kzalloc(4 * (ctx.skf->len + 1), GFP_KERNEL);
        if (ctx.offsets == NULL)
                return;

        /* fake pass to fill in the ctx->seen */
        if (unlikely(build_body(&ctx)))
                goto out;

        tmp_idx = ctx.idx;
        build_prologue(&ctx);
        ctx.prologue_bytes = (ctx.idx - tmp_idx) * 4;

#if __LINUX_ARM_ARCH__ < 7
        tmp_idx = ctx.idx;
        build_epilogue(&ctx);
        ctx.epilogue_bytes = (ctx.idx - tmp_idx) * 4;

        ctx.idx += ctx.imm_count;
        if (ctx.imm_count) {
                ctx.imms = kzalloc(4 * ctx.imm_count, GFP_KERNEL);
                if (ctx.imms == NULL)
                        goto out;
        }
#else
        /* there's nothing after the epilogue on ARMv7 */
        build_epilogue(&ctx);
#endif
        alloc_size = 4 * ctx.idx;
        header = bpf_jit_binary_alloc(alloc_size, &target_ptr,
                                      4, jit_fill_hole);
        if (header == NULL)
                goto out;

        ctx.target = (u32 *) target_ptr;
        ctx.idx = 0;

        build_prologue(&ctx);
        if (build_body(&ctx) < 0) {
#if __LINUX_ARM_ARCH__ < 7
                if (ctx.imm_count)
                        kfree(ctx.imms);
#endif
                bpf_jit_binary_free(header);
                goto out;
        }
        build_epilogue(&ctx);

        flush_icache_range((u32)ctx.target, (u32)(ctx.target + ctx.idx));

#if __LINUX_ARM_ARCH__ < 7
        if (ctx.imm_count)
                kfree(ctx.imms);
#endif

        if (bpf_jit_enable > 1)
                /* there are 2 passes here */
                bpf_jit_dump(fp->len, alloc_size, 2, ctx.target);

        set_memory_ro((unsigned long)header, header->pages);
        fp->bpf_func = (void *)ctx.target;
        fp->jited = true;
out:
        kfree(ctx.offsets);
        return;
}

void bpf_jit_free(struct bpf_prog *fp)
{
        unsigned long addr = (unsigned long)fp->bpf_func & PAGE_MASK;
        struct bpf_binary_header *header = (void *)addr;

        if (!fp->jited)
                goto free_filter;

        set_memory_rw(addr, header->pages);
        bpf_jit_binary_free(header);

free_filter:
        bpf_prog_unlock_free(fp);
}