bpf_jit_comp.c 23 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650651652653654655656657658659660661662663664665666667668669670671672673674675676677678679680681682683684685686687688689690691692693694695696697698699700701702703704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896
  1. /*
  2. * BPF Jit compiler for s390.
  3. *
  4. * Copyright IBM Corp. 2012
  5. *
  6. * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
  7. */
  8. #include <linux/moduleloader.h>
  9. #include <linux/netdevice.h>
  10. #include <linux/if_vlan.h>
  11. #include <linux/filter.h>
  12. #include <linux/random.h>
  13. #include <linux/init.h>
  14. #include <asm/cacheflush.h>
  15. #include <asm/facility.h>
  16. #include <asm/dis.h>
  17. /*
  18. * Conventions:
  19. * %r2 = skb pointer
  20. * %r3 = offset parameter
  21. * %r4 = scratch register / length parameter
  22. * %r5 = BPF A accumulator
  23. * %r8 = return address
  24. * %r9 = save register for skb pointer
  25. * %r10 = skb->data
  26. * %r11 = skb->len - skb->data_len (headlen)
  27. * %r12 = BPF X accumulator
  28. * %r13 = literal pool pointer
  29. * 0(%r15) - 63(%r15) scratch memory array with BPF_MEMWORDS
  30. */
  31. int bpf_jit_enable __read_mostly;
  32. /*
  33. * assembly code in arch/x86/net/bpf_jit.S
  34. */
  35. extern u8 sk_load_word[], sk_load_half[], sk_load_byte[], sk_load_byte_msh[];
  36. extern u8 sk_load_word_ind[], sk_load_half_ind[], sk_load_byte_ind[];
  37. struct bpf_jit {
  38. unsigned int seen;
  39. u8 *start;
  40. u8 *prg;
  41. u8 *mid;
  42. u8 *lit;
  43. u8 *end;
  44. u8 *base_ip;
  45. u8 *ret0_ip;
  46. u8 *exit_ip;
  47. unsigned int off_load_word;
  48. unsigned int off_load_half;
  49. unsigned int off_load_byte;
  50. unsigned int off_load_bmsh;
  51. unsigned int off_load_iword;
  52. unsigned int off_load_ihalf;
  53. unsigned int off_load_ibyte;
  54. };
  55. #define BPF_SIZE_MAX 4096 /* Max size for program */
  56. #define SEEN_DATAREF 1 /* might call external helpers */
  57. #define SEEN_XREG 2 /* ebx is used */
  58. #define SEEN_MEM 4 /* use mem[] for temporary storage */
  59. #define SEEN_RET0 8 /* pc_ret0 points to a valid return 0 */
  60. #define SEEN_LITERAL 16 /* code uses literals */
  61. #define SEEN_LOAD_WORD 32 /* code uses sk_load_word */
  62. #define SEEN_LOAD_HALF 64 /* code uses sk_load_half */
  63. #define SEEN_LOAD_BYTE 128 /* code uses sk_load_byte */
  64. #define SEEN_LOAD_BMSH 256 /* code uses sk_load_byte_msh */
  65. #define SEEN_LOAD_IWORD 512 /* code uses sk_load_word_ind */
  66. #define SEEN_LOAD_IHALF 1024 /* code uses sk_load_half_ind */
  67. #define SEEN_LOAD_IBYTE 2048 /* code uses sk_load_byte_ind */
  68. #define EMIT2(op) \
  69. ({ \
  70. if (jit->prg + 2 <= jit->mid) \
  71. *(u16 *) jit->prg = op; \
  72. jit->prg += 2; \
  73. })
  74. #define EMIT4(op) \
  75. ({ \
  76. if (jit->prg + 4 <= jit->mid) \
  77. *(u32 *) jit->prg = op; \
  78. jit->prg += 4; \
  79. })
  80. #define EMIT4_DISP(op, disp) \
  81. ({ \
  82. unsigned int __disp = (disp) & 0xfff; \
  83. EMIT4(op | __disp); \
  84. })
  85. #define EMIT4_IMM(op, imm) \
  86. ({ \
  87. unsigned int __imm = (imm) & 0xffff; \
  88. EMIT4(op | __imm); \
  89. })
  90. #define EMIT4_PCREL(op, pcrel) \
  91. ({ \
  92. long __pcrel = ((pcrel) >> 1) & 0xffff; \
  93. EMIT4(op | __pcrel); \
  94. })
  95. #define EMIT6(op1, op2) \
  96. ({ \
  97. if (jit->prg + 6 <= jit->mid) { \
  98. *(u32 *) jit->prg = op1; \
  99. *(u16 *) (jit->prg + 4) = op2; \
  100. } \
  101. jit->prg += 6; \
  102. })
  103. #define EMIT6_DISP(op1, op2, disp) \
  104. ({ \
  105. unsigned int __disp = (disp) & 0xfff; \
  106. EMIT6(op1 | __disp, op2); \
  107. })
  108. #define EMIT6_IMM(op, imm) \
  109. ({ \
  110. unsigned int __imm = (imm); \
  111. EMIT6(op | (__imm >> 16), __imm & 0xffff); \
  112. })
  113. #define EMIT_CONST(val) \
  114. ({ \
  115. unsigned int ret; \
  116. ret = (unsigned int) (jit->lit - jit->base_ip); \
  117. jit->seen |= SEEN_LITERAL; \
  118. if (jit->lit + 4 <= jit->end) \
  119. *(u32 *) jit->lit = val; \
  120. jit->lit += 4; \
  121. ret; \
  122. })
  123. #define EMIT_FN_CONST(bit, fn) \
  124. ({ \
  125. unsigned int ret; \
  126. ret = (unsigned int) (jit->lit - jit->base_ip); \
  127. if (jit->seen & bit) { \
  128. jit->seen |= SEEN_LITERAL; \
  129. if (jit->lit + 8 <= jit->end) \
  130. *(void **) jit->lit = fn; \
  131. jit->lit += 8; \
  132. } \
  133. ret; \
  134. })
  135. static void bpf_jit_prologue(struct bpf_jit *jit)
  136. {
  137. /* Save registers and create stack frame if necessary */
  138. if (jit->seen & SEEN_DATAREF) {
  139. /* stmg %r8,%r15,88(%r15) */
  140. EMIT6(0xeb8ff058, 0x0024);
  141. /* lgr %r14,%r15 */
  142. EMIT4(0xb90400ef);
  143. /* aghi %r15,<offset> */
  144. EMIT4_IMM(0xa7fb0000, (jit->seen & SEEN_MEM) ? -112 : -80);
  145. /* stg %r14,152(%r15) */
  146. EMIT6(0xe3e0f098, 0x0024);
  147. } else if ((jit->seen & SEEN_XREG) && (jit->seen & SEEN_LITERAL))
  148. /* stmg %r12,%r13,120(%r15) */
  149. EMIT6(0xebcdf078, 0x0024);
  150. else if (jit->seen & SEEN_XREG)
  151. /* stg %r12,120(%r15) */
  152. EMIT6(0xe3c0f078, 0x0024);
  153. else if (jit->seen & SEEN_LITERAL)
  154. /* stg %r13,128(%r15) */
  155. EMIT6(0xe3d0f080, 0x0024);
  156. /* Setup literal pool */
  157. if (jit->seen & SEEN_LITERAL) {
  158. /* basr %r13,0 */
  159. EMIT2(0x0dd0);
  160. jit->base_ip = jit->prg;
  161. }
  162. jit->off_load_word = EMIT_FN_CONST(SEEN_LOAD_WORD, sk_load_word);
  163. jit->off_load_half = EMIT_FN_CONST(SEEN_LOAD_HALF, sk_load_half);
  164. jit->off_load_byte = EMIT_FN_CONST(SEEN_LOAD_BYTE, sk_load_byte);
  165. jit->off_load_bmsh = EMIT_FN_CONST(SEEN_LOAD_BMSH, sk_load_byte_msh);
  166. jit->off_load_iword = EMIT_FN_CONST(SEEN_LOAD_IWORD, sk_load_word_ind);
  167. jit->off_load_ihalf = EMIT_FN_CONST(SEEN_LOAD_IHALF, sk_load_half_ind);
  168. jit->off_load_ibyte = EMIT_FN_CONST(SEEN_LOAD_IBYTE, sk_load_byte_ind);
  169. /* Filter needs to access skb data */
  170. if (jit->seen & SEEN_DATAREF) {
  171. /* l %r11,<len>(%r2) */
  172. EMIT4_DISP(0x58b02000, offsetof(struct sk_buff, len));
  173. /* s %r11,<data_len>(%r2) */
  174. EMIT4_DISP(0x5bb02000, offsetof(struct sk_buff, data_len));
  175. /* lg %r10,<data>(%r2) */
  176. EMIT6_DISP(0xe3a02000, 0x0004,
  177. offsetof(struct sk_buff, data));
  178. }
  179. }
  180. static void bpf_jit_epilogue(struct bpf_jit *jit)
  181. {
  182. /* Return 0 */
  183. if (jit->seen & SEEN_RET0) {
  184. jit->ret0_ip = jit->prg;
  185. /* lghi %r2,0 */
  186. EMIT4(0xa7290000);
  187. }
  188. jit->exit_ip = jit->prg;
  189. /* Restore registers */
  190. if (jit->seen & SEEN_DATAREF)
  191. /* lmg %r8,%r15,<offset>(%r15) */
  192. EMIT6_DISP(0xeb8ff000, 0x0004,
  193. (jit->seen & SEEN_MEM) ? 200 : 168);
  194. else if ((jit->seen & SEEN_XREG) && (jit->seen & SEEN_LITERAL))
  195. /* lmg %r12,%r13,120(%r15) */
  196. EMIT6(0xebcdf078, 0x0004);
  197. else if (jit->seen & SEEN_XREG)
  198. /* lg %r12,120(%r15) */
  199. EMIT6(0xe3c0f078, 0x0004);
  200. else if (jit->seen & SEEN_LITERAL)
  201. /* lg %r13,128(%r15) */
  202. EMIT6(0xe3d0f080, 0x0004);
  203. /* br %r14 */
  204. EMIT2(0x07fe);
  205. }
  206. /* Helper to find the offset of pkt_type in sk_buff
  207. * Make sure its still a 3bit field starting at the MSBs within a byte.
  208. */
  209. #define PKT_TYPE_MAX 0xe0
  210. static int pkt_type_offset;
  211. static int __init bpf_pkt_type_offset_init(void)
  212. {
  213. struct sk_buff skb_probe = {
  214. .pkt_type = ~0,
  215. };
  216. char *ct = (char *)&skb_probe;
  217. int off;
  218. pkt_type_offset = -1;
  219. for (off = 0; off < sizeof(struct sk_buff); off++) {
  220. if (!ct[off])
  221. continue;
  222. if (ct[off] == PKT_TYPE_MAX)
  223. pkt_type_offset = off;
  224. else {
  225. /* Found non matching bit pattern, fix needed. */
  226. WARN_ON_ONCE(1);
  227. pkt_type_offset = -1;
  228. return -1;
  229. }
  230. }
  231. return 0;
  232. }
  233. device_initcall(bpf_pkt_type_offset_init);
  234. /*
  235. * make sure we dont leak kernel information to user
  236. */
  237. static void bpf_jit_noleaks(struct bpf_jit *jit, struct sock_filter *filter)
  238. {
  239. /* Clear temporary memory if (seen & SEEN_MEM) */
  240. if (jit->seen & SEEN_MEM)
  241. /* xc 0(64,%r15),0(%r15) */
  242. EMIT6(0xd73ff000, 0xf000);
  243. /* Clear X if (seen & SEEN_XREG) */
  244. if (jit->seen & SEEN_XREG)
  245. /* lhi %r12,0 */
  246. EMIT4(0xa7c80000);
  247. /* Clear A if the first register does not set it. */
  248. switch (filter[0].code) {
  249. case BPF_S_LD_W_ABS:
  250. case BPF_S_LD_H_ABS:
  251. case BPF_S_LD_B_ABS:
  252. case BPF_S_LD_W_LEN:
  253. case BPF_S_LD_W_IND:
  254. case BPF_S_LD_H_IND:
  255. case BPF_S_LD_B_IND:
  256. case BPF_S_LDX_B_MSH:
  257. case BPF_S_LD_IMM:
  258. case BPF_S_LD_MEM:
  259. case BPF_S_MISC_TXA:
  260. case BPF_S_ANC_PROTOCOL:
  261. case BPF_S_ANC_PKTTYPE:
  262. case BPF_S_ANC_IFINDEX:
  263. case BPF_S_ANC_MARK:
  264. case BPF_S_ANC_QUEUE:
  265. case BPF_S_ANC_HATYPE:
  266. case BPF_S_ANC_RXHASH:
  267. case BPF_S_ANC_CPU:
  268. case BPF_S_ANC_VLAN_TAG:
  269. case BPF_S_ANC_VLAN_TAG_PRESENT:
  270. case BPF_S_RET_K:
  271. /* first instruction sets A register */
  272. break;
  273. default: /* A = 0 */
  274. /* lhi %r5,0 */
  275. EMIT4(0xa7580000);
  276. }
  277. }
  278. static int bpf_jit_insn(struct bpf_jit *jit, struct sock_filter *filter,
  279. unsigned int *addrs, int i, int last)
  280. {
  281. unsigned int K;
  282. int offset;
  283. unsigned int mask;
  284. K = filter->k;
  285. switch (filter->code) {
  286. case BPF_S_ALU_ADD_X: /* A += X */
  287. jit->seen |= SEEN_XREG;
  288. /* ar %r5,%r12 */
  289. EMIT2(0x1a5c);
  290. break;
  291. case BPF_S_ALU_ADD_K: /* A += K */
  292. if (!K)
  293. break;
  294. if (K <= 16383)
  295. /* ahi %r5,<K> */
  296. EMIT4_IMM(0xa75a0000, K);
  297. else if (test_facility(21))
  298. /* alfi %r5,<K> */
  299. EMIT6_IMM(0xc25b0000, K);
  300. else
  301. /* a %r5,<d(K)>(%r13) */
  302. EMIT4_DISP(0x5a50d000, EMIT_CONST(K));
  303. break;
  304. case BPF_S_ALU_SUB_X: /* A -= X */
  305. jit->seen |= SEEN_XREG;
  306. /* sr %r5,%r12 */
  307. EMIT2(0x1b5c);
  308. break;
  309. case BPF_S_ALU_SUB_K: /* A -= K */
  310. if (!K)
  311. break;
  312. if (K <= 16384)
  313. /* ahi %r5,-K */
  314. EMIT4_IMM(0xa75a0000, -K);
  315. else if (test_facility(21))
  316. /* alfi %r5,-K */
  317. EMIT6_IMM(0xc25b0000, -K);
  318. else
  319. /* s %r5,<d(K)>(%r13) */
  320. EMIT4_DISP(0x5b50d000, EMIT_CONST(K));
  321. break;
  322. case BPF_S_ALU_MUL_X: /* A *= X */
  323. jit->seen |= SEEN_XREG;
  324. /* msr %r5,%r12 */
  325. EMIT4(0xb252005c);
  326. break;
  327. case BPF_S_ALU_MUL_K: /* A *= K */
  328. if (K <= 16383)
  329. /* mhi %r5,K */
  330. EMIT4_IMM(0xa75c0000, K);
  331. else if (test_facility(34))
  332. /* msfi %r5,<K> */
  333. EMIT6_IMM(0xc2510000, K);
  334. else
  335. /* ms %r5,<d(K)>(%r13) */
  336. EMIT4_DISP(0x7150d000, EMIT_CONST(K));
  337. break;
  338. case BPF_S_ALU_DIV_X: /* A /= X */
  339. jit->seen |= SEEN_XREG | SEEN_RET0;
  340. /* ltr %r12,%r12 */
  341. EMIT2(0x12cc);
  342. /* jz <ret0> */
  343. EMIT4_PCREL(0xa7840000, (jit->ret0_ip - jit->prg));
  344. /* lhi %r4,0 */
  345. EMIT4(0xa7480000);
  346. /* dlr %r4,%r12 */
  347. EMIT4(0xb997004c);
  348. break;
  349. case BPF_S_ALU_DIV_K: /* A /= K */
  350. if (K == 1)
  351. break;
  352. /* lhi %r4,0 */
  353. EMIT4(0xa7480000);
  354. /* dl %r4,<d(K)>(%r13) */
  355. EMIT6_DISP(0xe340d000, 0x0097, EMIT_CONST(K));
  356. break;
  357. case BPF_S_ALU_MOD_X: /* A %= X */
  358. jit->seen |= SEEN_XREG | SEEN_RET0;
  359. /* ltr %r12,%r12 */
  360. EMIT2(0x12cc);
  361. /* jz <ret0> */
  362. EMIT4_PCREL(0xa7840000, (jit->ret0_ip - jit->prg));
  363. /* lhi %r4,0 */
  364. EMIT4(0xa7480000);
  365. /* dlr %r4,%r12 */
  366. EMIT4(0xb997004c);
  367. /* lr %r5,%r4 */
  368. EMIT2(0x1854);
  369. break;
  370. case BPF_S_ALU_MOD_K: /* A %= K */
  371. if (K == 1) {
  372. /* lhi %r5,0 */
  373. EMIT4(0xa7580000);
  374. break;
  375. }
  376. /* lhi %r4,0 */
  377. EMIT4(0xa7480000);
  378. /* dl %r4,<d(K)>(%r13) */
  379. EMIT6_DISP(0xe340d000, 0x0097, EMIT_CONST(K));
  380. /* lr %r5,%r4 */
  381. EMIT2(0x1854);
  382. break;
  383. case BPF_S_ALU_AND_X: /* A &= X */
  384. jit->seen |= SEEN_XREG;
  385. /* nr %r5,%r12 */
  386. EMIT2(0x145c);
  387. break;
  388. case BPF_S_ALU_AND_K: /* A &= K */
  389. if (test_facility(21))
  390. /* nilf %r5,<K> */
  391. EMIT6_IMM(0xc05b0000, K);
  392. else
  393. /* n %r5,<d(K)>(%r13) */
  394. EMIT4_DISP(0x5450d000, EMIT_CONST(K));
  395. break;
  396. case BPF_S_ALU_OR_X: /* A |= X */
  397. jit->seen |= SEEN_XREG;
  398. /* or %r5,%r12 */
  399. EMIT2(0x165c);
  400. break;
  401. case BPF_S_ALU_OR_K: /* A |= K */
  402. if (test_facility(21))
  403. /* oilf %r5,<K> */
  404. EMIT6_IMM(0xc05d0000, K);
  405. else
  406. /* o %r5,<d(K)>(%r13) */
  407. EMIT4_DISP(0x5650d000, EMIT_CONST(K));
  408. break;
  409. case BPF_S_ANC_ALU_XOR_X: /* A ^= X; */
  410. case BPF_S_ALU_XOR_X:
  411. jit->seen |= SEEN_XREG;
  412. /* xr %r5,%r12 */
  413. EMIT2(0x175c);
  414. break;
  415. case BPF_S_ALU_XOR_K: /* A ^= K */
  416. if (!K)
  417. break;
  418. /* x %r5,<d(K)>(%r13) */
  419. EMIT4_DISP(0x5750d000, EMIT_CONST(K));
  420. break;
  421. case BPF_S_ALU_LSH_X: /* A <<= X; */
  422. jit->seen |= SEEN_XREG;
  423. /* sll %r5,0(%r12) */
  424. EMIT4(0x8950c000);
  425. break;
  426. case BPF_S_ALU_LSH_K: /* A <<= K */
  427. if (K == 0)
  428. break;
  429. /* sll %r5,K */
  430. EMIT4_DISP(0x89500000, K);
  431. break;
  432. case BPF_S_ALU_RSH_X: /* A >>= X; */
  433. jit->seen |= SEEN_XREG;
  434. /* srl %r5,0(%r12) */
  435. EMIT4(0x8850c000);
  436. break;
  437. case BPF_S_ALU_RSH_K: /* A >>= K; */
  438. if (K == 0)
  439. break;
  440. /* srl %r5,K */
  441. EMIT4_DISP(0x88500000, K);
  442. break;
  443. case BPF_S_ALU_NEG: /* A = -A */
  444. /* lnr %r5,%r5 */
  445. EMIT2(0x1155);
  446. break;
  447. case BPF_S_JMP_JA: /* ip += K */
  448. offset = addrs[i + K] + jit->start - jit->prg;
  449. EMIT4_PCREL(0xa7f40000, offset);
  450. break;
  451. case BPF_S_JMP_JGT_K: /* ip += (A > K) ? jt : jf */
  452. mask = 0x200000; /* jh */
  453. goto kbranch;
  454. case BPF_S_JMP_JGE_K: /* ip += (A >= K) ? jt : jf */
  455. mask = 0xa00000; /* jhe */
  456. goto kbranch;
  457. case BPF_S_JMP_JEQ_K: /* ip += (A == K) ? jt : jf */
  458. mask = 0x800000; /* je */
  459. kbranch: /* Emit compare if the branch targets are different */
  460. if (filter->jt != filter->jf) {
  461. if (K <= 16383)
  462. /* chi %r5,<K> */
  463. EMIT4_IMM(0xa75e0000, K);
  464. else if (test_facility(21))
  465. /* clfi %r5,<K> */
  466. EMIT6_IMM(0xc25f0000, K);
  467. else
  468. /* c %r5,<d(K)>(%r13) */
  469. EMIT4_DISP(0x5950d000, EMIT_CONST(K));
  470. }
  471. branch: if (filter->jt == filter->jf) {
  472. if (filter->jt == 0)
  473. break;
  474. /* j <jt> */
  475. offset = addrs[i + filter->jt] + jit->start - jit->prg;
  476. EMIT4_PCREL(0xa7f40000, offset);
  477. break;
  478. }
  479. if (filter->jt != 0) {
  480. /* brc <mask>,<jt> */
  481. offset = addrs[i + filter->jt] + jit->start - jit->prg;
  482. EMIT4_PCREL(0xa7040000 | mask, offset);
  483. }
  484. if (filter->jf != 0) {
  485. /* brc <mask^15>,<jf> */
  486. offset = addrs[i + filter->jf] + jit->start - jit->prg;
  487. EMIT4_PCREL(0xa7040000 | (mask ^ 0xf00000), offset);
  488. }
  489. break;
  490. case BPF_S_JMP_JSET_K: /* ip += (A & K) ? jt : jf */
  491. mask = 0x700000; /* jnz */
  492. /* Emit test if the branch targets are different */
  493. if (filter->jt != filter->jf) {
  494. if (K > 65535) {
  495. /* lr %r4,%r5 */
  496. EMIT2(0x1845);
  497. /* n %r4,<d(K)>(%r13) */
  498. EMIT4_DISP(0x5440d000, EMIT_CONST(K));
  499. } else
  500. /* tmll %r5,K */
  501. EMIT4_IMM(0xa7510000, K);
  502. }
  503. goto branch;
  504. case BPF_S_JMP_JGT_X: /* ip += (A > X) ? jt : jf */
  505. mask = 0x200000; /* jh */
  506. goto xbranch;
  507. case BPF_S_JMP_JGE_X: /* ip += (A >= X) ? jt : jf */
  508. mask = 0xa00000; /* jhe */
  509. goto xbranch;
  510. case BPF_S_JMP_JEQ_X: /* ip += (A == X) ? jt : jf */
  511. mask = 0x800000; /* je */
  512. xbranch: /* Emit compare if the branch targets are different */
  513. if (filter->jt != filter->jf) {
  514. jit->seen |= SEEN_XREG;
  515. /* cr %r5,%r12 */
  516. EMIT2(0x195c);
  517. }
  518. goto branch;
  519. case BPF_S_JMP_JSET_X: /* ip += (A & X) ? jt : jf */
  520. mask = 0x700000; /* jnz */
  521. /* Emit test if the branch targets are different */
  522. if (filter->jt != filter->jf) {
  523. jit->seen |= SEEN_XREG;
  524. /* lr %r4,%r5 */
  525. EMIT2(0x1845);
  526. /* nr %r4,%r12 */
  527. EMIT2(0x144c);
  528. }
  529. goto branch;
  530. case BPF_S_LD_W_ABS: /* A = *(u32 *) (skb->data+K) */
  531. jit->seen |= SEEN_DATAREF | SEEN_RET0 | SEEN_LOAD_WORD;
  532. offset = jit->off_load_word;
  533. goto load_abs;
  534. case BPF_S_LD_H_ABS: /* A = *(u16 *) (skb->data+K) */
  535. jit->seen |= SEEN_DATAREF | SEEN_RET0 | SEEN_LOAD_HALF;
  536. offset = jit->off_load_half;
  537. goto load_abs;
  538. case BPF_S_LD_B_ABS: /* A = *(u8 *) (skb->data+K) */
  539. jit->seen |= SEEN_DATAREF | SEEN_RET0 | SEEN_LOAD_BYTE;
  540. offset = jit->off_load_byte;
  541. load_abs: if ((int) K < 0)
  542. goto out;
  543. call_fn: /* lg %r1,<d(function)>(%r13) */
  544. EMIT6_DISP(0xe310d000, 0x0004, offset);
  545. /* l %r3,<d(K)>(%r13) */
  546. EMIT4_DISP(0x5830d000, EMIT_CONST(K));
  547. /* basr %r8,%r1 */
  548. EMIT2(0x0d81);
  549. /* jnz <ret0> */
  550. EMIT4_PCREL(0xa7740000, (jit->ret0_ip - jit->prg));
  551. break;
  552. case BPF_S_LD_W_IND: /* A = *(u32 *) (skb->data+K+X) */
  553. jit->seen |= SEEN_DATAREF | SEEN_RET0 | SEEN_LOAD_IWORD;
  554. offset = jit->off_load_iword;
  555. goto call_fn;
  556. case BPF_S_LD_H_IND: /* A = *(u16 *) (skb->data+K+X) */
  557. jit->seen |= SEEN_DATAREF | SEEN_RET0 | SEEN_LOAD_IHALF;
  558. offset = jit->off_load_ihalf;
  559. goto call_fn;
  560. case BPF_S_LD_B_IND: /* A = *(u8 *) (skb->data+K+X) */
  561. jit->seen |= SEEN_DATAREF | SEEN_RET0 | SEEN_LOAD_IBYTE;
  562. offset = jit->off_load_ibyte;
  563. goto call_fn;
  564. case BPF_S_LDX_B_MSH:
  565. /* X = (*(u8 *)(skb->data+K) & 0xf) << 2 */
  566. jit->seen |= SEEN_RET0;
  567. if ((int) K < 0) {
  568. /* j <ret0> */
  569. EMIT4_PCREL(0xa7f40000, (jit->ret0_ip - jit->prg));
  570. break;
  571. }
  572. jit->seen |= SEEN_DATAREF | SEEN_LOAD_BMSH;
  573. offset = jit->off_load_bmsh;
  574. goto call_fn;
  575. case BPF_S_LD_W_LEN: /* A = skb->len; */
  576. BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, len) != 4);
  577. /* l %r5,<d(len)>(%r2) */
  578. EMIT4_DISP(0x58502000, offsetof(struct sk_buff, len));
  579. break;
  580. case BPF_S_LDX_W_LEN: /* X = skb->len; */
  581. jit->seen |= SEEN_XREG;
  582. /* l %r12,<d(len)>(%r2) */
  583. EMIT4_DISP(0x58c02000, offsetof(struct sk_buff, len));
  584. break;
  585. case BPF_S_LD_IMM: /* A = K */
  586. if (K <= 16383)
  587. /* lhi %r5,K */
  588. EMIT4_IMM(0xa7580000, K);
  589. else if (test_facility(21))
  590. /* llilf %r5,<K> */
  591. EMIT6_IMM(0xc05f0000, K);
  592. else
  593. /* l %r5,<d(K)>(%r13) */
  594. EMIT4_DISP(0x5850d000, EMIT_CONST(K));
  595. break;
  596. case BPF_S_LDX_IMM: /* X = K */
  597. jit->seen |= SEEN_XREG;
  598. if (K <= 16383)
  599. /* lhi %r12,<K> */
  600. EMIT4_IMM(0xa7c80000, K);
  601. else if (test_facility(21))
  602. /* llilf %r12,<K> */
  603. EMIT6_IMM(0xc0cf0000, K);
  604. else
  605. /* l %r12,<d(K)>(%r13) */
  606. EMIT4_DISP(0x58c0d000, EMIT_CONST(K));
  607. break;
  608. case BPF_S_LD_MEM: /* A = mem[K] */
  609. jit->seen |= SEEN_MEM;
  610. /* l %r5,<K>(%r15) */
  611. EMIT4_DISP(0x5850f000,
  612. (jit->seen & SEEN_DATAREF) ? 160 + K*4 : K*4);
  613. break;
  614. case BPF_S_LDX_MEM: /* X = mem[K] */
  615. jit->seen |= SEEN_XREG | SEEN_MEM;
  616. /* l %r12,<K>(%r15) */
  617. EMIT4_DISP(0x58c0f000,
  618. (jit->seen & SEEN_DATAREF) ? 160 + K*4 : K*4);
  619. break;
  620. case BPF_S_MISC_TAX: /* X = A */
  621. jit->seen |= SEEN_XREG;
  622. /* lr %r12,%r5 */
  623. EMIT2(0x18c5);
  624. break;
  625. case BPF_S_MISC_TXA: /* A = X */
  626. jit->seen |= SEEN_XREG;
  627. /* lr %r5,%r12 */
  628. EMIT2(0x185c);
  629. break;
  630. case BPF_S_RET_K:
  631. if (K == 0) {
  632. jit->seen |= SEEN_RET0;
  633. if (last)
  634. break;
  635. /* j <ret0> */
  636. EMIT4_PCREL(0xa7f40000, jit->ret0_ip - jit->prg);
  637. } else {
  638. if (K <= 16383)
  639. /* lghi %r2,K */
  640. EMIT4_IMM(0xa7290000, K);
  641. else
  642. /* llgf %r2,<K>(%r13) */
  643. EMIT6_DISP(0xe320d000, 0x0016, EMIT_CONST(K));
  644. /* j <exit> */
  645. if (last && !(jit->seen & SEEN_RET0))
  646. break;
  647. EMIT4_PCREL(0xa7f40000, jit->exit_ip - jit->prg);
  648. }
  649. break;
  650. case BPF_S_RET_A:
  651. /* llgfr %r2,%r5 */
  652. EMIT4(0xb9160025);
  653. /* j <exit> */
  654. EMIT4_PCREL(0xa7f40000, jit->exit_ip - jit->prg);
  655. break;
  656. case BPF_S_ST: /* mem[K] = A */
  657. jit->seen |= SEEN_MEM;
  658. /* st %r5,<K>(%r15) */
  659. EMIT4_DISP(0x5050f000,
  660. (jit->seen & SEEN_DATAREF) ? 160 + K*4 : K*4);
  661. break;
  662. case BPF_S_STX: /* mem[K] = X : mov %ebx,off8(%rbp) */
  663. jit->seen |= SEEN_XREG | SEEN_MEM;
  664. /* st %r12,<K>(%r15) */
  665. EMIT4_DISP(0x50c0f000,
  666. (jit->seen & SEEN_DATAREF) ? 160 + K*4 : K*4);
  667. break;
  668. case BPF_S_ANC_PROTOCOL: /* A = ntohs(skb->protocol); */
  669. BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, protocol) != 2);
  670. /* lhi %r5,0 */
  671. EMIT4(0xa7580000);
  672. /* icm %r5,3,<d(protocol)>(%r2) */
  673. EMIT4_DISP(0xbf532000, offsetof(struct sk_buff, protocol));
  674. break;
  675. case BPF_S_ANC_IFINDEX: /* if (!skb->dev) return 0;
  676. * A = skb->dev->ifindex */
  677. BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, ifindex) != 4);
  678. jit->seen |= SEEN_RET0;
  679. /* lg %r1,<d(dev)>(%r2) */
  680. EMIT6_DISP(0xe3102000, 0x0004, offsetof(struct sk_buff, dev));
  681. /* ltgr %r1,%r1 */
  682. EMIT4(0xb9020011);
  683. /* jz <ret0> */
  684. EMIT4_PCREL(0xa7840000, jit->ret0_ip - jit->prg);
  685. /* l %r5,<d(ifindex)>(%r1) */
  686. EMIT4_DISP(0x58501000, offsetof(struct net_device, ifindex));
  687. break;
  688. case BPF_S_ANC_MARK: /* A = skb->mark */
  689. BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4);
  690. /* l %r5,<d(mark)>(%r2) */
  691. EMIT4_DISP(0x58502000, offsetof(struct sk_buff, mark));
  692. break;
  693. case BPF_S_ANC_QUEUE: /* A = skb->queue_mapping */
  694. BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, queue_mapping) != 2);
  695. /* lhi %r5,0 */
  696. EMIT4(0xa7580000);
  697. /* icm %r5,3,<d(queue_mapping)>(%r2) */
  698. EMIT4_DISP(0xbf532000, offsetof(struct sk_buff, queue_mapping));
  699. break;
  700. case BPF_S_ANC_HATYPE: /* if (!skb->dev) return 0;
  701. * A = skb->dev->type */
  702. BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, type) != 2);
  703. jit->seen |= SEEN_RET0;
  704. /* lg %r1,<d(dev)>(%r2) */
  705. EMIT6_DISP(0xe3102000, 0x0004, offsetof(struct sk_buff, dev));
  706. /* ltgr %r1,%r1 */
  707. EMIT4(0xb9020011);
  708. /* jz <ret0> */
  709. EMIT4_PCREL(0xa7840000, jit->ret0_ip - jit->prg);
  710. /* lhi %r5,0 */
  711. EMIT4(0xa7580000);
  712. /* icm %r5,3,<d(type)>(%r1) */
  713. EMIT4_DISP(0xbf531000, offsetof(struct net_device, type));
  714. break;
  715. case BPF_S_ANC_RXHASH: /* A = skb->rxhash */
  716. BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, rxhash) != 4);
  717. /* l %r5,<d(rxhash)>(%r2) */
  718. EMIT4_DISP(0x58502000, offsetof(struct sk_buff, rxhash));
  719. break;
  720. case BPF_S_ANC_VLAN_TAG:
  721. case BPF_S_ANC_VLAN_TAG_PRESENT:
  722. BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_tci) != 2);
  723. BUILD_BUG_ON(VLAN_TAG_PRESENT != 0x1000);
  724. /* lhi %r5,0 */
  725. EMIT4(0xa7580000);
  726. /* icm %r5,3,<d(vlan_tci)>(%r2) */
  727. EMIT4_DISP(0xbf532000, offsetof(struct sk_buff, vlan_tci));
  728. if (filter->code == BPF_S_ANC_VLAN_TAG) {
  729. /* nill %r5,0xefff */
  730. EMIT4_IMM(0xa5570000, ~VLAN_TAG_PRESENT);
  731. } else {
  732. /* nill %r5,0x1000 */
  733. EMIT4_IMM(0xa5570000, VLAN_TAG_PRESENT);
  734. /* srl %r5,12 */
  735. EMIT4_DISP(0x88500000, 12);
  736. }
  737. break;
  738. case BPF_S_ANC_PKTTYPE:
  739. if (pkt_type_offset < 0)
  740. goto out;
  741. /* lhi %r5,0 */
  742. EMIT4(0xa7580000);
  743. /* ic %r5,<d(pkt_type_offset)>(%r2) */
  744. EMIT4_DISP(0x43502000, pkt_type_offset);
  745. /* srl %r5,5 */
  746. EMIT4_DISP(0x88500000, 5);
  747. break;
  748. case BPF_S_ANC_CPU: /* A = smp_processor_id() */
  749. #ifdef CONFIG_SMP
  750. /* l %r5,<d(cpu_nr)> */
  751. EMIT4_DISP(0x58500000, offsetof(struct _lowcore, cpu_nr));
  752. #else
  753. /* lhi %r5,0 */
  754. EMIT4(0xa7580000);
  755. #endif
  756. break;
  757. default: /* too complex, give up */
  758. goto out;
  759. }
  760. addrs[i] = jit->prg - jit->start;
  761. return 0;
  762. out:
  763. return -1;
  764. }
  765. /*
  766. * Note: for security reasons, bpf code will follow a randomly
  767. * sized amount of illegal instructions.
  768. */
  769. struct bpf_binary_header {
  770. unsigned int pages;
  771. u8 image[];
  772. };
  773. static struct bpf_binary_header *bpf_alloc_binary(unsigned int bpfsize,
  774. u8 **image_ptr)
  775. {
  776. struct bpf_binary_header *header;
  777. unsigned int sz, hole;
  778. /* Most BPF filters are really small, but if some of them fill a page,
  779. * allow at least 128 extra bytes for illegal instructions.
  780. */
  781. sz = round_up(bpfsize + sizeof(*header) + 128, PAGE_SIZE);
  782. header = module_alloc(sz);
  783. if (!header)
  784. return NULL;
  785. memset(header, 0, sz);
  786. header->pages = sz / PAGE_SIZE;
  787. hole = sz - (bpfsize + sizeof(*header));
  788. /* Insert random number of illegal instructions before BPF code
  789. * and make sure the first instruction starts at an even address.
  790. */
  791. *image_ptr = &header->image[(prandom_u32() % hole) & -2];
  792. return header;
  793. }
  794. void bpf_jit_compile(struct sk_filter *fp)
  795. {
  796. struct bpf_binary_header *header = NULL;
  797. unsigned long size, prg_len, lit_len;
  798. struct bpf_jit jit, cjit;
  799. unsigned int *addrs;
  800. int pass, i;
  801. if (!bpf_jit_enable)
  802. return;
  803. addrs = kcalloc(fp->len, sizeof(*addrs), GFP_KERNEL);
  804. if (addrs == NULL)
  805. return;
  806. memset(&jit, 0, sizeof(cjit));
  807. memset(&cjit, 0, sizeof(cjit));
  808. for (pass = 0; pass < 10; pass++) {
  809. jit.prg = jit.start;
  810. jit.lit = jit.mid;
  811. bpf_jit_prologue(&jit);
  812. bpf_jit_noleaks(&jit, fp->insns);
  813. for (i = 0; i < fp->len; i++) {
  814. if (bpf_jit_insn(&jit, fp->insns + i, addrs, i,
  815. i == fp->len - 1))
  816. goto out;
  817. }
  818. bpf_jit_epilogue(&jit);
  819. if (jit.start) {
  820. WARN_ON(jit.prg > cjit.prg || jit.lit > cjit.lit);
  821. if (memcmp(&jit, &cjit, sizeof(jit)) == 0)
  822. break;
  823. } else if (jit.prg == cjit.prg && jit.lit == cjit.lit) {
  824. prg_len = jit.prg - jit.start;
  825. lit_len = jit.lit - jit.mid;
  826. size = prg_len + lit_len;
  827. if (size >= BPF_SIZE_MAX)
  828. goto out;
  829. header = bpf_alloc_binary(size, &jit.start);
  830. if (!header)
  831. goto out;
  832. jit.prg = jit.mid = jit.start + prg_len;
  833. jit.lit = jit.end = jit.start + prg_len + lit_len;
  834. jit.base_ip += (unsigned long) jit.start;
  835. jit.exit_ip += (unsigned long) jit.start;
  836. jit.ret0_ip += (unsigned long) jit.start;
  837. }
  838. cjit = jit;
  839. }
  840. if (bpf_jit_enable > 1) {
  841. bpf_jit_dump(fp->len, jit.end - jit.start, pass, jit.start);
  842. if (jit.start)
  843. print_fn_code(jit.start, jit.mid - jit.start);
  844. }
  845. if (jit.start) {
  846. set_memory_ro((unsigned long)header, header->pages);
  847. fp->bpf_func = (void *) jit.start;
  848. }
  849. out:
  850. kfree(addrs);
  851. }
  852. void bpf_jit_free(struct sk_filter *fp)
  853. {
  854. unsigned long addr = (unsigned long)fp->bpf_func & PAGE_MASK;
  855. struct bpf_binary_header *header = (void *)addr;
  856. if (fp->bpf_func == sk_run_filter)
  857. goto free_filter;
  858. set_memory_rw(addr, header->pages);
  859. module_free(NULL, header);
  860. free_filter:
  861. kfree(fp);
  862. }