ebpf_jit.c

/*
 * Just-In-Time compiler for eBPF filters on MIPS
 *
 * Copyright (c) 2017 Cavium, Inc.
 *
 * Based on code from:
 *
 * Copyright (c) 2014 Imagination Technologies Ltd.
 * Author: Markos Chandras <markos.chandras@imgtec.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; version 2 of the License.
 */

#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/filter.h>
#include <linux/bpf.h>
#include <linux/slab.h>
#include <asm/bitops.h>
#include <asm/byteorder.h>
#include <asm/cacheflush.h>
#include <asm/cpu-features.h>
#include <asm/uasm.h>

/* Registers used by JIT */
#define MIPS_R_ZERO	0
#define MIPS_R_AT	1
#define MIPS_R_V0	2	/* BPF_R0 */
#define MIPS_R_V1	3
#define MIPS_R_A0	4	/* BPF_R1 */
#define MIPS_R_A1	5	/* BPF_R2 */
#define MIPS_R_A2	6	/* BPF_R3 */
#define MIPS_R_A3	7	/* BPF_R4 */
#define MIPS_R_A4	8	/* BPF_R5 */
#define MIPS_R_T4	12	/* BPF_AX */
#define MIPS_R_T5	13
#define MIPS_R_T6	14
#define MIPS_R_T7	15
#define MIPS_R_S0	16	/* BPF_R6 */
#define MIPS_R_S1	17	/* BPF_R7 */
#define MIPS_R_S2	18	/* BPF_R8 */
#define MIPS_R_S3	19	/* BPF_R9 */
#define MIPS_R_S4	20	/* BPF_TCC */
#define MIPS_R_S5	21
#define MIPS_R_S6	22
#define MIPS_R_S7	23
#define MIPS_R_T8	24
#define MIPS_R_T9	25
#define MIPS_R_SP	29
#define MIPS_R_RA	31

/* eBPF flags */
#define EBPF_SAVE_S0	BIT(0)
#define EBPF_SAVE_S1	BIT(1)
#define EBPF_SAVE_S2	BIT(2)
#define EBPF_SAVE_S3	BIT(3)
#define EBPF_SAVE_S4	BIT(4)
#define EBPF_SAVE_RA	BIT(5)
#define EBPF_SEEN_FP	BIT(6)
#define EBPF_SEEN_TC	BIT(7)
#define EBPF_TCC_IN_V1	BIT(8)

/*
 * For the mips64 ISA, we need to track the value range or type for
 * each JIT register. The BPF machine requires zero extended 32-bit
 * values, but the mips64 ISA requires sign extended 32-bit values.
 * At each point in the BPF program we track the state of every
 * register so that we can zero extend or sign extend as the BPF
 * semantics require.
 */
enum reg_val_type {
	/* uninitialized */
	REG_UNKNOWN,
	/* not known to be 32-bit compatible. */
	REG_64BIT,
	/* 32-bit compatible, no truncation needed for 64-bit ops. */
	REG_64BIT_32BIT,
	/* 32-bit compatible, need truncation for 64-bit ops. */
	REG_32BIT,
	/* 32-bit zero extended. */
	REG_32BIT_ZERO_EX,
	/* 32-bit no sign/zero extension needed. */
	REG_32BIT_POS
};
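
/*
 * Illustrative note (not from the original source): on MIPS64, 32-bit
 * ALU results live sign-extended in 64-bit registers, e.g.
 * "addiu $t0, $zero, -1" leaves 0xffffffffffffffff in $t0, while
 * eBPF's 32-bit semantics require 0x00000000ffffffff.  The types
 * above let the JIT decide when a zero extension
 * ("dinsu dst, $zero, 32, 32") or a sign extension
 * ("sll dst, dst, 0") must be emitted to reconcile the two.
 */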

/*
 * high bit of offsets indicates if long branch conversion done at
 * this insn.
 */
#define OFFSETS_B_CONV	BIT(31)
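
/*
 * Illustrative note (not from the original source): when a conditional
 * branch target is out of PC-relative range, the JIT inverts the
 * condition and hops over an absolute jump instead:
 *
 *	beq  dst, src, L	      bne  dst, src, 3 insns ahead
 *	nop			 =>   nop
 *				      j    L
 *				      nop
 *
 * That is why the conversion sites below use b_off = 4 * 3 and mark
 * the insn with OFFSETS_B_CONV so later passes account for the extra
 * words.
 */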

/**
 * struct jit_ctx - JIT context
 * @skf:		The sk_filter
 * @stack_size:		eBPF stack size
 * @tmp_offset:		eBPF $sp offset to 8-byte temporary memory
 * @idx:		Instruction index
 * @flags:		JIT flags
 * @offsets:		Instruction offsets
 * @target:		Memory location for the compiled filter
 * @reg_val_types:	Packed enum reg_val_type for each register
 */
struct jit_ctx {
	const struct bpf_prog *skf;
	int stack_size;
	int tmp_offset;
	u32 idx;
	u32 flags;
	u32 *offsets;
	u32 *target;
	u64 *reg_val_types;
	unsigned int long_b_conversion:1;
	unsigned int gen_b_offsets:1;
	unsigned int use_bbit_insns:1;
};

static void set_reg_val_type(u64 *rvt, int reg, enum reg_val_type type)
{
	*rvt &= ~(7ull << (reg * 3));
	*rvt |= ((u64)type << (reg * 3));
}

static enum reg_val_type get_reg_val_type(const struct jit_ctx *ctx,
					  int index, int reg)
{
	return (ctx->reg_val_types[index] >> (reg * 3)) & 7;
}
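
/*
 * Illustrative note (not from the original source): each register's
 * type is a 3-bit field packed into a u64, so e.g. BPF_REG_2
 * (reg == 2) occupies bits 6..8.  Recording REG_32BIT (value 3) for
 * it amounts to:
 *
 *	rvt &= ~(7ull << 6);	clear the old field
 *	rvt |=  (3ull << 6);	install REG_32BIT
 *
 * The dozen BPF registers fit in the low bits of the word, leaving
 * the top bits free for the RVT_VISITED_MASK flags defined further
 * down.
 */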

/* Simply emit the instruction if the JIT memory space has been allocated */
#define emit_instr(ctx, func, ...)			\
do {							\
	if ((ctx)->target != NULL) {			\
		u32 *p = &(ctx)->target[ctx->idx];	\
		uasm_i_##func(&p, ##__VA_ARGS__);	\
	}						\
	(ctx)->idx++;					\
} while (0)
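
/*
 * Illustrative note (not from the original source): this macro is what
 * makes the two-pass scheme work.  On the sizing pass ctx->target is
 * NULL, so nothing is written and only ctx->idx advances, which is
 * enough to fill in ctx->offsets[].  On the emission pass the uasm
 * helper writes the encoded MIPS instruction into the allocated image
 * at the same index.
 */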

static unsigned int j_target(struct jit_ctx *ctx, int target_idx)
{
	unsigned long target_va, base_va;
	unsigned int r;

	if (!ctx->target)
		return 0;

	base_va = (unsigned long)ctx->target;
	target_va = base_va + (ctx->offsets[target_idx] & ~OFFSETS_B_CONV);

	if ((base_va & ~0x0ffffffful) != (target_va & ~0x0ffffffful))
		return (unsigned int)-1;
	r = target_va & 0x0ffffffful;
	return r;
}

/* Compute the immediate value for PC-relative branches. */
static u32 b_imm(unsigned int tgt, struct jit_ctx *ctx)
{
	if (!ctx->gen_b_offsets)
		return 0;

	/*
	 * We want a pc-relative branch. tgt is the instruction offset
	 * we want to jump to.
	 * Branch on MIPS:
	 * I: target_offset <- sign_extend(offset)
	 * I+1: PC += target_offset (delay slot)
	 *
	 * ctx->idx currently points to the branch instruction
	 * but the offset is added to the delay slot so we need
	 * to subtract 4.
	 */
	return (ctx->offsets[tgt] & ~OFFSETS_B_CONV) -
		(ctx->idx * 4) - 4;
}
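
/*
 * Illustrative note (not from the original source): a branch emitted
 * at instruction index 10 (byte offset 40) targeting a BPF insn whose
 * code starts at byte offset 24 gets b_imm = 24 - 40 - 4 = -20, i.e.
 * five instructions back from the delay slot at byte 44, which is
 * exactly where the hardware adds the offset.
 */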

enum which_ebpf_reg {
	src_reg,
	src_reg_no_fp,
	dst_reg,
	dst_reg_fp_ok
};

/*
 * For eBPF, the register mapping naturally falls out of the
 * requirements of eBPF and the MIPS n64 ABI.  We don't maintain a
 * separate frame pointer, so BPF_REG_10 relative accesses are
 * adjusted to be $sp relative.
 */
int ebpf_to_mips_reg(struct jit_ctx *ctx, const struct bpf_insn *insn,
		     enum which_ebpf_reg w)
{
	int ebpf_reg = (w == src_reg || w == src_reg_no_fp) ?
		insn->src_reg : insn->dst_reg;

	switch (ebpf_reg) {
	case BPF_REG_0:
		return MIPS_R_V0;
	case BPF_REG_1:
		return MIPS_R_A0;
	case BPF_REG_2:
		return MIPS_R_A1;
	case BPF_REG_3:
		return MIPS_R_A2;
	case BPF_REG_4:
		return MIPS_R_A3;
	case BPF_REG_5:
		return MIPS_R_A4;
	case BPF_REG_6:
		ctx->flags |= EBPF_SAVE_S0;
		return MIPS_R_S0;
	case BPF_REG_7:
		ctx->flags |= EBPF_SAVE_S1;
		return MIPS_R_S1;
	case BPF_REG_8:
		ctx->flags |= EBPF_SAVE_S2;
		return MIPS_R_S2;
	case BPF_REG_9:
		ctx->flags |= EBPF_SAVE_S3;
		return MIPS_R_S3;
	case BPF_REG_10:
		if (w == dst_reg || w == src_reg_no_fp)
			goto bad_reg;
		ctx->flags |= EBPF_SEEN_FP;
		/*
		 * Needs special handling, return something that
		 * cannot be clobbered just in case.
		 */
		return MIPS_R_ZERO;
	case BPF_REG_AX:
		return MIPS_R_T4;
	default:
bad_reg:
		WARN(1, "Illegal bpf reg: %d\n", ebpf_reg);
		return -EINVAL;
	}
}

/*
 * eBPF stack frame will be something like:
 *
 *  Entry $sp ------>   +--------------------------------+
 *                      |   $ra  (optional)              |
 *                      +--------------------------------+
 *                      |   $s0  (optional)              |
 *                      +--------------------------------+
 *                      |   $s1  (optional)              |
 *                      +--------------------------------+
 *                      |   $s2  (optional)              |
 *                      +--------------------------------+
 *                      |   $s3  (optional)              |
 *                      +--------------------------------+
 *                      |   $s4  (optional)              |
 *                      +--------------------------------+
 *                      |   tmp-storage  (if $ra saved)  |
 * $sp + tmp_offset --> +--------------------------------+ <--BPF_REG_10
 *                      |   BPF_REG_10 relative storage  |
 *                      |    MAX_BPF_STACK (optional)    |
 *                      |      .                         |
 *                      |      .                         |
 *                      |      .                         |
 *     $sp -------->    +--------------------------------+
 *
 * If BPF_REG_10 is never referenced, then the MAX_BPF_STACK sized
 * area is not allocated.
 */
static int gen_int_prologue(struct jit_ctx *ctx)
{
	int stack_adjust = 0;
	int store_offset;
	int locals_size;

	if (ctx->flags & EBPF_SAVE_RA)
		/*
		 * If $ra is saved, we are doing a function call and
		 * may need an extra 8-byte tmp area.
		 */
		stack_adjust += 16;
	if (ctx->flags & EBPF_SAVE_S0)
		stack_adjust += 8;
	if (ctx->flags & EBPF_SAVE_S1)
		stack_adjust += 8;
	if (ctx->flags & EBPF_SAVE_S2)
		stack_adjust += 8;
	if (ctx->flags & EBPF_SAVE_S3)
		stack_adjust += 8;
	if (ctx->flags & EBPF_SAVE_S4)
		stack_adjust += 8;

	BUILD_BUG_ON(MAX_BPF_STACK & 7);
	locals_size = (ctx->flags & EBPF_SEEN_FP) ? MAX_BPF_STACK : 0;

	stack_adjust += locals_size;
	ctx->tmp_offset = locals_size;

	ctx->stack_size = stack_adjust;

	/*
	 * First instruction initializes the tail call count (TCC).
	 * On tail call we skip this instruction, and the TCC is
	 * passed in $v1 from the caller.
	 */
	emit_instr(ctx, daddiu, MIPS_R_V1, MIPS_R_ZERO, MAX_TAIL_CALL_CNT);
	if (stack_adjust)
		emit_instr(ctx, daddiu, MIPS_R_SP, MIPS_R_SP, -stack_adjust);
	else
		return 0;

	store_offset = stack_adjust - 8;

	if (ctx->flags & EBPF_SAVE_RA) {
		emit_instr(ctx, sd, MIPS_R_RA, store_offset, MIPS_R_SP);
		store_offset -= 8;
	}
	if (ctx->flags & EBPF_SAVE_S0) {
		emit_instr(ctx, sd, MIPS_R_S0, store_offset, MIPS_R_SP);
		store_offset -= 8;
	}
	if (ctx->flags & EBPF_SAVE_S1) {
		emit_instr(ctx, sd, MIPS_R_S1, store_offset, MIPS_R_SP);
		store_offset -= 8;
	}
	if (ctx->flags & EBPF_SAVE_S2) {
		emit_instr(ctx, sd, MIPS_R_S2, store_offset, MIPS_R_SP);
		store_offset -= 8;
	}
	if (ctx->flags & EBPF_SAVE_S3) {
		emit_instr(ctx, sd, MIPS_R_S3, store_offset, MIPS_R_SP);
		store_offset -= 8;
	}
	if (ctx->flags & EBPF_SAVE_S4) {
		emit_instr(ctx, sd, MIPS_R_S4, store_offset, MIPS_R_SP);
		store_offset -= 8;
	}

	if ((ctx->flags & EBPF_SEEN_TC) && !(ctx->flags & EBPF_TCC_IN_V1))
		emit_instr(ctx, daddu, MIPS_R_S4, MIPS_R_V1, MIPS_R_ZERO);

	return 0;
}
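
/*
 * Illustrative note (not from the original source): a program that
 * touches BPF_R6..BPF_R9, makes a helper call and uses the stack
 * saves $s0..$s3 plus $ra, giving stack_adjust = 16 + 4 * 8 +
 * MAX_BPF_STACK.  A leaf program that stays in the caller-saved
 * registers allocates no frame at all, and its prologue is the
 * single TCC-initializing daddiu.
 */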

static int build_int_epilogue(struct jit_ctx *ctx, int dest_reg)
{
	const struct bpf_prog *prog = ctx->skf;
	int stack_adjust = ctx->stack_size;
	int store_offset = stack_adjust - 8;
	int r0 = MIPS_R_V0;

	if (dest_reg == MIPS_R_RA &&
	    get_reg_val_type(ctx, prog->len, BPF_REG_0) == REG_32BIT_ZERO_EX)
		/* Don't let zero extended value escape. */
		emit_instr(ctx, sll, r0, r0, 0);

	if (ctx->flags & EBPF_SAVE_RA) {
		emit_instr(ctx, ld, MIPS_R_RA, store_offset, MIPS_R_SP);
		store_offset -= 8;
	}
	if (ctx->flags & EBPF_SAVE_S0) {
		emit_instr(ctx, ld, MIPS_R_S0, store_offset, MIPS_R_SP);
		store_offset -= 8;
	}
	if (ctx->flags & EBPF_SAVE_S1) {
		emit_instr(ctx, ld, MIPS_R_S1, store_offset, MIPS_R_SP);
		store_offset -= 8;
	}
	if (ctx->flags & EBPF_SAVE_S2) {
		emit_instr(ctx, ld, MIPS_R_S2, store_offset, MIPS_R_SP);
		store_offset -= 8;
	}
	if (ctx->flags & EBPF_SAVE_S3) {
		emit_instr(ctx, ld, MIPS_R_S3, store_offset, MIPS_R_SP);
		store_offset -= 8;
	}
	if (ctx->flags & EBPF_SAVE_S4) {
		emit_instr(ctx, ld, MIPS_R_S4, store_offset, MIPS_R_SP);
		store_offset -= 8;
	}
	emit_instr(ctx, jr, dest_reg);

	if (stack_adjust)
		emit_instr(ctx, daddiu, MIPS_R_SP, MIPS_R_SP, stack_adjust);
	else
		emit_instr(ctx, nop);

	return 0;
}
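
/*
 * Illustrative note (not from the original source): the stack pointer
 * is restored in the delay slot of the jr above, so no separate
 * instruction is spent on it; parameterizing dest_reg is also what
 * lets the tail-call path below reuse this epilogue with
 * dest_reg == MIPS_R_T9 instead of MIPS_R_RA.
 */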

static void gen_imm_to_reg(const struct bpf_insn *insn, int reg,
			   struct jit_ctx *ctx)
{
	if (insn->imm >= S16_MIN && insn->imm <= S16_MAX) {
		emit_instr(ctx, addiu, reg, MIPS_R_ZERO, insn->imm);
	} else {
		int lower = (s16)(insn->imm & 0xffff);
		int upper = insn->imm - lower;

		emit_instr(ctx, lui, reg, upper >> 16);
		emit_instr(ctx, addiu, reg, reg, lower);
	}
}
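
/*
 * Illustrative note (not from the original source): the lower/upper
 * split compensates for addiu sign-extending its 16-bit immediate.
 * For imm = 0x12348765: lower = (s16)0x8765 = -0x789b, so
 * upper = 0x12348765 - (-0x789b) = 0x12350000; "lui reg, 0x1235"
 * followed by "addiu reg, reg, -0x789b" reproduces 0x12348765.
 */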

static int gen_imm_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
			int idx)
{
	int upper_bound, lower_bound;
	int dst = ebpf_to_mips_reg(ctx, insn, dst_reg);

	if (dst < 0)
		return dst;

	switch (BPF_OP(insn->code)) {
	case BPF_MOV:
	case BPF_ADD:
		upper_bound = S16_MAX;
		lower_bound = S16_MIN;
		break;
	case BPF_SUB:
		upper_bound = -(int)S16_MIN;
		lower_bound = -(int)S16_MAX;
		break;
	case BPF_AND:
	case BPF_OR:
	case BPF_XOR:
		upper_bound = 0xffff;
		lower_bound = 0;
		break;
	case BPF_RSH:
	case BPF_LSH:
	case BPF_ARSH:
		/* Shift amounts are truncated, no need for bounds */
		upper_bound = S32_MAX;
		lower_bound = S32_MIN;
		break;
	default:
		return -EINVAL;
	}

	/*
	 * Immediate move clobbers the register, so no sign/zero
	 * extension needed.
	 */
	if (BPF_CLASS(insn->code) == BPF_ALU64 &&
	    BPF_OP(insn->code) != BPF_MOV &&
	    get_reg_val_type(ctx, idx, insn->dst_reg) == REG_32BIT)
		emit_instr(ctx, dinsu, dst, MIPS_R_ZERO, 32, 32);
	/* BPF_ALU | BPF_LSH doesn't need separate sign extension */
	if (BPF_CLASS(insn->code) == BPF_ALU &&
	    BPF_OP(insn->code) != BPF_LSH &&
	    BPF_OP(insn->code) != BPF_MOV &&
	    get_reg_val_type(ctx, idx, insn->dst_reg) != REG_32BIT)
		emit_instr(ctx, sll, dst, dst, 0);

	if (insn->imm >= lower_bound && insn->imm <= upper_bound) {
		/* single insn immediate case */
		switch (BPF_OP(insn->code) | BPF_CLASS(insn->code)) {
		case BPF_ALU64 | BPF_MOV:
			emit_instr(ctx, daddiu, dst, MIPS_R_ZERO, insn->imm);
			break;
		case BPF_ALU64 | BPF_AND:
		case BPF_ALU | BPF_AND:
			emit_instr(ctx, andi, dst, dst, insn->imm);
			break;
		case BPF_ALU64 | BPF_OR:
		case BPF_ALU | BPF_OR:
			emit_instr(ctx, ori, dst, dst, insn->imm);
			break;
		case BPF_ALU64 | BPF_XOR:
		case BPF_ALU | BPF_XOR:
			emit_instr(ctx, xori, dst, dst, insn->imm);
			break;
		case BPF_ALU64 | BPF_ADD:
			emit_instr(ctx, daddiu, dst, dst, insn->imm);
			break;
		case BPF_ALU64 | BPF_SUB:
			emit_instr(ctx, daddiu, dst, dst, -insn->imm);
			break;
		case BPF_ALU64 | BPF_RSH:
			emit_instr(ctx, dsrl_safe, dst, dst, insn->imm & 0x3f);
			break;
		case BPF_ALU | BPF_RSH:
			emit_instr(ctx, srl, dst, dst, insn->imm & 0x1f);
			break;
		case BPF_ALU64 | BPF_LSH:
			emit_instr(ctx, dsll_safe, dst, dst, insn->imm & 0x3f);
			break;
		case BPF_ALU | BPF_LSH:
			emit_instr(ctx, sll, dst, dst, insn->imm & 0x1f);
			break;
		case BPF_ALU64 | BPF_ARSH:
			emit_instr(ctx, dsra_safe, dst, dst, insn->imm & 0x3f);
			break;
		case BPF_ALU | BPF_ARSH:
			emit_instr(ctx, sra, dst, dst, insn->imm & 0x1f);
			break;
		case BPF_ALU | BPF_MOV:
			emit_instr(ctx, addiu, dst, MIPS_R_ZERO, insn->imm);
			break;
		case BPF_ALU | BPF_ADD:
			emit_instr(ctx, addiu, dst, dst, insn->imm);
			break;
		case BPF_ALU | BPF_SUB:
			emit_instr(ctx, addiu, dst, dst, -insn->imm);
			break;
		default:
			return -EINVAL;
		}
	} else {
		/* multi insn immediate case */
		if (BPF_OP(insn->code) == BPF_MOV) {
			gen_imm_to_reg(insn, dst, ctx);
		} else {
			gen_imm_to_reg(insn, MIPS_R_AT, ctx);
			switch (BPF_OP(insn->code) | BPF_CLASS(insn->code)) {
			case BPF_ALU64 | BPF_AND:
			case BPF_ALU | BPF_AND:
				emit_instr(ctx, and, dst, dst, MIPS_R_AT);
				break;
			case BPF_ALU64 | BPF_OR:
			case BPF_ALU | BPF_OR:
				emit_instr(ctx, or, dst, dst, MIPS_R_AT);
				break;
			case BPF_ALU64 | BPF_XOR:
			case BPF_ALU | BPF_XOR:
				emit_instr(ctx, xor, dst, dst, MIPS_R_AT);
				break;
			case BPF_ALU64 | BPF_ADD:
				emit_instr(ctx, daddu, dst, dst, MIPS_R_AT);
				break;
			case BPF_ALU64 | BPF_SUB:
				emit_instr(ctx, dsubu, dst, dst, MIPS_R_AT);
				break;
			case BPF_ALU | BPF_ADD:
				emit_instr(ctx, addu, dst, dst, MIPS_R_AT);
				break;
			case BPF_ALU | BPF_SUB:
				emit_instr(ctx, subu, dst, dst, MIPS_R_AT);
				break;
			default:
				return -EINVAL;
			}
		}
	}

	return 0;
}

static void * __must_check
ool_skb_header_pointer(const struct sk_buff *skb, int offset,
		       int len, void *buffer)
{
	return skb_header_pointer(skb, offset, len, buffer);
}

static int size_to_len(const struct bpf_insn *insn)
{
	switch (BPF_SIZE(insn->code)) {
	case BPF_B:
		return 1;
	case BPF_H:
		return 2;
	case BPF_W:
		return 4;
	case BPF_DW:
		return 8;
	}
	return 0;
}

static void emit_const_to_reg(struct jit_ctx *ctx, int dst, u64 value)
{
	if (value >= 0xffffffffffff8000ull || value < 0x8000ull) {
		emit_instr(ctx, daddiu, dst, MIPS_R_ZERO, (int)value);
	} else if (value >= 0xffffffff80000000ull ||
		   (value < 0x80000000 && value > 0xffff)) {
		emit_instr(ctx, lui, dst, (s32)(s16)(value >> 16));
		emit_instr(ctx, ori, dst, dst, (unsigned int)(value & 0xffff));
	} else {
		int i;
		bool seen_part = false;
		int needed_shift = 0;

		for (i = 0; i < 4; i++) {
			u64 part = (value >> (16 * (3 - i))) & 0xffff;

			if (seen_part && needed_shift > 0 && (part || i == 3)) {
				emit_instr(ctx, dsll_safe, dst, dst, needed_shift);
				needed_shift = 0;
			}
			if (part) {
				if (i == 0 || (!seen_part && i < 3 && part < 0x8000)) {
					emit_instr(ctx, lui, dst, (s32)(s16)part);
					needed_shift = -16;
				} else {
					emit_instr(ctx, ori, dst,
						   seen_part ? dst : MIPS_R_ZERO,
						   (unsigned int)part);
				}
				seen_part = true;
			}
			if (seen_part)
				needed_shift += 16;
		}
	}
}
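
/*
 * Illustrative note (not from the original source): for a constant
 * that fits neither fast path, e.g. 0x0000123400005678, the loop
 * walks the four 16-bit parts from most to least significant and
 * emits:
 *
 *	lui	dst, 0x1234	# dst = 0x12340000
 *	dsll	dst, dst, 16	# the zero part is absorbed here
 *	ori	dst, dst, 0x5678
 *
 * so an all-zero 16-bit part costs no instruction beyond a widened
 * shift (dsll_safe falls back to dsll32 for shifts above 31).
 */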

static int emit_bpf_tail_call(struct jit_ctx *ctx, int this_idx)
{
	int off, b_off;

	ctx->flags |= EBPF_SEEN_TC;
	/*
	 * if (index >= array->map.max_entries)
	 *     goto out;
	 */
	off = offsetof(struct bpf_array, map.max_entries);
	emit_instr(ctx, lwu, MIPS_R_T5, off, MIPS_R_A1);
	emit_instr(ctx, sltu, MIPS_R_AT, MIPS_R_T5, MIPS_R_A2);
	b_off = b_imm(this_idx + 1, ctx);
	emit_instr(ctx, bne, MIPS_R_AT, MIPS_R_ZERO, b_off);
	/*
	 * if (--TCC < 0)
	 *     goto out;
	 */
	/* Delay slot */
	emit_instr(ctx, daddiu, MIPS_R_T5,
		   (ctx->flags & EBPF_TCC_IN_V1) ? MIPS_R_V1 : MIPS_R_S4, -1);
	b_off = b_imm(this_idx + 1, ctx);
	emit_instr(ctx, bltz, MIPS_R_T5, b_off);
	/*
	 * prog = array->ptrs[index];
	 * if (prog == NULL)
	 *     goto out;
	 */
	/* Delay slot */
	emit_instr(ctx, dsll, MIPS_R_T8, MIPS_R_A2, 3);
	emit_instr(ctx, daddu, MIPS_R_T8, MIPS_R_T8, MIPS_R_A1);
	off = offsetof(struct bpf_array, ptrs);
	emit_instr(ctx, ld, MIPS_R_AT, off, MIPS_R_T8);
	b_off = b_imm(this_idx + 1, ctx);
	emit_instr(ctx, beq, MIPS_R_AT, MIPS_R_ZERO, b_off);
	/* Delay slot */
	emit_instr(ctx, nop);

	/* goto *(prog->bpf_func + 4); */
	off = offsetof(struct bpf_prog, bpf_func);
	emit_instr(ctx, ld, MIPS_R_T9, off, MIPS_R_AT);
	/* All systems are go... propagate TCC */
	emit_instr(ctx, daddu, MIPS_R_V1, MIPS_R_T5, MIPS_R_ZERO);
	/* Skip first instruction (TCC initialization) */
	emit_instr(ctx, daddiu, MIPS_R_T9, MIPS_R_T9, 4);
	return build_int_epilogue(ctx, MIPS_R_T9);
}
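
/*
 * Illustrative note (not from the original source): the callee is
 * entered at bpf_func + 4, skipping its first instruction (the
 * "daddiu $v1, $zero, MAX_TAIL_CALL_CNT" emitted by the prologue),
 * so the decremented TCC propagated in $v1 survives the chain of
 * tail calls instead of being reset at each hop.
 */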

static bool is_bad_offset(int b_off)
{
	return b_off > 0x1ffff || b_off < -0x20000;
}
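
/*
 * Illustrative note (not from the original source): MIPS conditional
 * branches carry a 16-bit signed word offset, giving a reach of
 * -0x20000 .. +0x1fffc bytes from the delay slot; the bounds above
 * are that range expressed in bytes.  Branches that fall outside it
 * are rewritten via j_target()/OFFSETS_B_CONV as described earlier.
 */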

/* Returns the number of insn slots consumed. */
static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
			  int this_idx, int exit_idx)
{
	int src, dst, r, td, ts, mem_off, b_off;
	bool need_swap, did_move, cmp_eq;
	unsigned int target = 0;
	u64 t64;
	s64 t64s;
	int bpf_op = BPF_OP(insn->code);

	switch (insn->code) {
	case BPF_ALU64 | BPF_ADD | BPF_K: /* ALU64_IMM */
	case BPF_ALU64 | BPF_SUB | BPF_K: /* ALU64_IMM */
	case BPF_ALU64 | BPF_OR | BPF_K: /* ALU64_IMM */
	case BPF_ALU64 | BPF_AND | BPF_K: /* ALU64_IMM */
	case BPF_ALU64 | BPF_LSH | BPF_K: /* ALU64_IMM */
	case BPF_ALU64 | BPF_RSH | BPF_K: /* ALU64_IMM */
	case BPF_ALU64 | BPF_XOR | BPF_K: /* ALU64_IMM */
	case BPF_ALU64 | BPF_ARSH | BPF_K: /* ALU64_IMM */
	case BPF_ALU64 | BPF_MOV | BPF_K: /* ALU64_IMM */
	case BPF_ALU | BPF_MOV | BPF_K: /* ALU32_IMM */
	case BPF_ALU | BPF_ADD | BPF_K: /* ALU32_IMM */
	case BPF_ALU | BPF_SUB | BPF_K: /* ALU32_IMM */
	case BPF_ALU | BPF_OR | BPF_K: /* ALU32_IMM */
	case BPF_ALU | BPF_AND | BPF_K: /* ALU32_IMM */
	case BPF_ALU | BPF_LSH | BPF_K: /* ALU32_IMM */
	case BPF_ALU | BPF_RSH | BPF_K: /* ALU32_IMM */
	case BPF_ALU | BPF_XOR | BPF_K: /* ALU32_IMM */
	case BPF_ALU | BPF_ARSH | BPF_K: /* ALU32_IMM */
		r = gen_imm_insn(insn, ctx, this_idx);
		if (r < 0)
			return r;
		break;
	case BPF_ALU64 | BPF_MUL | BPF_K: /* ALU64_IMM */
		dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
		if (dst < 0)
			return dst;
		if (get_reg_val_type(ctx, this_idx, insn->dst_reg) == REG_32BIT)
			emit_instr(ctx, dinsu, dst, MIPS_R_ZERO, 32, 32);
		if (insn->imm == 1) /* Mult by 1 is a nop */
			break;
		gen_imm_to_reg(insn, MIPS_R_AT, ctx);
		emit_instr(ctx, dmultu, MIPS_R_AT, dst);
		emit_instr(ctx, mflo, dst);
		break;
	case BPF_ALU64 | BPF_NEG | BPF_K: /* ALU64_IMM */
		dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
		if (dst < 0)
			return dst;
		if (get_reg_val_type(ctx, this_idx, insn->dst_reg) == REG_32BIT)
			emit_instr(ctx, dinsu, dst, MIPS_R_ZERO, 32, 32);
		emit_instr(ctx, dsubu, dst, MIPS_R_ZERO, dst);
		break;
	case BPF_ALU | BPF_MUL | BPF_K: /* ALU_IMM */
		dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
		if (dst < 0)
			return dst;
		td = get_reg_val_type(ctx, this_idx, insn->dst_reg);
		if (td == REG_64BIT || td == REG_32BIT_ZERO_EX) {
			/* sign extend */
			emit_instr(ctx, sll, dst, dst, 0);
		}
		if (insn->imm == 1) /* Mult by 1 is a nop */
			break;
		gen_imm_to_reg(insn, MIPS_R_AT, ctx);
		emit_instr(ctx, multu, dst, MIPS_R_AT);
		emit_instr(ctx, mflo, dst);
		break;
	case BPF_ALU | BPF_NEG | BPF_K: /* ALU_IMM */
		dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
		if (dst < 0)
			return dst;
		td = get_reg_val_type(ctx, this_idx, insn->dst_reg);
		if (td == REG_64BIT || td == REG_32BIT_ZERO_EX) {
			/* sign extend */
			emit_instr(ctx, sll, dst, dst, 0);
		}
		emit_instr(ctx, subu, dst, MIPS_R_ZERO, dst);
		break;
	case BPF_ALU | BPF_DIV | BPF_K: /* ALU_IMM */
	case BPF_ALU | BPF_MOD | BPF_K: /* ALU_IMM */
		if (insn->imm == 0)
			return -EINVAL;
		dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
		if (dst < 0)
			return dst;
		td = get_reg_val_type(ctx, this_idx, insn->dst_reg);
		if (td == REG_64BIT || td == REG_32BIT_ZERO_EX)
			/* sign extend */
			emit_instr(ctx, sll, dst, dst, 0);
		if (insn->imm == 1) {
			/* div by 1 is a nop, mod by 1 is zero */
			if (bpf_op == BPF_MOD)
				emit_instr(ctx, addu, dst, MIPS_R_ZERO, MIPS_R_ZERO);
			break;
		}
		gen_imm_to_reg(insn, MIPS_R_AT, ctx);
		emit_instr(ctx, divu, dst, MIPS_R_AT);
		if (bpf_op == BPF_DIV)
			emit_instr(ctx, mflo, dst);
		else
			emit_instr(ctx, mfhi, dst);
		break;
	case BPF_ALU64 | BPF_DIV | BPF_K: /* ALU64_IMM */
	case BPF_ALU64 | BPF_MOD | BPF_K: /* ALU64_IMM */
		if (insn->imm == 0)
			return -EINVAL;
		dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
		if (dst < 0)
			return dst;
		if (get_reg_val_type(ctx, this_idx, insn->dst_reg) == REG_32BIT)
			emit_instr(ctx, dinsu, dst, MIPS_R_ZERO, 32, 32);
		if (insn->imm == 1) {
			/* div by 1 is a nop, mod by 1 is zero */
			if (bpf_op == BPF_MOD)
				emit_instr(ctx, addu, dst, MIPS_R_ZERO, MIPS_R_ZERO);
			break;
		}
		gen_imm_to_reg(insn, MIPS_R_AT, ctx);
		emit_instr(ctx, ddivu, dst, MIPS_R_AT);
		if (bpf_op == BPF_DIV)
			emit_instr(ctx, mflo, dst);
		else
			emit_instr(ctx, mfhi, dst);
		break;
	case BPF_ALU64 | BPF_MOV | BPF_X: /* ALU64_REG */
	case BPF_ALU64 | BPF_ADD | BPF_X: /* ALU64_REG */
	case BPF_ALU64 | BPF_SUB | BPF_X: /* ALU64_REG */
	case BPF_ALU64 | BPF_XOR | BPF_X: /* ALU64_REG */
	case BPF_ALU64 | BPF_OR | BPF_X: /* ALU64_REG */
	case BPF_ALU64 | BPF_AND | BPF_X: /* ALU64_REG */
	case BPF_ALU64 | BPF_MUL | BPF_X: /* ALU64_REG */
	case BPF_ALU64 | BPF_DIV | BPF_X: /* ALU64_REG */
	case BPF_ALU64 | BPF_MOD | BPF_X: /* ALU64_REG */
	case BPF_ALU64 | BPF_LSH | BPF_X: /* ALU64_REG */
	case BPF_ALU64 | BPF_RSH | BPF_X: /* ALU64_REG */
	case BPF_ALU64 | BPF_ARSH | BPF_X: /* ALU64_REG */
		src = ebpf_to_mips_reg(ctx, insn, src_reg);
		dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
		if (src < 0 || dst < 0)
			return -EINVAL;
		if (get_reg_val_type(ctx, this_idx, insn->dst_reg) == REG_32BIT)
			emit_instr(ctx, dinsu, dst, MIPS_R_ZERO, 32, 32);
		did_move = false;
		if (insn->src_reg == BPF_REG_10) {
			if (bpf_op == BPF_MOV) {
				emit_instr(ctx, daddiu, dst, MIPS_R_SP, MAX_BPF_STACK);
				did_move = true;
			} else {
				emit_instr(ctx, daddiu, MIPS_R_AT, MIPS_R_SP, MAX_BPF_STACK);
				src = MIPS_R_AT;
			}
		} else if (get_reg_val_type(ctx, this_idx, insn->src_reg) == REG_32BIT) {
			int tmp_reg = MIPS_R_AT;

			if (bpf_op == BPF_MOV) {
				tmp_reg = dst;
				did_move = true;
			}
			emit_instr(ctx, daddu, tmp_reg, src, MIPS_R_ZERO);
			emit_instr(ctx, dinsu, tmp_reg, MIPS_R_ZERO, 32, 32);
			src = MIPS_R_AT;
		}
		switch (bpf_op) {
		case BPF_MOV:
			if (!did_move)
				emit_instr(ctx, daddu, dst, src, MIPS_R_ZERO);
			break;
		case BPF_ADD:
			emit_instr(ctx, daddu, dst, dst, src);
			break;
		case BPF_SUB:
			emit_instr(ctx, dsubu, dst, dst, src);
			break;
		case BPF_XOR:
			emit_instr(ctx, xor, dst, dst, src);
			break;
		case BPF_OR:
			emit_instr(ctx, or, dst, dst, src);
			break;
		case BPF_AND:
			emit_instr(ctx, and, dst, dst, src);
			break;
		case BPF_MUL:
			emit_instr(ctx, dmultu, dst, src);
			emit_instr(ctx, mflo, dst);
			break;
		case BPF_DIV:
		case BPF_MOD:
			emit_instr(ctx, ddivu, dst, src);
			if (bpf_op == BPF_DIV)
				emit_instr(ctx, mflo, dst);
			else
				emit_instr(ctx, mfhi, dst);
			break;
		case BPF_LSH:
			emit_instr(ctx, dsllv, dst, dst, src);
			break;
		case BPF_RSH:
			emit_instr(ctx, dsrlv, dst, dst, src);
			break;
		case BPF_ARSH:
			emit_instr(ctx, dsrav, dst, dst, src);
			break;
		default:
			pr_err("ALU64_REG NOT HANDLED\n");
			return -EINVAL;
		}
		break;
	case BPF_ALU | BPF_MOV | BPF_X: /* ALU_REG */
	case BPF_ALU | BPF_ADD | BPF_X: /* ALU_REG */
	case BPF_ALU | BPF_SUB | BPF_X: /* ALU_REG */
	case BPF_ALU | BPF_XOR | BPF_X: /* ALU_REG */
	case BPF_ALU | BPF_OR | BPF_X: /* ALU_REG */
	case BPF_ALU | BPF_AND | BPF_X: /* ALU_REG */
	case BPF_ALU | BPF_MUL | BPF_X: /* ALU_REG */
	case BPF_ALU | BPF_DIV | BPF_X: /* ALU_REG */
	case BPF_ALU | BPF_MOD | BPF_X: /* ALU_REG */
	case BPF_ALU | BPF_LSH | BPF_X: /* ALU_REG */
	case BPF_ALU | BPF_RSH | BPF_X: /* ALU_REG */
		src = ebpf_to_mips_reg(ctx, insn, src_reg_no_fp);
		dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
		if (src < 0 || dst < 0)
			return -EINVAL;
		td = get_reg_val_type(ctx, this_idx, insn->dst_reg);
		if (td == REG_64BIT || td == REG_32BIT_ZERO_EX) {
			/* sign extend */
			emit_instr(ctx, sll, dst, dst, 0);
		}
		did_move = false;
		ts = get_reg_val_type(ctx, this_idx, insn->src_reg);
		if (ts == REG_64BIT || ts == REG_32BIT_ZERO_EX) {
			int tmp_reg = MIPS_R_AT;

			if (bpf_op == BPF_MOV) {
				tmp_reg = dst;
				did_move = true;
			}
			/* sign extend */
			emit_instr(ctx, sll, tmp_reg, src, 0);
			src = MIPS_R_AT;
		}
		switch (bpf_op) {
		case BPF_MOV:
			if (!did_move)
				emit_instr(ctx, addu, dst, src, MIPS_R_ZERO);
			break;
		case BPF_ADD:
			emit_instr(ctx, addu, dst, dst, src);
			break;
		case BPF_SUB:
			emit_instr(ctx, subu, dst, dst, src);
			break;
		case BPF_XOR:
			emit_instr(ctx, xor, dst, dst, src);
			break;
		case BPF_OR:
			emit_instr(ctx, or, dst, dst, src);
			break;
		case BPF_AND:
			emit_instr(ctx, and, dst, dst, src);
			break;
		case BPF_MUL:
			emit_instr(ctx, mul, dst, dst, src);
			break;
		case BPF_DIV:
		case BPF_MOD:
			emit_instr(ctx, divu, dst, src);
			if (bpf_op == BPF_DIV)
				emit_instr(ctx, mflo, dst);
			else
				emit_instr(ctx, mfhi, dst);
			break;
		case BPF_LSH:
			emit_instr(ctx, sllv, dst, dst, src);
			break;
		case BPF_RSH:
			emit_instr(ctx, srlv, dst, dst, src);
			break;
		default:
			pr_err("ALU_REG NOT HANDLED\n");
			return -EINVAL;
		}
		break;
	case BPF_JMP | BPF_EXIT:
		if (this_idx + 1 < exit_idx) {
			b_off = b_imm(exit_idx, ctx);
			if (is_bad_offset(b_off))
				return -E2BIG;
			emit_instr(ctx, beq, MIPS_R_ZERO, MIPS_R_ZERO, b_off);
			emit_instr(ctx, nop);
		}
		break;
	case BPF_JMP | BPF_JEQ | BPF_K: /* JMP_IMM */
	case BPF_JMP | BPF_JNE | BPF_K: /* JMP_IMM */
		cmp_eq = (bpf_op == BPF_JEQ);
		dst = ebpf_to_mips_reg(ctx, insn, dst_reg_fp_ok);
		if (dst < 0)
			return dst;
		if (insn->imm == 0) {
			src = MIPS_R_ZERO;
		} else {
			gen_imm_to_reg(insn, MIPS_R_AT, ctx);
			src = MIPS_R_AT;
		}
		goto jeq_common;
	case BPF_JMP | BPF_JEQ | BPF_X: /* JMP_REG */
	case BPF_JMP | BPF_JNE | BPF_X:
	case BPF_JMP | BPF_JSLT | BPF_X:
	case BPF_JMP | BPF_JSLE | BPF_X:
	case BPF_JMP | BPF_JSGT | BPF_X:
	case BPF_JMP | BPF_JSGE | BPF_X:
	case BPF_JMP | BPF_JLT | BPF_X:
	case BPF_JMP | BPF_JLE | BPF_X:
	case BPF_JMP | BPF_JGT | BPF_X:
	case BPF_JMP | BPF_JGE | BPF_X:
	case BPF_JMP | BPF_JSET | BPF_X:
		src = ebpf_to_mips_reg(ctx, insn, src_reg_no_fp);
		dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
		if (src < 0 || dst < 0)
			return -EINVAL;
		td = get_reg_val_type(ctx, this_idx, insn->dst_reg);
		ts = get_reg_val_type(ctx, this_idx, insn->src_reg);
		if (td == REG_32BIT && ts != REG_32BIT) {
			emit_instr(ctx, sll, MIPS_R_AT, src, 0);
			src = MIPS_R_AT;
		} else if (ts == REG_32BIT && td != REG_32BIT) {
			emit_instr(ctx, sll, MIPS_R_AT, dst, 0);
			dst = MIPS_R_AT;
		}
		if (bpf_op == BPF_JSET) {
			emit_instr(ctx, and, MIPS_R_AT, dst, src);
			cmp_eq = false;
			dst = MIPS_R_AT;
			src = MIPS_R_ZERO;
		} else if (bpf_op == BPF_JSGT || bpf_op == BPF_JSLE) {
			emit_instr(ctx, dsubu, MIPS_R_AT, dst, src);
			if ((insn + 1)->code == (BPF_JMP | BPF_EXIT) && insn->off == 1) {
				b_off = b_imm(exit_idx, ctx);
				if (is_bad_offset(b_off))
					return -E2BIG;
				if (bpf_op == BPF_JSGT)
					emit_instr(ctx, blez, MIPS_R_AT, b_off);
				else
					emit_instr(ctx, bgtz, MIPS_R_AT, b_off);
				emit_instr(ctx, nop);
				return 2; /* We consumed the exit. */
			}
			b_off = b_imm(this_idx + insn->off + 1, ctx);
			if (is_bad_offset(b_off))
				return -E2BIG;
			if (bpf_op == BPF_JSGT)
				emit_instr(ctx, bgtz, MIPS_R_AT, b_off);
			else
				emit_instr(ctx, blez, MIPS_R_AT, b_off);
			emit_instr(ctx, nop);
			break;
		} else if (bpf_op == BPF_JSGE || bpf_op == BPF_JSLT) {
			emit_instr(ctx, slt, MIPS_R_AT, dst, src);
			cmp_eq = bpf_op == BPF_JSGE;
			dst = MIPS_R_AT;
			src = MIPS_R_ZERO;
		} else if (bpf_op == BPF_JGT || bpf_op == BPF_JLE) {
			/* dst or src could be AT */
			emit_instr(ctx, dsubu, MIPS_R_T8, dst, src);
			emit_instr(ctx, sltu, MIPS_R_AT, dst, src);
			/* SP known to be non-zero, movz becomes boolean not */
			emit_instr(ctx, movz, MIPS_R_T9, MIPS_R_SP, MIPS_R_T8);
			emit_instr(ctx, movn, MIPS_R_T9, MIPS_R_ZERO, MIPS_R_T8);
			emit_instr(ctx, or, MIPS_R_AT, MIPS_R_T9, MIPS_R_AT);
			cmp_eq = bpf_op == BPF_JGT;
			dst = MIPS_R_AT;
			src = MIPS_R_ZERO;
		} else if (bpf_op == BPF_JGE || bpf_op == BPF_JLT) {
			emit_instr(ctx, sltu, MIPS_R_AT, dst, src);
			cmp_eq = bpf_op == BPF_JGE;
			dst = MIPS_R_AT;
			src = MIPS_R_ZERO;
		} else { /* JNE/JEQ case */
			cmp_eq = (bpf_op == BPF_JEQ);
		}
jeq_common:
		/*
		 * If the next insn is EXIT and we are jumping around
		 * only it, invert the sense of the compare and
		 * conditionally jump to the exit.  Poor man's branch
		 * chaining.
		 */
		if ((insn + 1)->code == (BPF_JMP | BPF_EXIT) && insn->off == 1) {
			b_off = b_imm(exit_idx, ctx);
			if (is_bad_offset(b_off)) {
				target = j_target(ctx, exit_idx);
				if (target == (unsigned int)-1)
					return -E2BIG;
				cmp_eq = !cmp_eq;
				b_off = 4 * 3;
				if (!(ctx->offsets[this_idx] & OFFSETS_B_CONV)) {
					ctx->offsets[this_idx] |= OFFSETS_B_CONV;
					ctx->long_b_conversion = 1;
				}
			}

			if (cmp_eq)
				emit_instr(ctx, bne, dst, src, b_off);
			else
				emit_instr(ctx, beq, dst, src, b_off);
			emit_instr(ctx, nop);
			if (ctx->offsets[this_idx] & OFFSETS_B_CONV) {
				emit_instr(ctx, j, target);
				emit_instr(ctx, nop);
			}
			return 2; /* We consumed the exit. */
		}
		b_off = b_imm(this_idx + insn->off + 1, ctx);
		if (is_bad_offset(b_off)) {
			target = j_target(ctx, this_idx + insn->off + 1);
			if (target == (unsigned int)-1)
				return -E2BIG;
			cmp_eq = !cmp_eq;
			b_off = 4 * 3;
			if (!(ctx->offsets[this_idx] & OFFSETS_B_CONV)) {
				ctx->offsets[this_idx] |= OFFSETS_B_CONV;
				ctx->long_b_conversion = 1;
			}
		}

		if (cmp_eq)
			emit_instr(ctx, beq, dst, src, b_off);
		else
			emit_instr(ctx, bne, dst, src, b_off);
		emit_instr(ctx, nop);
		if (ctx->offsets[this_idx] & OFFSETS_B_CONV) {
			emit_instr(ctx, j, target);
			emit_instr(ctx, nop);
		}
		break;
	case BPF_JMP | BPF_JSGT | BPF_K: /* JMP_IMM */
	case BPF_JMP | BPF_JSGE | BPF_K: /* JMP_IMM */
	case BPF_JMP | BPF_JSLT | BPF_K: /* JMP_IMM */
	case BPF_JMP | BPF_JSLE | BPF_K: /* JMP_IMM */
		cmp_eq = (bpf_op == BPF_JSGE);
		dst = ebpf_to_mips_reg(ctx, insn, dst_reg_fp_ok);
		if (dst < 0)
			return dst;

		if (insn->imm == 0) {
			if ((insn + 1)->code == (BPF_JMP | BPF_EXIT) && insn->off == 1) {
				b_off = b_imm(exit_idx, ctx);
				if (is_bad_offset(b_off))
					return -E2BIG;
				switch (bpf_op) {
				case BPF_JSGT:
					emit_instr(ctx, blez, dst, b_off);
					break;
				case BPF_JSGE:
					emit_instr(ctx, bltz, dst, b_off);
					break;
				case BPF_JSLT:
					emit_instr(ctx, bgez, dst, b_off);
					break;
				case BPF_JSLE:
					emit_instr(ctx, bgtz, dst, b_off);
					break;
				}
				emit_instr(ctx, nop);
				return 2; /* We consumed the exit. */
			}
			b_off = b_imm(this_idx + insn->off + 1, ctx);
			if (is_bad_offset(b_off))
				return -E2BIG;
			switch (bpf_op) {
			case BPF_JSGT:
				emit_instr(ctx, bgtz, dst, b_off);
				break;
			case BPF_JSGE:
				emit_instr(ctx, bgez, dst, b_off);
				break;
			case BPF_JSLT:
				emit_instr(ctx, bltz, dst, b_off);
				break;
			case BPF_JSLE:
				emit_instr(ctx, blez, dst, b_off);
				break;
			}
			emit_instr(ctx, nop);
			break;
		}
		/*
		 * Only a "set on less than" compare is available, so
		 * we test against imm + 1: dst > imm is
		 * !(dst < imm + 1) and dst <= imm is (dst < imm + 1).
		 */
		if (bpf_op == BPF_JSGT)
			t64s = insn->imm + 1;
		else if (bpf_op == BPF_JSLE)
			t64s = insn->imm + 1;
		else
			t64s = insn->imm;
		cmp_eq = bpf_op == BPF_JSGT || bpf_op == BPF_JSGE;
		if (t64s >= S16_MIN && t64s <= S16_MAX) {
			emit_instr(ctx, slti, MIPS_R_AT, dst, (int)t64s);
			src = MIPS_R_AT;
			dst = MIPS_R_ZERO;
			goto jeq_common;
		}
		emit_const_to_reg(ctx, MIPS_R_AT, (u64)t64s);
		emit_instr(ctx, slt, MIPS_R_AT, dst, MIPS_R_AT);
		src = MIPS_R_AT;
		dst = MIPS_R_ZERO;
		goto jeq_common;
	case BPF_JMP | BPF_JGT | BPF_K:
	case BPF_JMP | BPF_JGE | BPF_K:
	case BPF_JMP | BPF_JLT | BPF_K:
	case BPF_JMP | BPF_JLE | BPF_K:
		cmp_eq = (bpf_op == BPF_JGE);
		dst = ebpf_to_mips_reg(ctx, insn, dst_reg_fp_ok);
		if (dst < 0)
			return dst;
		/*
		 * Only an unsigned "set on less than" compare is
		 * available, so test against (u32)imm + 1 the same
		 * way: dst > imm is !(dst < imm + 1) and dst <= imm
		 * is (dst < imm + 1).
		 */
		if (bpf_op == BPF_JGT)
			t64s = (u64)(u32)(insn->imm) + 1;
		else if (bpf_op == BPF_JLE)
			t64s = (u64)(u32)(insn->imm) + 1;
		else
			t64s = (u64)(u32)(insn->imm);

		cmp_eq = bpf_op == BPF_JGT || bpf_op == BPF_JGE;

		emit_const_to_reg(ctx, MIPS_R_AT, (u64)t64s);
		emit_instr(ctx, sltu, MIPS_R_AT, dst, MIPS_R_AT);
		src = MIPS_R_AT;
		dst = MIPS_R_ZERO;
		goto jeq_common;

	case BPF_JMP | BPF_JSET | BPF_K: /* JMP_IMM */
		dst = ebpf_to_mips_reg(ctx, insn, dst_reg_fp_ok);
		if (dst < 0)
			return dst;

		if (ctx->use_bbit_insns && hweight32((u32)insn->imm) == 1) {
			if ((insn + 1)->code == (BPF_JMP | BPF_EXIT) && insn->off == 1) {
				b_off = b_imm(exit_idx, ctx);
				if (is_bad_offset(b_off))
					return -E2BIG;
				emit_instr(ctx, bbit0, dst, ffs((u32)insn->imm) - 1, b_off);
				emit_instr(ctx, nop);
				return 2; /* We consumed the exit. */
			}
			b_off = b_imm(this_idx + insn->off + 1, ctx);
			if (is_bad_offset(b_off))
				return -E2BIG;
			emit_instr(ctx, bbit1, dst, ffs((u32)insn->imm) - 1, b_off);
			emit_instr(ctx, nop);
			break;
		}
		t64 = (u32)insn->imm;
		emit_const_to_reg(ctx, MIPS_R_AT, t64);
		emit_instr(ctx, and, MIPS_R_AT, dst, MIPS_R_AT);
		src = MIPS_R_AT;
		dst = MIPS_R_ZERO;
		cmp_eq = false;
		goto jeq_common;

	case BPF_JMP | BPF_JA:
		/*
		 * Prefer relative branch for easier debugging, but
		 * fall back if needed.
		 */
		b_off = b_imm(this_idx + insn->off + 1, ctx);
		if (is_bad_offset(b_off)) {
			target = j_target(ctx, this_idx + insn->off + 1);
			if (target == (unsigned int)-1)
				return -E2BIG;
			emit_instr(ctx, j, target);
		} else {
			emit_instr(ctx, b, b_off);
		}
		emit_instr(ctx, nop);
		break;
	case BPF_LD | BPF_DW | BPF_IMM:
		if (insn->src_reg != 0)
			return -EINVAL;
		dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
		if (dst < 0)
			return dst;
		t64 = ((u64)(u32)insn->imm) | ((u64)(insn + 1)->imm << 32);
		emit_const_to_reg(ctx, dst, t64);
		return 2; /* Double slot insn */

	case BPF_JMP | BPF_CALL:
		ctx->flags |= EBPF_SAVE_RA;
		t64s = (s64)insn->imm + (s64)__bpf_call_base;
		emit_const_to_reg(ctx, MIPS_R_T9, (u64)t64s);
		emit_instr(ctx, jalr, MIPS_R_RA, MIPS_R_T9);
		/* delay slot */
		emit_instr(ctx, nop);
		break;

	case BPF_JMP | BPF_TAIL_CALL:
		if (emit_bpf_tail_call(ctx, this_idx))
			return -EINVAL;
		break;

	case BPF_LD | BPF_B | BPF_ABS:
	case BPF_LD | BPF_H | BPF_ABS:
	case BPF_LD | BPF_W | BPF_ABS:
	case BPF_LD | BPF_DW | BPF_ABS:
		ctx->flags |= EBPF_SAVE_RA;

		gen_imm_to_reg(insn, MIPS_R_A1, ctx);
		emit_instr(ctx, addiu, MIPS_R_A2, MIPS_R_ZERO, size_to_len(insn));

		if (insn->imm < 0) {
			emit_const_to_reg(ctx, MIPS_R_T9, (u64)bpf_internal_load_pointer_neg_helper);
		} else {
			emit_const_to_reg(ctx, MIPS_R_T9, (u64)ool_skb_header_pointer);
			emit_instr(ctx, daddiu, MIPS_R_A3, MIPS_R_SP, ctx->tmp_offset);
		}
		goto ld_skb_common;

	case BPF_LD | BPF_B | BPF_IND:
	case BPF_LD | BPF_H | BPF_IND:
	case BPF_LD | BPF_W | BPF_IND:
	case BPF_LD | BPF_DW | BPF_IND:
		ctx->flags |= EBPF_SAVE_RA;
		src = ebpf_to_mips_reg(ctx, insn, src_reg_no_fp);
		if (src < 0)
			return src;
		ts = get_reg_val_type(ctx, this_idx, insn->src_reg);
		if (ts == REG_32BIT_ZERO_EX) {
			/* sign extend */
			emit_instr(ctx, sll, MIPS_R_A1, src, 0);
			src = MIPS_R_A1;
		}
		if (insn->imm >= S16_MIN && insn->imm <= S16_MAX) {
			emit_instr(ctx, daddiu, MIPS_R_A1, src, insn->imm);
		} else {
			gen_imm_to_reg(insn, MIPS_R_AT, ctx);
			emit_instr(ctx, daddu, MIPS_R_A1, MIPS_R_AT, src);
		}
		/* truncate to 32-bit int */
		emit_instr(ctx, sll, MIPS_R_A1, MIPS_R_A1, 0);
		emit_instr(ctx, daddiu, MIPS_R_A3, MIPS_R_SP, ctx->tmp_offset);
		emit_instr(ctx, slt, MIPS_R_AT, MIPS_R_A1, MIPS_R_ZERO);
		emit_const_to_reg(ctx, MIPS_R_T8, (u64)bpf_internal_load_pointer_neg_helper);
		emit_const_to_reg(ctx, MIPS_R_T9, (u64)ool_skb_header_pointer);
		emit_instr(ctx, addiu, MIPS_R_A2, MIPS_R_ZERO, size_to_len(insn));
		emit_instr(ctx, movn, MIPS_R_T9, MIPS_R_T8, MIPS_R_AT);

ld_skb_common:
		emit_instr(ctx, jalr, MIPS_R_RA, MIPS_R_T9);
		/* delay slot move */
		emit_instr(ctx, daddu, MIPS_R_A0, MIPS_R_S0, MIPS_R_ZERO);

		/* Check the error value */
		b_off = b_imm(exit_idx, ctx);
		if (is_bad_offset(b_off)) {
			target = j_target(ctx, exit_idx);
			if (target == (unsigned int)-1)
				return -E2BIG;

			if (!(ctx->offsets[this_idx] & OFFSETS_B_CONV)) {
				ctx->offsets[this_idx] |= OFFSETS_B_CONV;
				ctx->long_b_conversion = 1;
			}
			emit_instr(ctx, bne, MIPS_R_V0, MIPS_R_ZERO, 4 * 3);
			emit_instr(ctx, nop);
			emit_instr(ctx, j, target);
			emit_instr(ctx, nop);
		} else {
			emit_instr(ctx, beq, MIPS_R_V0, MIPS_R_ZERO, b_off);
			emit_instr(ctx, nop);
		}

#ifdef __BIG_ENDIAN
		need_swap = false;
#else
		need_swap = true;
#endif
		dst = MIPS_R_V0;
		switch (BPF_SIZE(insn->code)) {
		case BPF_B:
			emit_instr(ctx, lbu, dst, 0, MIPS_R_V0);
			break;
		case BPF_H:
			emit_instr(ctx, lhu, dst, 0, MIPS_R_V0);
			if (need_swap)
				emit_instr(ctx, wsbh, dst, dst);
			break;
		case BPF_W:
			emit_instr(ctx, lw, dst, 0, MIPS_R_V0);
			if (need_swap) {
				emit_instr(ctx, wsbh, dst, dst);
				emit_instr(ctx, rotr, dst, dst, 16);
			}
			break;
		case BPF_DW:
			emit_instr(ctx, ld, dst, 0, MIPS_R_V0);
			if (need_swap) {
				emit_instr(ctx, dsbh, dst, dst);
				emit_instr(ctx, dshd, dst, dst);
			}
			break;
		}
		break;
	case BPF_ALU | BPF_END | BPF_FROM_BE:
	case BPF_ALU | BPF_END | BPF_FROM_LE:
		dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
		if (dst < 0)
			return dst;
		td = get_reg_val_type(ctx, this_idx, insn->dst_reg);
		if (insn->imm == 64 && td == REG_32BIT)
			emit_instr(ctx, dinsu, dst, MIPS_R_ZERO, 32, 32);

		if (insn->imm != 64 &&
		    (td == REG_64BIT || td == REG_32BIT_ZERO_EX)) {
			/* sign extend */
			emit_instr(ctx, sll, dst, dst, 0);
		}

#ifdef __BIG_ENDIAN
		need_swap = (BPF_SRC(insn->code) == BPF_FROM_LE);
#else
		need_swap = (BPF_SRC(insn->code) == BPF_FROM_BE);
#endif
		if (insn->imm == 16) {
			if (need_swap)
				emit_instr(ctx, wsbh, dst, dst);
			emit_instr(ctx, andi, dst, dst, 0xffff);
		} else if (insn->imm == 32) {
			if (need_swap) {
				emit_instr(ctx, wsbh, dst, dst);
				emit_instr(ctx, rotr, dst, dst, 16);
			}
		} else { /* 64-bit */
			if (need_swap) {
				emit_instr(ctx, dsbh, dst, dst);
				emit_instr(ctx, dshd, dst, dst);
			}
		}
		break;

	case BPF_ST | BPF_B | BPF_MEM:
	case BPF_ST | BPF_H | BPF_MEM:
	case BPF_ST | BPF_W | BPF_MEM:
	case BPF_ST | BPF_DW | BPF_MEM:
		if (insn->dst_reg == BPF_REG_10) {
			ctx->flags |= EBPF_SEEN_FP;
			dst = MIPS_R_SP;
			mem_off = insn->off + MAX_BPF_STACK;
		} else {
			dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
			if (dst < 0)
				return dst;
			mem_off = insn->off;
		}
		gen_imm_to_reg(insn, MIPS_R_AT, ctx);
		switch (BPF_SIZE(insn->code)) {
		case BPF_B:
			emit_instr(ctx, sb, MIPS_R_AT, mem_off, dst);
			break;
		case BPF_H:
			emit_instr(ctx, sh, MIPS_R_AT, mem_off, dst);
			break;
		case BPF_W:
			emit_instr(ctx, sw, MIPS_R_AT, mem_off, dst);
			break;
		case BPF_DW:
			emit_instr(ctx, sd, MIPS_R_AT, mem_off, dst);
			break;
		}
		break;

	case BPF_LDX | BPF_B | BPF_MEM:
	case BPF_LDX | BPF_H | BPF_MEM:
	case BPF_LDX | BPF_W | BPF_MEM:
	case BPF_LDX | BPF_DW | BPF_MEM:
		if (insn->src_reg == BPF_REG_10) {
			ctx->flags |= EBPF_SEEN_FP;
			src = MIPS_R_SP;
			mem_off = insn->off + MAX_BPF_STACK;
		} else {
			src = ebpf_to_mips_reg(ctx, insn, src_reg_no_fp);
			if (src < 0)
				return src;
			mem_off = insn->off;
		}
		dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
		if (dst < 0)
			return dst;
		switch (BPF_SIZE(insn->code)) {
		case BPF_B:
			emit_instr(ctx, lbu, dst, mem_off, src);
			break;
		case BPF_H:
			emit_instr(ctx, lhu, dst, mem_off, src);
			break;
		case BPF_W:
			emit_instr(ctx, lw, dst, mem_off, src);
			break;
		case BPF_DW:
			emit_instr(ctx, ld, dst, mem_off, src);
			break;
		}
		break;

	case BPF_STX | BPF_B | BPF_MEM:
	case BPF_STX | BPF_H | BPF_MEM:
	case BPF_STX | BPF_W | BPF_MEM:
	case BPF_STX | BPF_DW | BPF_MEM:
	case BPF_STX | BPF_W | BPF_XADD:
	case BPF_STX | BPF_DW | BPF_XADD:
		if (insn->dst_reg == BPF_REG_10) {
			ctx->flags |= EBPF_SEEN_FP;
			dst = MIPS_R_SP;
			mem_off = insn->off + MAX_BPF_STACK;
		} else {
			dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
			if (dst < 0)
				return dst;
			mem_off = insn->off;
		}
		src = ebpf_to_mips_reg(ctx, insn, src_reg_no_fp);
		if (src < 0)
			return src;
		if (BPF_MODE(insn->code) == BPF_XADD) {
			switch (BPF_SIZE(insn->code)) {
			case BPF_W:
				if (get_reg_val_type(ctx, this_idx, insn->src_reg) == REG_32BIT) {
					emit_instr(ctx, sll, MIPS_R_AT, src, 0);
					src = MIPS_R_AT;
				}
				emit_instr(ctx, ll, MIPS_R_T8, mem_off, dst);
				emit_instr(ctx, addu, MIPS_R_T8, MIPS_R_T8, src);
				emit_instr(ctx, sc, MIPS_R_T8, mem_off, dst);
				/*
				 * On failure back up to LL (-4
				 * instructions of 4 bytes each)
				 */
  1426. emit_instr(ctx, beq, MIPS_R_T8, MIPS_R_ZERO, -4 * 4);
  1427. emit_instr(ctx, nop);
  1428. break;
  1429. case BPF_DW:
  1430. if (get_reg_val_type(ctx, this_idx, insn->src_reg) == REG_32BIT) {
  1431. emit_instr(ctx, daddu, MIPS_R_AT, src, MIPS_R_ZERO);
  1432. emit_instr(ctx, dinsu, MIPS_R_AT, MIPS_R_ZERO, 32, 32);
  1433. src = MIPS_R_AT;
  1434. }
  1435. emit_instr(ctx, lld, MIPS_R_T8, mem_off, dst);
  1436. emit_instr(ctx, daddu, MIPS_R_T8, MIPS_R_T8, src);
  1437. emit_instr(ctx, scd, MIPS_R_T8, mem_off, dst);
  1438. emit_instr(ctx, beq, MIPS_R_T8, MIPS_R_ZERO, -4 * 4);
  1439. emit_instr(ctx, nop);
  1440. break;
  1441. }
  1442. } else { /* BPF_MEM */
  1443. switch (BPF_SIZE(insn->code)) {
  1444. case BPF_B:
  1445. emit_instr(ctx, sb, src, mem_off, dst);
  1446. break;
  1447. case BPF_H:
  1448. emit_instr(ctx, sh, src, mem_off, dst);
  1449. break;
  1450. case BPF_W:
  1451. emit_instr(ctx, sw, src, mem_off, dst);
  1452. break;
  1453. case BPF_DW:
  1454. if (get_reg_val_type(ctx, this_idx, insn->src_reg) == REG_32BIT) {
  1455. emit_instr(ctx, daddu, MIPS_R_AT, src, MIPS_R_ZERO);
  1456. emit_instr(ctx, dinsu, MIPS_R_AT, MIPS_R_ZERO, 32, 32);
  1457. src = MIPS_R_AT;
  1458. }
  1459. emit_instr(ctx, sd, src, mem_off, dst);
  1460. break;
  1461. }
  1462. }
  1463. break;
  1464. default:
  1465. pr_err("NOT HANDLED %d - (%02x)\n",
  1466. this_idx, (unsigned int)insn->code);
  1467. return -EINVAL;
  1468. }
  1469. return 1;
  1470. }
  1471. #define RVT_VISITED_MASK 0xc000000000000000ull
  1472. #define RVT_FALL_THROUGH 0x4000000000000000ull
  1473. #define RVT_BRANCH_TAKEN 0x8000000000000000ull
  1474. #define RVT_DONE (RVT_FALL_THROUGH | RVT_BRANCH_TAKEN)
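
/*
 * Layout of a reg_val_types entry: the low 33 bits hold a 3-bit
 * reg_val_type for each of the 11 eBPF registers (register N in bits
 * 3N..3N+2, per set_reg_val_type()), and the top two bits record which
 * control-flow edges out of the insn have been followed (see
 * reg_val_propagate_range() below).
 */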

static int build_int_body(struct jit_ctx *ctx)
{
	const struct bpf_prog *prog = ctx->skf;
	const struct bpf_insn *insn;
	int i, r;

	for (i = 0; i < prog->len; ) {
		insn = prog->insnsi + i;
		if ((ctx->reg_val_types[i] & RVT_VISITED_MASK) == 0) {
			/* dead instruction, don't emit it. */
			i++;
			continue;
		}

		if (ctx->target == NULL)
			ctx->offsets[i] = (ctx->offsets[i] & OFFSETS_B_CONV) | (ctx->idx * 4);

		r = build_one_insn(insn, ctx, i, prog->len);
		if (r < 0)
			return r;
		i += r;
	}

	/* epilogue offset */
	if (ctx->target == NULL)
		ctx->offsets[i] = ctx->idx * 4;

	/*
	 * All exits have the offset of the epilogue; some offsets may
	 * not have been set due to branch-around threading, so set
	 * them now.
	 */
	if (ctx->target == NULL)
		for (i = 0; i < prog->len; i++) {
			insn = prog->insnsi + i;
			if (insn->code == (BPF_JMP | BPF_EXIT))
				ctx->offsets[i] = ctx->idx * 4;
		}
	return 0;
}

/* return the last idx processed, or negative for error */
static int reg_val_propagate_range(struct jit_ctx *ctx, u64 initial_rvt,
				   int start_idx, bool follow_taken)
{
	const struct bpf_prog *prog = ctx->skf;
	const struct bpf_insn *insn;
	u64 exit_rvt = initial_rvt;
	u64 *rvt = ctx->reg_val_types;
	int idx;
	int reg;

	for (idx = start_idx; idx < prog->len; idx++) {
		rvt[idx] = (rvt[idx] & RVT_VISITED_MASK) | exit_rvt;
		insn = prog->insnsi + idx;
		switch (BPF_CLASS(insn->code)) {
		case BPF_ALU:
			switch (BPF_OP(insn->code)) {
			case BPF_ADD:
			case BPF_SUB:
			case BPF_MUL:
			case BPF_DIV:
			case BPF_OR:
			case BPF_AND:
			case BPF_LSH:
			case BPF_RSH:
			case BPF_NEG:
			case BPF_MOD:
			case BPF_XOR:
				set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT);
				break;
			case BPF_MOV:
				if (BPF_SRC(insn->code)) {
					set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT);
				} else {
					/* IMM to REG move */
					if (insn->imm >= 0)
						set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT_POS);
					else
						set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT);
				}
				break;
			case BPF_END:
				if (insn->imm == 64)
					set_reg_val_type(&exit_rvt, insn->dst_reg, REG_64BIT);
				else if (insn->imm == 32)
					set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT);
				else /* insn->imm == 16 */
					set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT_POS);
				break;
			}
			rvt[idx] |= RVT_DONE;
			break;
		case BPF_ALU64:
			switch (BPF_OP(insn->code)) {
			case BPF_MOV:
				if (BPF_SRC(insn->code)) {
					/* REG to REG move */
					set_reg_val_type(&exit_rvt, insn->dst_reg, REG_64BIT);
				} else {
					/* IMM to REG move */
					if (insn->imm >= 0)
						set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT_POS);
					else
						set_reg_val_type(&exit_rvt, insn->dst_reg, REG_64BIT_32BIT);
				}
				break;
			default:
				set_reg_val_type(&exit_rvt, insn->dst_reg, REG_64BIT);
			}
			rvt[idx] |= RVT_DONE;
			break;
		case BPF_LD:
			switch (BPF_SIZE(insn->code)) {
			case BPF_DW:
				if (BPF_MODE(insn->code) == BPF_IMM) {
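					/*
					 * BPF_LD | BPF_IMM | BPF_DW
					 * occupies two insn slots: the
					 * 64-bit immediate is split
					 * across insn->imm and
					 * (insn + 1)->imm, hence the
					 * extra idx++ below.
					 */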
					s64 val;

					val = (s64)((u32)insn->imm | ((u64)(insn + 1)->imm << 32));
					if (val > 0 && val <= S32_MAX)
						set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT_POS);
					else if (val >= S32_MIN && val <= S32_MAX)
						set_reg_val_type(&exit_rvt, insn->dst_reg, REG_64BIT_32BIT);
					else
						set_reg_val_type(&exit_rvt, insn->dst_reg, REG_64BIT);
					rvt[idx] |= RVT_DONE;
					idx++;
				} else {
					set_reg_val_type(&exit_rvt, insn->dst_reg, REG_64BIT);
				}
				break;
			case BPF_B:
			case BPF_H:
				set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT_POS);
				break;
			case BPF_W:
				if (BPF_MODE(insn->code) == BPF_IMM)
					set_reg_val_type(&exit_rvt, insn->dst_reg,
							 insn->imm >= 0 ? REG_32BIT_POS : REG_32BIT);
				else
					set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT);
				break;
			}
			rvt[idx] |= RVT_DONE;
			break;
		case BPF_LDX:
			switch (BPF_SIZE(insn->code)) {
			case BPF_DW:
				set_reg_val_type(&exit_rvt, insn->dst_reg, REG_64BIT);
				break;
			case BPF_B:
			case BPF_H:
				set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT_POS);
				break;
			case BPF_W:
				set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT);
				break;
			}
			rvt[idx] |= RVT_DONE;
			break;
		case BPF_JMP:
			switch (BPF_OP(insn->code)) {
			case BPF_EXIT:
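				/*
				 * Record the register state at program
				 * exit in the sentinel slot past the
				 * last insn (the arrays are allocated
				 * with prog->len + 1 entries).
				 */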
				rvt[idx] = RVT_DONE | exit_rvt;
				rvt[prog->len] = exit_rvt;
				return idx;
			case BPF_JA:
				rvt[idx] |= RVT_DONE;
				idx += insn->off;
				break;
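			/*
			 * For a conditional jump only one edge is
			 * followed per pass: the taken edge when
			 * follow_taken is set (and consumed here), the
			 * fall-through edge otherwise. The other edge
			 * is picked up by a later restart from
			 * reg_val_propagate().
			 */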
			case BPF_JEQ:
			case BPF_JGT:
			case BPF_JGE:
			case BPF_JLT:
			case BPF_JLE:
			case BPF_JSET:
			case BPF_JNE:
			case BPF_JSGT:
			case BPF_JSGE:
			case BPF_JSLT:
			case BPF_JSLE:
				if (follow_taken) {
					rvt[idx] |= RVT_BRANCH_TAKEN;
					idx += insn->off;
					follow_taken = false;
				} else {
					rvt[idx] |= RVT_FALL_THROUGH;
				}
				break;
			case BPF_CALL:
				set_reg_val_type(&exit_rvt, BPF_REG_0, REG_64BIT);
				/* Upon call return, argument registers are clobbered. */
				for (reg = BPF_REG_0; reg <= BPF_REG_5; reg++)
					set_reg_val_type(&exit_rvt, reg, REG_64BIT);
				rvt[idx] |= RVT_DONE;
				break;
			default:
				WARN(1, "Unhandled BPF_JMP case.\n");
				rvt[idx] |= RVT_DONE;
				break;
			}
			break;
		default:
			rvt[idx] |= RVT_DONE;
			break;
		}
	}
	return idx;
}

/*
 * Track the value range (i.e. 32-bit vs. 64-bit) of each register at
 * each eBPF insn. This allows unneeded sign and zero extension
 * operations to be omitted.
 *
 * Doesn't yet handle confluence of control paths with conflicting
 * ranges, but it is good enough for most sane code.
 */
static int reg_val_propagate(struct jit_ctx *ctx)
{
	const struct bpf_prog *prog = ctx->skf;
	u64 exit_rvt;
	int reg;
	int i;

	/*
	 * 11 registers * 3 bits/reg leaves the top bits free for other
	 * uses. Bits 62..63 are used to record whether we have visited
	 * an insn.
	 */
	exit_rvt = 0;

	/* Upon entry, argument registers are 64-bit. */
	for (reg = BPF_REG_1; reg <= BPF_REG_5; reg++)
		set_reg_val_type(&exit_rvt, reg, REG_64BIT);

	/*
	 * First follow all conditional branches on the fall-through
	 * edge of control flow.
	 */
	reg_val_propagate_range(ctx, exit_rvt, 0, false);
restart_search:
	/*
	 * Then repeatedly find the first conditional branch where both
	 * edges of control flow have not yet been followed, and follow
	 * the branch-taken edge. We will end up restarting the search
	 * once per conditional branch insn.
	 */
	for (i = 0; i < prog->len; i++) {
		u64 rvt = ctx->reg_val_types[i];

		if ((rvt & RVT_VISITED_MASK) == RVT_DONE ||
		    (rvt & RVT_VISITED_MASK) == 0)
			continue;
		if ((rvt & RVT_VISITED_MASK) == RVT_FALL_THROUGH) {
			reg_val_propagate_range(ctx, rvt & ~RVT_VISITED_MASK, i, true);
		} else { /* RVT_BRANCH_TAKEN */
			WARN(1, "Unexpected RVT_BRANCH_TAKEN case.\n");
			reg_val_propagate_range(ctx, rvt & ~RVT_VISITED_MASK, i, false);
		}
		goto restart_search;
	}
	/*
	 * Eventually all conditional branches have been followed on
	 * both edges and we are done. Any insn that has not been
	 * visited at this point is dead.
	 */
	return 0;
}
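
/*
 * Fill unused space in the JIT image with break instructions, so that
 * any stray jump into a hole traps (BRK_BUG) instead of executing
 * whatever bytes happen to be there.
 */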
static void jit_fill_hole(void *area, unsigned int size)
{
	u32 *p;

	/* We are guaranteed to have aligned memory. */
	for (p = area; size >= sizeof(u32); size -= sizeof(u32))
		uasm_i_break(&p, BRK_BUG); /* Increments p */
}

struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
{
	struct bpf_prog *orig_prog = prog;
	bool tmp_blinded = false;
	struct bpf_prog *tmp;
	struct bpf_binary_header *header = NULL;
	struct jit_ctx ctx;
	unsigned int image_size;
	u8 *image_ptr;
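
	/*
	 * The JIT emits MIPS64 R2 instructions (e.g. dinsu for
	 * zero-extension), so fall back to the interpreter on older
	 * cores or when JIT-ing was not requested.
	 */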
	if (!prog->jit_requested || !cpu_has_mips64r2)
		return prog;

	tmp = bpf_jit_blind_constants(prog);
	/*
	 * If blinding was requested and we failed during blinding,
	 * we must fall back to the interpreter.
	 */
	if (IS_ERR(tmp))
		return orig_prog;
	if (tmp != prog) {
		tmp_blinded = true;
		prog = tmp;
	}

	memset(&ctx, 0, sizeof(ctx));

	preempt_disable();
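	/*
	 * Cavium Octeon cores implement the bbit0/bbit1 branch-on-bit
	 * instructions, which let the JIT emit shorter test-and-branch
	 * sequences for some conditional jumps.
	 */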
	switch (current_cpu_type()) {
	case CPU_CAVIUM_OCTEON:
	case CPU_CAVIUM_OCTEON_PLUS:
	case CPU_CAVIUM_OCTEON2:
	case CPU_CAVIUM_OCTEON3:
		ctx.use_bbit_insns = 1;
		break;
	default:
		ctx.use_bbit_insns = 0;
	}
	preempt_enable();

	ctx.offsets = kcalloc(prog->len + 1, sizeof(*ctx.offsets), GFP_KERNEL);
	if (ctx.offsets == NULL)
		goto out_err;

	ctx.reg_val_types = kcalloc(prog->len + 1, sizeof(*ctx.reg_val_types), GFP_KERNEL);
	if (ctx.reg_val_types == NULL)
		goto out_err;

	ctx.skf = prog;

	if (reg_val_propagate(&ctx))
		goto out_err;

	/*
	 * First pass discovers used resources and instruction offsets
	 * assuming short branches are used.
	 */
	if (build_int_body(&ctx))
		goto out_err;

	/*
	 * If no calls are made (EBPF_SAVE_RA), then the tail call count
	 * can live in $v1; otherwise we must save it in $s4.
	 */
	if (ctx.flags & EBPF_SEEN_TC) {
		if (ctx.flags & EBPF_SAVE_RA)
			ctx.flags |= EBPF_SAVE_S4;
		else
			ctx.flags |= EBPF_TCC_IN_V1;
	}

	/*
	 * Second pass generates offsets. If any branches are out of
	 * range, a long jump-around sequence is generated, and we have
	 * to try again from the beginning to generate the new
	 * offsets. This is done until no additional conversions are
	 * necessary.
	 */
	do {
		ctx.idx = 0;
		ctx.gen_b_offsets = 1;
		ctx.long_b_conversion = 0;
		if (gen_int_prologue(&ctx))
			goto out_err;
		if (build_int_body(&ctx))
			goto out_err;
		if (build_int_epilogue(&ctx, MIPS_R_RA))
			goto out_err;
	} while (ctx.long_b_conversion);

	image_size = 4 * ctx.idx;

	header = bpf_jit_binary_alloc(image_size, &image_ptr,
				      sizeof(u32), jit_fill_hole);
	if (header == NULL)
		goto out_err;

	ctx.target = (u32 *)image_ptr;

	/* Third pass generates the code */
	ctx.idx = 0;
	if (gen_int_prologue(&ctx))
		goto out_err;
	if (build_int_body(&ctx))
		goto out_err;
	if (build_int_epilogue(&ctx, MIPS_R_RA))
		goto out_err;

	/* Update the icache; ctx.target is a u32 *, so index by insns, not bytes. */
	flush_icache_range((unsigned long)ctx.target,
			   (unsigned long)&ctx.target[ctx.idx]);

	if (bpf_jit_enable > 1)
		/* Dump JIT code */
		bpf_jit_dump(prog->len, image_size, 2, ctx.target);

	bpf_jit_binary_lock_ro(header);
	prog->bpf_func = (void *)ctx.target;
	prog->jited = 1;
	prog->jited_len = image_size;

out_normal:
	if (tmp_blinded)
		bpf_jit_prog_release_other(prog, prog == orig_prog ?
					   tmp : orig_prog);
	kfree(ctx.offsets);
	kfree(ctx.reg_val_types);
	return prog;

out_err:
	prog = orig_prog;
	if (header)
		bpf_jit_binary_free(header);
	goto out_normal;
}