/*
 * Just-In-Time compiler for eBPF filters on MIPS
 *
 * Copyright (c) 2017 Cavium, Inc.
 *
 * Based on code from:
 *
 * Copyright (c) 2014 Imagination Technologies Ltd.
 * Author: Markos Chandras <markos.chandras@imgtec.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; version 2 of the License.
 */
#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/filter.h>
#include <linux/bpf.h>
#include <linux/slab.h>
#include <asm/bitops.h>
#include <asm/byteorder.h>
#include <asm/cacheflush.h>
#include <asm/cpu-features.h>
#include <asm/uasm.h>

/* Registers used by JIT */
#define MIPS_R_ZERO	0
#define MIPS_R_AT	1
#define MIPS_R_V0	2	/* BPF_R0 */
#define MIPS_R_V1	3
#define MIPS_R_A0	4	/* BPF_R1 */
#define MIPS_R_A1	5	/* BPF_R2 */
#define MIPS_R_A2	6	/* BPF_R3 */
#define MIPS_R_A3	7	/* BPF_R4 */
#define MIPS_R_A4	8	/* BPF_R5 */
#define MIPS_R_T4	12	/* BPF_AX */
#define MIPS_R_T5	13
#define MIPS_R_T6	14
#define MIPS_R_T7	15
#define MIPS_R_S0	16	/* BPF_R6 */
#define MIPS_R_S1	17	/* BPF_R7 */
#define MIPS_R_S2	18	/* BPF_R8 */
#define MIPS_R_S3	19	/* BPF_R9 */
#define MIPS_R_S4	20	/* BPF_TCC */
#define MIPS_R_S5	21
#define MIPS_R_S6	22
#define MIPS_R_S7	23
#define MIPS_R_T8	24
#define MIPS_R_T9	25
#define MIPS_R_SP	29
#define MIPS_R_RA	31

/* eBPF flags */
#define EBPF_SAVE_S0	BIT(0)
#define EBPF_SAVE_S1	BIT(1)
#define EBPF_SAVE_S2	BIT(2)
#define EBPF_SAVE_S3	BIT(3)
#define EBPF_SAVE_S4	BIT(4)
#define EBPF_SAVE_RA	BIT(5)
#define EBPF_SEEN_FP	BIT(6)
#define EBPF_SEEN_TC	BIT(7)
#define EBPF_TCC_IN_V1	BIT(8)
/*
 * For the mips64 ISA, we need to track the value range or type for
 * each JIT register.  The BPF machine requires zero extended 32-bit
 * values, but the mips64 ISA requires sign extended 32-bit values.
 * At each point in the BPF program we track the state of every
 * register so that we can zero extend or sign extend as the BPF
 * semantics require.
 */
enum reg_val_type {
	/* uninitialized */
	REG_UNKNOWN,
	/* not known to be 32-bit compatible. */
	REG_64BIT,
	/* 32-bit compatible, no truncation needed for 64-bit ops. */
	REG_64BIT_32BIT,
	/* 32-bit compatible, need truncation for 64-bit ops. */
	REG_32BIT,
	/* 32-bit zero extended. */
	REG_32BIT_ZERO_EX,
	/* 32-bit no sign/zero extension needed. */
	REG_32BIT_POS
};

/*
 * The high bit of an offsets[] entry indicates whether a long branch
 * conversion was done at this insn.
 */
#define OFFSETS_B_CONV	BIT(31)
/**
 * struct jit_ctx - JIT context
 * @skf:		The sk_filter
 * @stack_size:		eBPF stack size
 * @tmp_offset:		eBPF $sp offset to 8-byte temporary memory
 * @idx:		Instruction index
 * @flags:		JIT flags
 * @offsets:		Instruction offsets
 * @target:		Memory location for the compiled filter
 * @reg_val_types:	Packed enum reg_val_type for each register
 */
struct jit_ctx {
	const struct bpf_prog *skf;
	int stack_size;
	int tmp_offset;
	u32 idx;
	u32 flags;
	u32 *offsets;
	u32 *target;
	u64 *reg_val_types;
	unsigned int long_b_conversion:1;
	unsigned int gen_b_offsets:1;
};
static void set_reg_val_type(u64 *rvt, int reg, enum reg_val_type type)
{
	*rvt &= ~(7ull << (reg * 3));
	*rvt |= ((u64)type << (reg * 3));
}

static enum reg_val_type get_reg_val_type(const struct jit_ctx *ctx,
					  int index, int reg)
{
	return (ctx->reg_val_types[index] >> (reg * 3)) & 7;
}
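
/*
 * Illustration of the packing: each register's type occupies 3 bits
 * at bit position (reg * 3), so e.g. BPF_REG_2's type lives in bits
 * 6..8 of the u64.  With the 12 eBPF registers (R0-R10 plus AX) the
 * types occupy bits 0..35, leaving the top bits free for the
 * RVT_VISITED flags defined further down.
 */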

/* Simply emit the instruction if the JIT memory space has been allocated */
#define emit_instr(ctx, func, ...)			\
do {							\
	if ((ctx)->target != NULL) {			\
		u32 *p = &(ctx)->target[ctx->idx];	\
		uasm_i_##func(&p, ##__VA_ARGS__);	\
	}						\
	(ctx)->idx++;					\
} while (0)
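
/*
 * The JIT runs in two passes: a sizing pass with ctx->target == NULL,
 * where emit_instr() only advances ctx->idx so instruction offsets
 * can be computed, and an emission pass where the uasm call actually
 * writes the word.  For example,
 *
 *	emit_instr(ctx, daddiu, MIPS_R_SP, MIPS_R_SP, -16);
 *
 * counts one slot on the first pass and emits "daddiu $sp, $sp, -16"
 * on the second.
 */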

static unsigned int j_target(struct jit_ctx *ctx, int target_idx)
{
	unsigned long target_va, base_va;
	unsigned int r;

	if (!ctx->target)
		return 0;

	base_va = (unsigned long)ctx->target;
	target_va = base_va + (ctx->offsets[target_idx] & ~OFFSETS_B_CONV);

	if ((base_va & ~0x0ffffffful) != (target_va & ~0x0ffffffful))
		return (unsigned int)-1;
	r = target_va & 0x0ffffffful;
	return r;
}
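
/*
 * The MIPS j instruction can only reach targets within the same
 * 256 MB (28-bit) region as the jump itself: the check above rejects
 * a target whose address differs from the JIT buffer in bits 28 and
 * up, and the value returned is the region-relative address that
 * uasm encodes into the instruction.
 */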

/* Compute the immediate value for PC-relative branches. */
static u32 b_imm(unsigned int tgt, struct jit_ctx *ctx)
{
	if (!ctx->gen_b_offsets)
		return 0;

	/*
	 * We want a pc-relative branch.  tgt is the instruction offset
	 * we want to jump to.
	 *
	 * Branch on MIPS:
	 * I: target_offset <- sign_extend(offset)
	 * I+1: PC += target_offset (delay slot)
	 *
	 * ctx->idx currently points to the branch instruction
	 * but the offset is added to the delay slot so we need
	 * to subtract 4.
	 */
	return (ctx->offsets[tgt] & ~OFFSETS_B_CONV) -
		(ctx->idx * 4) - 4;
}
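
/*
 * Worked example: if the target insn begins at byte offset 64 and the
 * branch is being emitted at ctx->idx == 10 (byte offset 40), b_imm()
 * returns 64 - 40 - 4 = 20, i.e. the displacement from the delay slot
 * (byte 44) to the target.
 */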

int bpf_jit_enable __read_mostly;

enum which_ebpf_reg {
	src_reg,
	src_reg_no_fp,
	dst_reg,
	dst_reg_fp_ok
};

/*
 * For eBPF, the register mapping naturally falls out of the
 * requirements of eBPF and the MIPS n64 ABI.  We don't maintain a
 * separate frame pointer, so BPF_REG_10 relative accesses are
 * adjusted to be $sp relative.
 */
int ebpf_to_mips_reg(struct jit_ctx *ctx, const struct bpf_insn *insn,
		     enum which_ebpf_reg w)
{
	int ebpf_reg = (w == src_reg || w == src_reg_no_fp) ?
		insn->src_reg : insn->dst_reg;

	switch (ebpf_reg) {
	case BPF_REG_0:
		return MIPS_R_V0;
	case BPF_REG_1:
		return MIPS_R_A0;
	case BPF_REG_2:
		return MIPS_R_A1;
	case BPF_REG_3:
		return MIPS_R_A2;
	case BPF_REG_4:
		return MIPS_R_A3;
	case BPF_REG_5:
		return MIPS_R_A4;
	case BPF_REG_6:
		ctx->flags |= EBPF_SAVE_S0;
		return MIPS_R_S0;
	case BPF_REG_7:
		ctx->flags |= EBPF_SAVE_S1;
		return MIPS_R_S1;
	case BPF_REG_8:
		ctx->flags |= EBPF_SAVE_S2;
		return MIPS_R_S2;
	case BPF_REG_9:
		ctx->flags |= EBPF_SAVE_S3;
		return MIPS_R_S3;
	case BPF_REG_10:
		if (w == dst_reg || w == src_reg_no_fp)
			goto bad_reg;
		ctx->flags |= EBPF_SEEN_FP;
		/*
		 * Needs special handling, return something that
		 * cannot be clobbered just in case.
		 */
		return MIPS_R_ZERO;
	case BPF_REG_AX:
		return MIPS_R_T4;
	default:
bad_reg:
		WARN(1, "Illegal bpf reg: %d\n", ebpf_reg);
		return -EINVAL;
	}
}

/*
 * eBPF stack frame will be something like:
 *
 *  Entry $sp ------>    +--------------------------------+
 *                       |   $ra  (optional)              |
 *                       +--------------------------------+
 *                       |   $s0  (optional)              |
 *                       +--------------------------------+
 *                       |   $s1  (optional)              |
 *                       +--------------------------------+
 *                       |   $s2  (optional)              |
 *                       +--------------------------------+
 *                       |   $s3  (optional)              |
 *                       +--------------------------------+
 *                       |   $s4  (optional)              |
 *                       +--------------------------------+
 *                       |   tmp-storage  (if $ra saved)  |
 *  $sp + tmp_offset --> +--------------------------------+ <--BPF_REG_10
 *                       |   BPF_REG_10 relative storage  |
 *                       |    MAX_BPF_STACK (optional)    |
 *                       |      .                         |
 *                       |      .                         |
 *                       |      .                         |
 *  $sp -------->        +--------------------------------+
 *
 * If BPF_REG_10 is never referenced, then the MAX_BPF_STACK sized
 * area is not allocated.
 */
static int gen_int_prologue(struct jit_ctx *ctx)
{
	int stack_adjust = 0;
	int store_offset;
	int locals_size;

	if (ctx->flags & EBPF_SAVE_RA)
		/*
		 * If $ra is being saved, we are doing a function call
		 * and may need an extra 8-byte tmp area.
		 */
		stack_adjust += 16;
	if (ctx->flags & EBPF_SAVE_S0)
		stack_adjust += 8;
	if (ctx->flags & EBPF_SAVE_S1)
		stack_adjust += 8;
	if (ctx->flags & EBPF_SAVE_S2)
		stack_adjust += 8;
	if (ctx->flags & EBPF_SAVE_S3)
		stack_adjust += 8;
	if (ctx->flags & EBPF_SAVE_S4)
		stack_adjust += 8;

	BUILD_BUG_ON(MAX_BPF_STACK & 7);
	locals_size = (ctx->flags & EBPF_SEEN_FP) ? MAX_BPF_STACK : 0;

	stack_adjust += locals_size;
	ctx->tmp_offset = locals_size;

	ctx->stack_size = stack_adjust;

	/*
	 * First instruction initializes the tail call count (TCC).
	 * On tail call we skip this instruction, and the TCC is
	 * passed in $v1 from the caller.
	 */
	emit_instr(ctx, daddiu, MIPS_R_V1, MIPS_R_ZERO, MAX_TAIL_CALL_CNT);
	if (stack_adjust)
		emit_instr(ctx, daddiu, MIPS_R_SP, MIPS_R_SP, -stack_adjust);
	else
		return 0;

	store_offset = stack_adjust - 8;

	if (ctx->flags & EBPF_SAVE_RA) {
		emit_instr(ctx, sd, MIPS_R_RA, store_offset, MIPS_R_SP);
		store_offset -= 8;
	}
	if (ctx->flags & EBPF_SAVE_S0) {
		emit_instr(ctx, sd, MIPS_R_S0, store_offset, MIPS_R_SP);
		store_offset -= 8;
	}
	if (ctx->flags & EBPF_SAVE_S1) {
		emit_instr(ctx, sd, MIPS_R_S1, store_offset, MIPS_R_SP);
		store_offset -= 8;
	}
	if (ctx->flags & EBPF_SAVE_S2) {
		emit_instr(ctx, sd, MIPS_R_S2, store_offset, MIPS_R_SP);
		store_offset -= 8;
	}
	if (ctx->flags & EBPF_SAVE_S3) {
		emit_instr(ctx, sd, MIPS_R_S3, store_offset, MIPS_R_SP);
		store_offset -= 8;
	}
	if (ctx->flags & EBPF_SAVE_S4) {
		emit_instr(ctx, sd, MIPS_R_S4, store_offset, MIPS_R_SP);
		store_offset -= 8;
	}

	if ((ctx->flags & EBPF_SEEN_TC) && !(ctx->flags & EBPF_TCC_IN_V1))
		emit_instr(ctx, daddu, MIPS_R_S4, MIPS_R_V1, MIPS_R_ZERO);

	return 0;
}
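
/*
 * Sizing example: a program that makes a helper call (EBPF_SAVE_RA),
 * uses one callee-saved register (EBPF_SAVE_S0) and references
 * BPF_REG_10 (EBPF_SEEN_FP) gets stack_adjust = 16 + 8 + MAX_BPF_STACK
 * and tmp_offset = MAX_BPF_STACK: the locals sit at the bottom of the
 * frame and the register save area at the top, as in the diagram
 * above.
 */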

static int build_int_epilogue(struct jit_ctx *ctx, int dest_reg)
{
	const struct bpf_prog *prog = ctx->skf;
	int stack_adjust = ctx->stack_size;
	int store_offset = stack_adjust - 8;
	int r0 = MIPS_R_V0;

	if (dest_reg == MIPS_R_RA &&
	    get_reg_val_type(ctx, prog->len, BPF_REG_0) == REG_32BIT_ZERO_EX)
		/* Don't let zero extended value escape. */
		emit_instr(ctx, sll, r0, r0, 0);

	if (ctx->flags & EBPF_SAVE_RA) {
		emit_instr(ctx, ld, MIPS_R_RA, store_offset, MIPS_R_SP);
		store_offset -= 8;
	}
	if (ctx->flags & EBPF_SAVE_S0) {
		emit_instr(ctx, ld, MIPS_R_S0, store_offset, MIPS_R_SP);
		store_offset -= 8;
	}
	if (ctx->flags & EBPF_SAVE_S1) {
		emit_instr(ctx, ld, MIPS_R_S1, store_offset, MIPS_R_SP);
		store_offset -= 8;
	}
	if (ctx->flags & EBPF_SAVE_S2) {
		emit_instr(ctx, ld, MIPS_R_S2, store_offset, MIPS_R_SP);
		store_offset -= 8;
	}
	if (ctx->flags & EBPF_SAVE_S3) {
		emit_instr(ctx, ld, MIPS_R_S3, store_offset, MIPS_R_SP);
		store_offset -= 8;
	}
	if (ctx->flags & EBPF_SAVE_S4) {
		emit_instr(ctx, ld, MIPS_R_S4, store_offset, MIPS_R_SP);
		store_offset -= 8;
	}
	emit_instr(ctx, jr, dest_reg);

	if (stack_adjust)
		emit_instr(ctx, daddiu, MIPS_R_SP, MIPS_R_SP, stack_adjust);
	else
		emit_instr(ctx, nop);

	return 0;
}

static void gen_imm_to_reg(const struct bpf_insn *insn, int reg,
			   struct jit_ctx *ctx)
{
	if (insn->imm >= S16_MIN && insn->imm <= S16_MAX) {
		emit_instr(ctx, addiu, reg, MIPS_R_ZERO, insn->imm);
	} else {
		int lower = (s16)(insn->imm & 0xffff);
		int upper = insn->imm - lower;

		emit_instr(ctx, lui, reg, upper >> 16);
		emit_instr(ctx, addiu, reg, reg, lower);
	}
}
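
/*
 * Worked example: for insn->imm == 0x12348765, lower == (s16)0x8765
 * == -0x789b, so upper == 0x12348765 + 0x789b == 0x12350000 and we
 * emit:
 *
 *	lui	reg, 0x1235
 *	addiu	reg, reg, -0x789b
 *
 * The extra 0x10000 in the lui compensates for the borrow caused by
 * the sign-extending addiu, leaving the sign-extended 32-bit value
 * 0x12348765 in the register.
 */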

static int gen_imm_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
			int idx)
{
	int upper_bound, lower_bound;
	int dst = ebpf_to_mips_reg(ctx, insn, dst_reg);

	if (dst < 0)
		return dst;

	switch (BPF_OP(insn->code)) {
	case BPF_MOV:
	case BPF_ADD:
		upper_bound = S16_MAX;
		lower_bound = S16_MIN;
		break;
	case BPF_SUB:
		upper_bound = -(int)S16_MIN;
		lower_bound = -(int)S16_MAX;
		break;
	case BPF_AND:
	case BPF_OR:
	case BPF_XOR:
		upper_bound = 0xffff;
		lower_bound = 0;
		break;
	case BPF_RSH:
	case BPF_LSH:
	case BPF_ARSH:
		/* Shift amounts are truncated, no need for bounds */
		upper_bound = S32_MAX;
		lower_bound = S32_MIN;
		break;
	default:
		return -EINVAL;
	}

	/*
	 * Immediate move clobbers the register, so no sign/zero
	 * extension needed.
	 */
	if (BPF_CLASS(insn->code) == BPF_ALU64 &&
	    BPF_OP(insn->code) != BPF_MOV &&
	    get_reg_val_type(ctx, idx, insn->dst_reg) == REG_32BIT)
		emit_instr(ctx, dinsu, dst, MIPS_R_ZERO, 32, 32);
	/* BPF_ALU | BPF_LSH doesn't need separate sign extension */
	if (BPF_CLASS(insn->code) == BPF_ALU &&
	    BPF_OP(insn->code) != BPF_LSH &&
	    BPF_OP(insn->code) != BPF_MOV &&
	    get_reg_val_type(ctx, idx, insn->dst_reg) != REG_32BIT)
		emit_instr(ctx, sll, dst, dst, 0);

	if (insn->imm >= lower_bound && insn->imm <= upper_bound) {
		/* single insn immediate case */
		switch (BPF_OP(insn->code) | BPF_CLASS(insn->code)) {
		case BPF_ALU64 | BPF_MOV:
			emit_instr(ctx, daddiu, dst, MIPS_R_ZERO, insn->imm);
			break;
		case BPF_ALU64 | BPF_AND:
		case BPF_ALU | BPF_AND:
			emit_instr(ctx, andi, dst, dst, insn->imm);
			break;
		case BPF_ALU64 | BPF_OR:
		case BPF_ALU | BPF_OR:
			emit_instr(ctx, ori, dst, dst, insn->imm);
			break;
		case BPF_ALU64 | BPF_XOR:
		case BPF_ALU | BPF_XOR:
			emit_instr(ctx, xori, dst, dst, insn->imm);
			break;
		case BPF_ALU64 | BPF_ADD:
			emit_instr(ctx, daddiu, dst, dst, insn->imm);
			break;
		case BPF_ALU64 | BPF_SUB:
			emit_instr(ctx, daddiu, dst, dst, -insn->imm);
			break;
		case BPF_ALU64 | BPF_RSH:
			emit_instr(ctx, dsrl_safe, dst, dst, insn->imm & 0x3f);
			break;
		case BPF_ALU | BPF_RSH:
			emit_instr(ctx, srl, dst, dst, insn->imm & 0x1f);
			break;
		case BPF_ALU64 | BPF_LSH:
			emit_instr(ctx, dsll_safe, dst, dst, insn->imm & 0x3f);
			break;
		case BPF_ALU | BPF_LSH:
			emit_instr(ctx, sll, dst, dst, insn->imm & 0x1f);
			break;
		case BPF_ALU64 | BPF_ARSH:
			emit_instr(ctx, dsra_safe, dst, dst, insn->imm & 0x3f);
			break;
		case BPF_ALU | BPF_ARSH:
			emit_instr(ctx, sra, dst, dst, insn->imm & 0x1f);
			break;
		case BPF_ALU | BPF_MOV:
			emit_instr(ctx, addiu, dst, MIPS_R_ZERO, insn->imm);
			break;
		case BPF_ALU | BPF_ADD:
			emit_instr(ctx, addiu, dst, dst, insn->imm);
			break;
		case BPF_ALU | BPF_SUB:
			emit_instr(ctx, addiu, dst, dst, -insn->imm);
			break;
		default:
			return -EINVAL;
		}
	} else {
		/* multi insn immediate case */
		if (BPF_OP(insn->code) == BPF_MOV) {
			gen_imm_to_reg(insn, dst, ctx);
		} else {
			gen_imm_to_reg(insn, MIPS_R_AT, ctx);
			switch (BPF_OP(insn->code) | BPF_CLASS(insn->code)) {
			case BPF_ALU64 | BPF_AND:
			case BPF_ALU | BPF_AND:
				emit_instr(ctx, and, dst, dst, MIPS_R_AT);
				break;
			case BPF_ALU64 | BPF_OR:
			case BPF_ALU | BPF_OR:
				emit_instr(ctx, or, dst, dst, MIPS_R_AT);
				break;
			case BPF_ALU64 | BPF_XOR:
			case BPF_ALU | BPF_XOR:
				emit_instr(ctx, xor, dst, dst, MIPS_R_AT);
				break;
			case BPF_ALU64 | BPF_ADD:
				emit_instr(ctx, daddu, dst, dst, MIPS_R_AT);
				break;
			case BPF_ALU64 | BPF_SUB:
				emit_instr(ctx, dsubu, dst, dst, MIPS_R_AT);
				break;
			case BPF_ALU | BPF_ADD:
				emit_instr(ctx, addu, dst, dst, MIPS_R_AT);
				break;
			case BPF_ALU | BPF_SUB:
				emit_instr(ctx, subu, dst, dst, MIPS_R_AT);
				break;
			default:
				return -EINVAL;
			}
		}
	}

	return 0;
}

static void * __must_check
ool_skb_header_pointer(const struct sk_buff *skb, int offset,
		       int len, void *buffer)
{
	return skb_header_pointer(skb, offset, len, buffer);
}

static int size_to_len(const struct bpf_insn *insn)
{
	switch (BPF_SIZE(insn->code)) {
	case BPF_B:
		return 1;
	case BPF_H:
		return 2;
	case BPF_W:
		return 4;
	case BPF_DW:
		return 8;
	}
	return 0;
}

static void emit_const_to_reg(struct jit_ctx *ctx, int dst, u64 value)
{
	if (value >= 0xffffffffffff8000ull || value < 0x8000ull) {
		emit_instr(ctx, daddiu, dst, MIPS_R_ZERO, (int)value);
	} else if (value >= 0xffffffff80000000ull ||
		   (value < 0x80000000 && value > 0xffff)) {
		emit_instr(ctx, lui, dst, (s32)(s16)(value >> 16));
		emit_instr(ctx, ori, dst, dst, (unsigned int)(value & 0xffff));
	} else {
		int i;
		bool seen_part = false;
		int needed_shift = 0;

		for (i = 0; i < 4; i++) {
			u64 part = (value >> (16 * (3 - i))) & 0xffff;

			if (seen_part && needed_shift > 0 && (part || i == 3)) {
				emit_instr(ctx, dsll_safe, dst, dst, needed_shift);
				needed_shift = 0;
			}
			if (part) {
				if (i == 0 || (!seen_part && i < 3 && part < 0x8000)) {
					emit_instr(ctx, lui, dst, (s32)(s16)part);
					needed_shift = -16;
				} else {
					emit_instr(ctx, ori, dst,
						   seen_part ? dst : MIPS_R_ZERO,
						   (unsigned int)part);
				}
				seen_part = true;
			}
			if (seen_part)
				needed_shift += 16;
		}
	}
}
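
/*
 * Worked example for the general case: value == 0x123456789abcdef0
 * splits into the 16-bit parts 0x1234, 0x5678, 0x9abc, 0xdef0, and
 * the loop emits:
 *
 *	lui	dst, 0x1234
 *	ori	dst, dst, 0x5678
 *	dsll	dst, dst, 16
 *	ori	dst, dst, 0x9abc
 *	dsll	dst, dst, 16
 *	ori	dst, dst, 0xdef0
 *
 * Zero parts are skipped (their shifts accumulate into one larger
 * dsll), so sparse constants take fewer instructions.
 */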

static int emit_bpf_tail_call(struct jit_ctx *ctx, int this_idx)
{
	int off, b_off;

	ctx->flags |= EBPF_SEEN_TC;
	/*
	 * if (index >= array->map.max_entries)
	 *     goto out;
	 */
	off = offsetof(struct bpf_array, map.max_entries);
	emit_instr(ctx, lwu, MIPS_R_T5, off, MIPS_R_A1);
	emit_instr(ctx, sltu, MIPS_R_AT, MIPS_R_T5, MIPS_R_A2);
	b_off = b_imm(this_idx + 1, ctx);
	emit_instr(ctx, bne, MIPS_R_AT, MIPS_R_ZERO, b_off);
	/*
	 * if (--TCC < 0)
	 *     goto out;
	 */
	/* Delay slot */
	emit_instr(ctx, daddiu, MIPS_R_T5,
		   (ctx->flags & EBPF_TCC_IN_V1) ? MIPS_R_V1 : MIPS_R_S4, -1);
	b_off = b_imm(this_idx + 1, ctx);
	emit_instr(ctx, bltz, MIPS_R_T5, b_off);
	/*
	 * prog = array->ptrs[index];
	 * if (prog == NULL)
	 *     goto out;
	 */
	/* Delay slot */
	emit_instr(ctx, dsll, MIPS_R_T8, MIPS_R_A2, 3);
	emit_instr(ctx, daddu, MIPS_R_T8, MIPS_R_T8, MIPS_R_A1);
	off = offsetof(struct bpf_array, ptrs);
	emit_instr(ctx, ld, MIPS_R_AT, off, MIPS_R_T8);
	b_off = b_imm(this_idx + 1, ctx);
	emit_instr(ctx, beq, MIPS_R_AT, MIPS_R_ZERO, b_off);
	/* Delay slot */
	emit_instr(ctx, nop);

	/* goto *(prog->bpf_func + 4); */
	off = offsetof(struct bpf_prog, bpf_func);
	emit_instr(ctx, ld, MIPS_R_T9, off, MIPS_R_AT);
	/* All systems are go... propagate TCC */
	emit_instr(ctx, daddu, MIPS_R_V1, MIPS_R_T5, MIPS_R_ZERO);
	/* Skip first instruction (TCC initialization) */
	emit_instr(ctx, daddiu, MIPS_R_T9, MIPS_R_T9, 4);
	return build_int_epilogue(ctx, MIPS_R_T9);
}
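
/*
 * Net effect, sketched in pseudo-assembly (bounds check, TCC check,
 * NULL check, then a jump into the target program 4 bytes past its
 * entry so the "daddiu $v1, $zero, MAX_TAIL_CALL_CNT" in its prologue
 * is skipped and the decremented TCC in $v1 survives):
 *
 *	lwu	$t5, max_entries($a1)
 *	sltu	$at, $t5, $a2
 *	bne	$at, $zero, out
 *	 daddiu	$t5, TCC, -1		# delay slot
 *	bltz	$t5, out
 *	 dsll	$t8, $a2, 3		# delay slot
 *	daddu	$t8, $t8, $a1
 *	ld	$at, ptrs($t8)
 *	beq	$at, $zero, out
 *	 nop
 *	ld	$t9, bpf_func($at)
 *	daddu	$v1, $t5, $zero
 *	daddiu	$t9, $t9, 4
 *	... epilogue, ending in "jr $t9"
 */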

static bool use_bbit_insns(void)
{
	switch (current_cpu_type()) {
	case CPU_CAVIUM_OCTEON:
	case CPU_CAVIUM_OCTEON_PLUS:
	case CPU_CAVIUM_OCTEON2:
	case CPU_CAVIUM_OCTEON3:
		return true;
	default:
		return false;
	}
}

static bool is_bad_offset(int b_off)
{
	return b_off > 0x1ffff || b_off < -0x20000;
}
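
/*
 * A MIPS conditional branch encodes a signed 16-bit word offset,
 * i.e. a byte displacement of roughly +/-128 KB.  Offsets outside
 * [-0x20000, 0x1ffff] make b_imm()'s result unusable, and callers
 * fall back to an absolute j via j_target() (the OFFSETS_B_CONV
 * machinery used below).
 */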

/* Returns the number of insn slots consumed. */
static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
			  int this_idx, int exit_idx)
{
	int src, dst, r, td, ts, mem_off, b_off;
	bool need_swap, did_move, cmp_eq;
	unsigned int target;
	u64 t64;
	s64 t64s;

	switch (insn->code) {
	case BPF_ALU64 | BPF_ADD | BPF_K: /* ALU64_IMM */
	case BPF_ALU64 | BPF_SUB | BPF_K: /* ALU64_IMM */
	case BPF_ALU64 | BPF_OR | BPF_K: /* ALU64_IMM */
	case BPF_ALU64 | BPF_AND | BPF_K: /* ALU64_IMM */
	case BPF_ALU64 | BPF_LSH | BPF_K: /* ALU64_IMM */
	case BPF_ALU64 | BPF_RSH | BPF_K: /* ALU64_IMM */
	case BPF_ALU64 | BPF_XOR | BPF_K: /* ALU64_IMM */
	case BPF_ALU64 | BPF_ARSH | BPF_K: /* ALU64_IMM */
	case BPF_ALU64 | BPF_MOV | BPF_K: /* ALU64_IMM */
	case BPF_ALU | BPF_MOV | BPF_K: /* ALU32_IMM */
	case BPF_ALU | BPF_ADD | BPF_K: /* ALU32_IMM */
	case BPF_ALU | BPF_SUB | BPF_K: /* ALU32_IMM */
	case BPF_ALU | BPF_OR | BPF_K: /* ALU32_IMM */
	case BPF_ALU | BPF_AND | BPF_K: /* ALU32_IMM */
	case BPF_ALU | BPF_LSH | BPF_K: /* ALU32_IMM */
	case BPF_ALU | BPF_RSH | BPF_K: /* ALU32_IMM */
	case BPF_ALU | BPF_XOR | BPF_K: /* ALU32_IMM */
	case BPF_ALU | BPF_ARSH | BPF_K: /* ALU32_IMM */
		r = gen_imm_insn(insn, ctx, this_idx);
		if (r < 0)
			return r;
		break;
	case BPF_ALU64 | BPF_MUL | BPF_K: /* ALU64_IMM */
		dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
		if (dst < 0)
			return dst;
		if (get_reg_val_type(ctx, this_idx, insn->dst_reg) == REG_32BIT)
			emit_instr(ctx, dinsu, dst, MIPS_R_ZERO, 32, 32);
		if (insn->imm == 1) /* Mult by 1 is a nop */
			break;
		gen_imm_to_reg(insn, MIPS_R_AT, ctx);
		emit_instr(ctx, dmultu, MIPS_R_AT, dst);
		emit_instr(ctx, mflo, dst);
		break;
	case BPF_ALU64 | BPF_NEG | BPF_K: /* ALU64_IMM */
		dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
		if (dst < 0)
			return dst;
		if (get_reg_val_type(ctx, this_idx, insn->dst_reg) == REG_32BIT)
			emit_instr(ctx, dinsu, dst, MIPS_R_ZERO, 32, 32);
		emit_instr(ctx, dsubu, dst, MIPS_R_ZERO, dst);
		break;
	case BPF_ALU | BPF_MUL | BPF_K: /* ALU_IMM */
		dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
		if (dst < 0)
			return dst;
		td = get_reg_val_type(ctx, this_idx, insn->dst_reg);
		if (td == REG_64BIT || td == REG_32BIT_ZERO_EX) {
			/* sign extend */
			emit_instr(ctx, sll, dst, dst, 0);
		}
		if (insn->imm == 1) /* Mult by 1 is a nop */
			break;
		gen_imm_to_reg(insn, MIPS_R_AT, ctx);
		emit_instr(ctx, multu, dst, MIPS_R_AT);
		emit_instr(ctx, mflo, dst);
		break;
	case BPF_ALU | BPF_NEG | BPF_K: /* ALU_IMM */
		dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
		if (dst < 0)
			return dst;
		td = get_reg_val_type(ctx, this_idx, insn->dst_reg);
		if (td == REG_64BIT || td == REG_32BIT_ZERO_EX) {
			/* sign extend */
			emit_instr(ctx, sll, dst, dst, 0);
		}
		emit_instr(ctx, subu, dst, MIPS_R_ZERO, dst);
		break;
	case BPF_ALU | BPF_DIV | BPF_K: /* ALU_IMM */
	case BPF_ALU | BPF_MOD | BPF_K: /* ALU_IMM */
		dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
		if (dst < 0)
			return dst;
		if (insn->imm == 0) { /* Div by zero */
			b_off = b_imm(exit_idx, ctx);
			if (is_bad_offset(b_off))
				return -E2BIG;
			emit_instr(ctx, beq, MIPS_R_ZERO, MIPS_R_ZERO, b_off);
			emit_instr(ctx, addu, MIPS_R_V0, MIPS_R_ZERO, MIPS_R_ZERO);
		}
		td = get_reg_val_type(ctx, this_idx, insn->dst_reg);
		if (td == REG_64BIT || td == REG_32BIT_ZERO_EX)
			/* sign extend */
			emit_instr(ctx, sll, dst, dst, 0);
		if (insn->imm == 1) {
			/* div by 1 is a nop, mod by 1 is zero */
			if (BPF_OP(insn->code) == BPF_MOD)
				emit_instr(ctx, addu, dst, MIPS_R_ZERO, MIPS_R_ZERO);
			break;
		}
		gen_imm_to_reg(insn, MIPS_R_AT, ctx);
		emit_instr(ctx, divu, dst, MIPS_R_AT);
		if (BPF_OP(insn->code) == BPF_DIV)
			emit_instr(ctx, mflo, dst);
		else
			emit_instr(ctx, mfhi, dst);
		break;
	case BPF_ALU64 | BPF_DIV | BPF_K: /* ALU64_IMM */
	case BPF_ALU64 | BPF_MOD | BPF_K: /* ALU64_IMM */
		dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
		if (dst < 0)
			return dst;
		if (insn->imm == 0) { /* Div by zero */
			b_off = b_imm(exit_idx, ctx);
			if (is_bad_offset(b_off))
				return -E2BIG;
			emit_instr(ctx, beq, MIPS_R_ZERO, MIPS_R_ZERO, b_off);
			emit_instr(ctx, addu, MIPS_R_V0, MIPS_R_ZERO, MIPS_R_ZERO);
		}
		if (get_reg_val_type(ctx, this_idx, insn->dst_reg) == REG_32BIT)
			emit_instr(ctx, dinsu, dst, MIPS_R_ZERO, 32, 32);
		if (insn->imm == 1) {
			/* div by 1 is a nop, mod by 1 is zero */
			if (BPF_OP(insn->code) == BPF_MOD)
				emit_instr(ctx, addu, dst, MIPS_R_ZERO, MIPS_R_ZERO);
			break;
		}
		gen_imm_to_reg(insn, MIPS_R_AT, ctx);
		emit_instr(ctx, ddivu, dst, MIPS_R_AT);
		if (BPF_OP(insn->code) == BPF_DIV)
			emit_instr(ctx, mflo, dst);
		else
			emit_instr(ctx, mfhi, dst);
		break;
	case BPF_ALU64 | BPF_MOV | BPF_X: /* ALU64_REG */
	case BPF_ALU64 | BPF_ADD | BPF_X: /* ALU64_REG */
	case BPF_ALU64 | BPF_SUB | BPF_X: /* ALU64_REG */
	case BPF_ALU64 | BPF_XOR | BPF_X: /* ALU64_REG */
	case BPF_ALU64 | BPF_OR | BPF_X: /* ALU64_REG */
	case BPF_ALU64 | BPF_AND | BPF_X: /* ALU64_REG */
	case BPF_ALU64 | BPF_MUL | BPF_X: /* ALU64_REG */
	case BPF_ALU64 | BPF_DIV | BPF_X: /* ALU64_REG */
	case BPF_ALU64 | BPF_MOD | BPF_X: /* ALU64_REG */
	case BPF_ALU64 | BPF_LSH | BPF_X: /* ALU64_REG */
	case BPF_ALU64 | BPF_RSH | BPF_X: /* ALU64_REG */
	case BPF_ALU64 | BPF_ARSH | BPF_X: /* ALU64_REG */
		src = ebpf_to_mips_reg(ctx, insn, src_reg);
		dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
		if (src < 0 || dst < 0)
			return -EINVAL;
		if (get_reg_val_type(ctx, this_idx, insn->dst_reg) == REG_32BIT)
			emit_instr(ctx, dinsu, dst, MIPS_R_ZERO, 32, 32);
		did_move = false;
		if (insn->src_reg == BPF_REG_10) {
			if (BPF_OP(insn->code) == BPF_MOV) {
				emit_instr(ctx, daddiu, dst, MIPS_R_SP, MAX_BPF_STACK);
				did_move = true;
			} else {
				emit_instr(ctx, daddiu, MIPS_R_AT, MIPS_R_SP, MAX_BPF_STACK);
				src = MIPS_R_AT;
			}
		} else if (get_reg_val_type(ctx, this_idx, insn->src_reg) == REG_32BIT) {
			int tmp_reg = MIPS_R_AT;

			if (BPF_OP(insn->code) == BPF_MOV) {
				tmp_reg = dst;
				did_move = true;
			}
			emit_instr(ctx, daddu, tmp_reg, src, MIPS_R_ZERO);
			emit_instr(ctx, dinsu, tmp_reg, MIPS_R_ZERO, 32, 32);
			src = MIPS_R_AT;
		}
		switch (BPF_OP(insn->code)) {
		case BPF_MOV:
			if (!did_move)
				emit_instr(ctx, daddu, dst, src, MIPS_R_ZERO);
			break;
		case BPF_ADD:
			emit_instr(ctx, daddu, dst, dst, src);
			break;
		case BPF_SUB:
			emit_instr(ctx, dsubu, dst, dst, src);
			break;
		case BPF_XOR:
			emit_instr(ctx, xor, dst, dst, src);
			break;
		case BPF_OR:
			emit_instr(ctx, or, dst, dst, src);
			break;
		case BPF_AND:
			emit_instr(ctx, and, dst, dst, src);
			break;
		case BPF_MUL:
			emit_instr(ctx, dmultu, dst, src);
			emit_instr(ctx, mflo, dst);
			break;
		case BPF_DIV:
		case BPF_MOD:
			b_off = b_imm(exit_idx, ctx);
			if (is_bad_offset(b_off))
				return -E2BIG;
			emit_instr(ctx, beq, src, MIPS_R_ZERO, b_off);
			emit_instr(ctx, movz, MIPS_R_V0, MIPS_R_ZERO, src);
			emit_instr(ctx, ddivu, dst, src);
			if (BPF_OP(insn->code) == BPF_DIV)
				emit_instr(ctx, mflo, dst);
			else
				emit_instr(ctx, mfhi, dst);
			break;
		case BPF_LSH:
			emit_instr(ctx, dsllv, dst, dst, src);
			break;
		case BPF_RSH:
			emit_instr(ctx, dsrlv, dst, dst, src);
			break;
		case BPF_ARSH:
			emit_instr(ctx, dsrav, dst, dst, src);
			break;
		default:
			pr_err("ALU64_REG NOT HANDLED\n");
			return -EINVAL;
		}
		break;
	case BPF_ALU | BPF_MOV | BPF_X: /* ALU_REG */
	case BPF_ALU | BPF_ADD | BPF_X: /* ALU_REG */
	case BPF_ALU | BPF_SUB | BPF_X: /* ALU_REG */
	case BPF_ALU | BPF_XOR | BPF_X: /* ALU_REG */
	case BPF_ALU | BPF_OR | BPF_X: /* ALU_REG */
	case BPF_ALU | BPF_AND | BPF_X: /* ALU_REG */
	case BPF_ALU | BPF_MUL | BPF_X: /* ALU_REG */
	case BPF_ALU | BPF_DIV | BPF_X: /* ALU_REG */
	case BPF_ALU | BPF_MOD | BPF_X: /* ALU_REG */
	case BPF_ALU | BPF_LSH | BPF_X: /* ALU_REG */
	case BPF_ALU | BPF_RSH | BPF_X: /* ALU_REG */
		src = ebpf_to_mips_reg(ctx, insn, src_reg_no_fp);
		dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
		if (src < 0 || dst < 0)
			return -EINVAL;
		td = get_reg_val_type(ctx, this_idx, insn->dst_reg);
		if (td == REG_64BIT || td == REG_32BIT_ZERO_EX) {
			/* sign extend */
			emit_instr(ctx, sll, dst, dst, 0);
		}
		did_move = false;
		ts = get_reg_val_type(ctx, this_idx, insn->src_reg);
		if (ts == REG_64BIT || ts == REG_32BIT_ZERO_EX) {
			int tmp_reg = MIPS_R_AT;

			if (BPF_OP(insn->code) == BPF_MOV) {
				tmp_reg = dst;
				did_move = true;
			}
			/* sign extend */
			emit_instr(ctx, sll, tmp_reg, src, 0);
			src = MIPS_R_AT;
		}
		switch (BPF_OP(insn->code)) {
		case BPF_MOV:
			if (!did_move)
				emit_instr(ctx, addu, dst, src, MIPS_R_ZERO);
			break;
		case BPF_ADD:
			emit_instr(ctx, addu, dst, dst, src);
			break;
		case BPF_SUB:
			emit_instr(ctx, subu, dst, dst, src);
			break;
		case BPF_XOR:
			emit_instr(ctx, xor, dst, dst, src);
			break;
		case BPF_OR:
			emit_instr(ctx, or, dst, dst, src);
			break;
		case BPF_AND:
			emit_instr(ctx, and, dst, dst, src);
			break;
		case BPF_MUL:
			emit_instr(ctx, mul, dst, dst, src);
			break;
		case BPF_DIV:
		case BPF_MOD:
			b_off = b_imm(exit_idx, ctx);
			if (is_bad_offset(b_off))
				return -E2BIG;
			emit_instr(ctx, beq, src, MIPS_R_ZERO, b_off);
			emit_instr(ctx, movz, MIPS_R_V0, MIPS_R_ZERO, src);
			emit_instr(ctx, divu, dst, src);
			if (BPF_OP(insn->code) == BPF_DIV)
				emit_instr(ctx, mflo, dst);
			else
				emit_instr(ctx, mfhi, dst);
			break;
		case BPF_LSH:
			emit_instr(ctx, sllv, dst, dst, src);
			break;
		case BPF_RSH:
			emit_instr(ctx, srlv, dst, dst, src);
			break;
		default:
			pr_err("ALU_REG NOT HANDLED\n");
			return -EINVAL;
		}
		break;
	case BPF_JMP | BPF_EXIT:
		if (this_idx + 1 < exit_idx) {
			b_off = b_imm(exit_idx, ctx);
			if (is_bad_offset(b_off))
				return -E2BIG;
			emit_instr(ctx, beq, MIPS_R_ZERO, MIPS_R_ZERO, b_off);
			emit_instr(ctx, nop);
		}
		break;
	case BPF_JMP | BPF_JEQ | BPF_K: /* JMP_IMM */
	case BPF_JMP | BPF_JNE | BPF_K: /* JMP_IMM */
		cmp_eq = (BPF_OP(insn->code) == BPF_JEQ);
		dst = ebpf_to_mips_reg(ctx, insn, dst_reg_fp_ok);
		if (dst < 0)
			return dst;
		if (insn->imm == 0) {
			src = MIPS_R_ZERO;
		} else {
			gen_imm_to_reg(insn, MIPS_R_AT, ctx);
			src = MIPS_R_AT;
		}
		goto jeq_common;
	case BPF_JMP | BPF_JEQ | BPF_X: /* JMP_REG */
	case BPF_JMP | BPF_JNE | BPF_X:
	case BPF_JMP | BPF_JSGT | BPF_X:
	case BPF_JMP | BPF_JSGE | BPF_X:
	case BPF_JMP | BPF_JGT | BPF_X:
	case BPF_JMP | BPF_JGE | BPF_X:
	case BPF_JMP | BPF_JSET | BPF_X:
		src = ebpf_to_mips_reg(ctx, insn, src_reg_no_fp);
		dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
		if (src < 0 || dst < 0)
			return -EINVAL;
		td = get_reg_val_type(ctx, this_idx, insn->dst_reg);
		ts = get_reg_val_type(ctx, this_idx, insn->src_reg);
		if (td == REG_32BIT && ts != REG_32BIT) {
			emit_instr(ctx, sll, MIPS_R_AT, src, 0);
			src = MIPS_R_AT;
		} else if (ts == REG_32BIT && td != REG_32BIT) {
			emit_instr(ctx, sll, MIPS_R_AT, dst, 0);
			dst = MIPS_R_AT;
		}
		if (BPF_OP(insn->code) == BPF_JSET) {
			emit_instr(ctx, and, MIPS_R_AT, dst, src);
			cmp_eq = false;
			dst = MIPS_R_AT;
			src = MIPS_R_ZERO;
		} else if (BPF_OP(insn->code) == BPF_JSGT) {
			emit_instr(ctx, dsubu, MIPS_R_AT, dst, src);
			if ((insn + 1)->code == (BPF_JMP | BPF_EXIT) && insn->off == 1) {
				b_off = b_imm(exit_idx, ctx);
				if (is_bad_offset(b_off))
					return -E2BIG;
				emit_instr(ctx, blez, MIPS_R_AT, b_off);
				emit_instr(ctx, nop);
				return 2; /* We consumed the exit. */
			}
			b_off = b_imm(this_idx + insn->off + 1, ctx);
			if (is_bad_offset(b_off))
				return -E2BIG;
			emit_instr(ctx, bgtz, MIPS_R_AT, b_off);
			emit_instr(ctx, nop);
			break;
		} else if (BPF_OP(insn->code) == BPF_JSGE) {
			emit_instr(ctx, slt, MIPS_R_AT, dst, src);
			cmp_eq = true;
			dst = MIPS_R_AT;
			src = MIPS_R_ZERO;
		} else if (BPF_OP(insn->code) == BPF_JGT) {
			/* dst or src could be AT */
			emit_instr(ctx, dsubu, MIPS_R_T8, dst, src);
			emit_instr(ctx, sltu, MIPS_R_AT, dst, src);
			/* SP known to be non-zero, movz becomes boolean not */
			emit_instr(ctx, movz, MIPS_R_T9, MIPS_R_SP, MIPS_R_T8);
			emit_instr(ctx, movn, MIPS_R_T9, MIPS_R_ZERO, MIPS_R_T8);
			emit_instr(ctx, or, MIPS_R_AT, MIPS_R_T9, MIPS_R_AT);
			cmp_eq = true;
			dst = MIPS_R_AT;
			src = MIPS_R_ZERO;
		} else if (BPF_OP(insn->code) == BPF_JGE) {
			emit_instr(ctx, sltu, MIPS_R_AT, dst, src);
			cmp_eq = true;
			dst = MIPS_R_AT;
			src = MIPS_R_ZERO;
		} else { /* JNE/JEQ case */
			cmp_eq = (BPF_OP(insn->code) == BPF_JEQ);
		}
jeq_common:
		/*
		 * If the next insn is EXIT and we are jumping around
		 * only it, invert the sense of the compare and
		 * conditionally jump to the exit.  Poor man's branch
		 * chaining.
		 */
		if ((insn + 1)->code == (BPF_JMP | BPF_EXIT) && insn->off == 1) {
			b_off = b_imm(exit_idx, ctx);
			if (is_bad_offset(b_off)) {
				target = j_target(ctx, exit_idx);
				if (target == (unsigned int)-1)
					return -E2BIG;
				cmp_eq = !cmp_eq;
				b_off = 4 * 3;
				if (!(ctx->offsets[this_idx] & OFFSETS_B_CONV)) {
					ctx->offsets[this_idx] |= OFFSETS_B_CONV;
					ctx->long_b_conversion = 1;
				}
			}

			if (cmp_eq)
				emit_instr(ctx, bne, dst, src, b_off);
			else
				emit_instr(ctx, beq, dst, src, b_off);
			emit_instr(ctx, nop);
			if (ctx->offsets[this_idx] & OFFSETS_B_CONV) {
				emit_instr(ctx, j, target);
				emit_instr(ctx, nop);
			}
			return 2; /* We consumed the exit. */
		}
		b_off = b_imm(this_idx + insn->off + 1, ctx);
		if (is_bad_offset(b_off)) {
			target = j_target(ctx, this_idx + insn->off + 1);
			if (target == (unsigned int)-1)
				return -E2BIG;
			cmp_eq = !cmp_eq;
			b_off = 4 * 3;
			if (!(ctx->offsets[this_idx] & OFFSETS_B_CONV)) {
				ctx->offsets[this_idx] |= OFFSETS_B_CONV;
				ctx->long_b_conversion = 1;
			}
		}

		if (cmp_eq)
			emit_instr(ctx, beq, dst, src, b_off);
		else
			emit_instr(ctx, bne, dst, src, b_off);
		emit_instr(ctx, nop);
		if (ctx->offsets[this_idx] & OFFSETS_B_CONV) {
			emit_instr(ctx, j, target);
			emit_instr(ctx, nop);
		}
		break;
	case BPF_JMP | BPF_JSGT | BPF_K: /* JMP_IMM */
	case BPF_JMP | BPF_JSGE | BPF_K: /* JMP_IMM */
		cmp_eq = (BPF_OP(insn->code) == BPF_JSGE);
		dst = ebpf_to_mips_reg(ctx, insn, dst_reg_fp_ok);
		if (dst < 0)
			return dst;

		if (insn->imm == 0) {
			if ((insn + 1)->code == (BPF_JMP | BPF_EXIT) && insn->off == 1) {
				b_off = b_imm(exit_idx, ctx);
				if (is_bad_offset(b_off))
					return -E2BIG;
				if (cmp_eq)
					emit_instr(ctx, bltz, dst, b_off);
				else
					emit_instr(ctx, blez, dst, b_off);
				emit_instr(ctx, nop);
				return 2; /* We consumed the exit. */
			}
			b_off = b_imm(this_idx + insn->off + 1, ctx);
			if (is_bad_offset(b_off))
				return -E2BIG;
			if (cmp_eq)
				emit_instr(ctx, bgez, dst, b_off);
			else
				emit_instr(ctx, bgtz, dst, b_off);
			emit_instr(ctx, nop);
			break;
		}
		/*
		 * only "LT" compare available, so we must use imm + 1
		 * to generate "GT"
		 */
		t64s = insn->imm + (cmp_eq ? 0 : 1);
		if (t64s >= S16_MIN && t64s <= S16_MAX) {
			emit_instr(ctx, slti, MIPS_R_AT, dst, (int)t64s);
			src = MIPS_R_AT;
			dst = MIPS_R_ZERO;
			cmp_eq = true;
			goto jeq_common;
		}
		emit_const_to_reg(ctx, MIPS_R_AT, (u64)t64s);
		emit_instr(ctx, slt, MIPS_R_AT, dst, MIPS_R_AT);
		src = MIPS_R_AT;
		dst = MIPS_R_ZERO;
		cmp_eq = true;
		goto jeq_common;
	case BPF_JMP | BPF_JGT | BPF_K:
	case BPF_JMP | BPF_JGE | BPF_K:
		cmp_eq = (BPF_OP(insn->code) == BPF_JGE);
		dst = ebpf_to_mips_reg(ctx, insn, dst_reg_fp_ok);
		if (dst < 0)
			return dst;
		/*
		 * only "LT" compare available, so we must use imm + 1
		 * to generate "GT"
		 */
		t64s = (u64)(u32)(insn->imm) + (cmp_eq ? 0 : 1);
		if (t64s >= 0 && t64s <= S16_MAX) {
			emit_instr(ctx, sltiu, MIPS_R_AT, dst, (int)t64s);
			src = MIPS_R_AT;
			dst = MIPS_R_ZERO;
			cmp_eq = true;
			goto jeq_common;
		}
		emit_const_to_reg(ctx, MIPS_R_AT, (u64)t64s);
		emit_instr(ctx, sltu, MIPS_R_AT, dst, MIPS_R_AT);
		src = MIPS_R_AT;
		dst = MIPS_R_ZERO;
		cmp_eq = true;
		goto jeq_common;
	case BPF_JMP | BPF_JSET | BPF_K: /* JMP_IMM */
		dst = ebpf_to_mips_reg(ctx, insn, dst_reg_fp_ok);
		if (dst < 0)
			return dst;

		if (use_bbit_insns() && hweight32((u32)insn->imm) == 1) {
			if ((insn + 1)->code == (BPF_JMP | BPF_EXIT) && insn->off == 1) {
				b_off = b_imm(exit_idx, ctx);
				if (is_bad_offset(b_off))
					return -E2BIG;
				emit_instr(ctx, bbit0, dst, ffs((u32)insn->imm) - 1, b_off);
				emit_instr(ctx, nop);
				return 2; /* We consumed the exit. */
			}
			b_off = b_imm(this_idx + insn->off + 1, ctx);
			if (is_bad_offset(b_off))
				return -E2BIG;
			emit_instr(ctx, bbit1, dst, ffs((u32)insn->imm) - 1, b_off);
			emit_instr(ctx, nop);
			break;
		}
		t64 = (u32)insn->imm;
		emit_const_to_reg(ctx, MIPS_R_AT, t64);
		emit_instr(ctx, and, MIPS_R_AT, dst, MIPS_R_AT);
		src = MIPS_R_AT;
		dst = MIPS_R_ZERO;
		cmp_eq = false;
		goto jeq_common;
	case BPF_JMP | BPF_JA:
		/*
		 * Prefer relative branch for easier debugging, but
		 * fall back if needed.
		 */
		b_off = b_imm(this_idx + insn->off + 1, ctx);
		if (is_bad_offset(b_off)) {
			target = j_target(ctx, this_idx + insn->off + 1);
			if (target == (unsigned int)-1)
				return -E2BIG;
			emit_instr(ctx, j, target);
		} else {
			emit_instr(ctx, b, b_off);
		}
		emit_instr(ctx, nop);
		break;
	case BPF_LD | BPF_DW | BPF_IMM:
		if (insn->src_reg != 0)
			return -EINVAL;
		dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
		if (dst < 0)
			return dst;
		t64 = ((u64)(u32)insn->imm) | ((u64)(insn + 1)->imm << 32);
		emit_const_to_reg(ctx, dst, t64);
		return 2; /* Double slot insn */

	case BPF_JMP | BPF_CALL:
		ctx->flags |= EBPF_SAVE_RA;
		t64s = (s64)insn->imm + (s64)__bpf_call_base;
		emit_const_to_reg(ctx, MIPS_R_T9, (u64)t64s);
		emit_instr(ctx, jalr, MIPS_R_RA, MIPS_R_T9);
		/* delay slot */
		emit_instr(ctx, nop);
		break;

	case BPF_JMP | BPF_TAIL_CALL:
		if (emit_bpf_tail_call(ctx, this_idx))
			return -EINVAL;
		break;
	case BPF_LD | BPF_B | BPF_ABS:
	case BPF_LD | BPF_H | BPF_ABS:
	case BPF_LD | BPF_W | BPF_ABS:
	case BPF_LD | BPF_DW | BPF_ABS:
		ctx->flags |= EBPF_SAVE_RA;

		gen_imm_to_reg(insn, MIPS_R_A1, ctx);
		emit_instr(ctx, addiu, MIPS_R_A2, MIPS_R_ZERO, size_to_len(insn));

		if (insn->imm < 0) {
			emit_const_to_reg(ctx, MIPS_R_T9, (u64)bpf_internal_load_pointer_neg_helper);
		} else {
			emit_const_to_reg(ctx, MIPS_R_T9, (u64)ool_skb_header_pointer);
			emit_instr(ctx, daddiu, MIPS_R_A3, MIPS_R_SP, ctx->tmp_offset);
		}
		goto ld_skb_common;

	case BPF_LD | BPF_B | BPF_IND:
	case BPF_LD | BPF_H | BPF_IND:
	case BPF_LD | BPF_W | BPF_IND:
	case BPF_LD | BPF_DW | BPF_IND:
		ctx->flags |= EBPF_SAVE_RA;
		src = ebpf_to_mips_reg(ctx, insn, src_reg_no_fp);
		if (src < 0)
			return src;
		ts = get_reg_val_type(ctx, this_idx, insn->src_reg);
		if (ts == REG_32BIT_ZERO_EX) {
			/* sign extend */
			emit_instr(ctx, sll, MIPS_R_A1, src, 0);
			src = MIPS_R_A1;
		}
		if (insn->imm >= S16_MIN && insn->imm <= S16_MAX) {
			emit_instr(ctx, daddiu, MIPS_R_A1, src, insn->imm);
		} else {
			gen_imm_to_reg(insn, MIPS_R_AT, ctx);
			emit_instr(ctx, daddu, MIPS_R_A1, MIPS_R_AT, src);
		}
		/* truncate to 32-bit int */
		emit_instr(ctx, sll, MIPS_R_A1, MIPS_R_A1, 0);
		emit_instr(ctx, daddiu, MIPS_R_A3, MIPS_R_SP, ctx->tmp_offset);
		emit_instr(ctx, slt, MIPS_R_AT, MIPS_R_A1, MIPS_R_ZERO);

		emit_const_to_reg(ctx, MIPS_R_T8, (u64)bpf_internal_load_pointer_neg_helper);
		emit_const_to_reg(ctx, MIPS_R_T9, (u64)ool_skb_header_pointer);
		emit_instr(ctx, addiu, MIPS_R_A2, MIPS_R_ZERO, size_to_len(insn));
		emit_instr(ctx, movn, MIPS_R_T9, MIPS_R_T8, MIPS_R_AT);

ld_skb_common:
		emit_instr(ctx, jalr, MIPS_R_RA, MIPS_R_T9);
		/* delay slot move */
		emit_instr(ctx, daddu, MIPS_R_A0, MIPS_R_S0, MIPS_R_ZERO);

		/* Check the error value */
		b_off = b_imm(exit_idx, ctx);
		if (is_bad_offset(b_off)) {
			target = j_target(ctx, exit_idx);
			if (target == (unsigned int)-1)
				return -E2BIG;

			if (!(ctx->offsets[this_idx] & OFFSETS_B_CONV)) {
				ctx->offsets[this_idx] |= OFFSETS_B_CONV;
				ctx->long_b_conversion = 1;
			}
			emit_instr(ctx, bne, MIPS_R_V0, MIPS_R_ZERO, 4 * 3);
			emit_instr(ctx, nop);
			emit_instr(ctx, j, target);
			emit_instr(ctx, nop);
		} else {
			emit_instr(ctx, beq, MIPS_R_V0, MIPS_R_ZERO, b_off);
			emit_instr(ctx, nop);
		}

#ifdef __BIG_ENDIAN
		need_swap = false;
#else
		need_swap = true;
#endif
		dst = MIPS_R_V0;
		switch (BPF_SIZE(insn->code)) {
		case BPF_B:
			emit_instr(ctx, lbu, dst, 0, MIPS_R_V0);
			break;
		case BPF_H:
			emit_instr(ctx, lhu, dst, 0, MIPS_R_V0);
			if (need_swap)
				emit_instr(ctx, wsbh, dst, dst);
			break;
		case BPF_W:
			emit_instr(ctx, lw, dst, 0, MIPS_R_V0);
			if (need_swap) {
				emit_instr(ctx, wsbh, dst, dst);
				emit_instr(ctx, rotr, dst, dst, 16);
			}
			break;
		case BPF_DW:
			emit_instr(ctx, ld, dst, 0, MIPS_R_V0);
			if (need_swap) {
				emit_instr(ctx, dsbh, dst, dst);
				emit_instr(ctx, dshd, dst, dst);
			}
			break;
		}
		break;
	case BPF_ALU | BPF_END | BPF_FROM_BE:
	case BPF_ALU | BPF_END | BPF_FROM_LE:
		dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
		if (dst < 0)
			return dst;
		td = get_reg_val_type(ctx, this_idx, insn->dst_reg);
		if (insn->imm == 64 && td == REG_32BIT)
			emit_instr(ctx, dinsu, dst, MIPS_R_ZERO, 32, 32);

		if (insn->imm != 64 &&
		    (td == REG_64BIT || td == REG_32BIT_ZERO_EX)) {
			/* sign extend */
			emit_instr(ctx, sll, dst, dst, 0);
		}

#ifdef __BIG_ENDIAN
		need_swap = (BPF_SRC(insn->code) == BPF_FROM_LE);
#else
		need_swap = (BPF_SRC(insn->code) == BPF_FROM_BE);
#endif
		if (insn->imm == 16) {
			if (need_swap)
				emit_instr(ctx, wsbh, dst, dst);
			emit_instr(ctx, andi, dst, dst, 0xffff);
		} else if (insn->imm == 32) {
			if (need_swap) {
				emit_instr(ctx, wsbh, dst, dst);
				emit_instr(ctx, rotr, dst, dst, 16);
			}
		} else { /* 64-bit */
			if (need_swap) {
				emit_instr(ctx, dsbh, dst, dst);
				emit_instr(ctx, dshd, dst, dst);
			}
		}
		break;
	case BPF_ST | BPF_B | BPF_MEM:
	case BPF_ST | BPF_H | BPF_MEM:
	case BPF_ST | BPF_W | BPF_MEM:
	case BPF_ST | BPF_DW | BPF_MEM:
		if (insn->dst_reg == BPF_REG_10) {
			ctx->flags |= EBPF_SEEN_FP;
			dst = MIPS_R_SP;
			mem_off = insn->off + MAX_BPF_STACK;
		} else {
			dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
			if (dst < 0)
				return dst;
			mem_off = insn->off;
		}
		gen_imm_to_reg(insn, MIPS_R_AT, ctx);
		switch (BPF_SIZE(insn->code)) {
		case BPF_B:
			emit_instr(ctx, sb, MIPS_R_AT, mem_off, dst);
			break;
		case BPF_H:
			emit_instr(ctx, sh, MIPS_R_AT, mem_off, dst);
			break;
		case BPF_W:
			emit_instr(ctx, sw, MIPS_R_AT, mem_off, dst);
			break;
		case BPF_DW:
			emit_instr(ctx, sd, MIPS_R_AT, mem_off, dst);
			break;
		}
		break;
	case BPF_LDX | BPF_B | BPF_MEM:
	case BPF_LDX | BPF_H | BPF_MEM:
	case BPF_LDX | BPF_W | BPF_MEM:
	case BPF_LDX | BPF_DW | BPF_MEM:
		if (insn->src_reg == BPF_REG_10) {
			ctx->flags |= EBPF_SEEN_FP;
			src = MIPS_R_SP;
			mem_off = insn->off + MAX_BPF_STACK;
		} else {
			src = ebpf_to_mips_reg(ctx, insn, src_reg_no_fp);
			if (src < 0)
				return src;
			mem_off = insn->off;
		}
		dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
		if (dst < 0)
			return dst;
		switch (BPF_SIZE(insn->code)) {
		case BPF_B:
			emit_instr(ctx, lbu, dst, mem_off, src);
			break;
		case BPF_H:
			emit_instr(ctx, lhu, dst, mem_off, src);
			break;
		case BPF_W:
			emit_instr(ctx, lw, dst, mem_off, src);
			break;
		case BPF_DW:
			emit_instr(ctx, ld, dst, mem_off, src);
			break;
		}
		break;
	case BPF_STX | BPF_B | BPF_MEM:
	case BPF_STX | BPF_H | BPF_MEM:
	case BPF_STX | BPF_W | BPF_MEM:
	case BPF_STX | BPF_DW | BPF_MEM:
	case BPF_STX | BPF_W | BPF_XADD:
	case BPF_STX | BPF_DW | BPF_XADD:
		if (insn->dst_reg == BPF_REG_10) {
			ctx->flags |= EBPF_SEEN_FP;
			dst = MIPS_R_SP;
			mem_off = insn->off + MAX_BPF_STACK;
		} else {
			dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
			if (dst < 0)
				return dst;
			mem_off = insn->off;
		}
		src = ebpf_to_mips_reg(ctx, insn, src_reg_no_fp);
		if (src < 0)
			return src;
		if (BPF_MODE(insn->code) == BPF_XADD) {
			switch (BPF_SIZE(insn->code)) {
			case BPF_W:
				if (get_reg_val_type(ctx, this_idx, insn->src_reg) == REG_32BIT) {
					emit_instr(ctx, sll, MIPS_R_AT, src, 0);
					src = MIPS_R_AT;
				}
				emit_instr(ctx, ll, MIPS_R_T8, mem_off, dst);
				emit_instr(ctx, addu, MIPS_R_T8, MIPS_R_T8, src);
				emit_instr(ctx, sc, MIPS_R_T8, mem_off, dst);
				/*
				 * On failure back up to LL (-4
				 * instructions of 4 bytes each)
				 */
				emit_instr(ctx, beq, MIPS_R_T8, MIPS_R_ZERO, -4 * 4);
				emit_instr(ctx, nop);
				break;
			case BPF_DW:
				if (get_reg_val_type(ctx, this_idx, insn->src_reg) == REG_32BIT) {
					emit_instr(ctx, daddu, MIPS_R_AT, src, MIPS_R_ZERO);
					emit_instr(ctx, dinsu, MIPS_R_AT, MIPS_R_ZERO, 32, 32);
					src = MIPS_R_AT;
				}
				emit_instr(ctx, lld, MIPS_R_T8, mem_off, dst);
				emit_instr(ctx, daddu, MIPS_R_T8, MIPS_R_T8, src);
				emit_instr(ctx, scd, MIPS_R_T8, mem_off, dst);
				emit_instr(ctx, beq, MIPS_R_T8, MIPS_R_ZERO, -4 * 4);
				emit_instr(ctx, nop);
				break;
			}
		} else { /* BPF_MEM */
			switch (BPF_SIZE(insn->code)) {
			case BPF_B:
				emit_instr(ctx, sb, src, mem_off, dst);
				break;
			case BPF_H:
				emit_instr(ctx, sh, src, mem_off, dst);
				break;
			case BPF_W:
				emit_instr(ctx, sw, src, mem_off, dst);
				break;
			case BPF_DW:
				if (get_reg_val_type(ctx, this_idx, insn->src_reg) == REG_32BIT) {
					emit_instr(ctx, daddu, MIPS_R_AT, src, MIPS_R_ZERO);
					emit_instr(ctx, dinsu, MIPS_R_AT, MIPS_R_ZERO, 32, 32);
					src = MIPS_R_AT;
				}
				emit_instr(ctx, sd, src, mem_off, dst);
				break;
			}
		}
		break;
	default:
		pr_err("NOT HANDLED %d - (%02x)\n",
		       this_idx, (unsigned int)insn->code);
		return -EINVAL;
	}
	return 1;
}

#define RVT_VISITED_MASK 0xc000000000000000ull
#define RVT_FALL_THROUGH 0x4000000000000000ull
#define RVT_BRANCH_TAKEN 0x8000000000000000ull
#define RVT_DONE (RVT_FALL_THROUGH | RVT_BRANCH_TAKEN)
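
/*
 * Each reg_val_types word (one per insn) packs a 3-bit value type for
 * each of the 11 eBPF registers into its low bits; bit 62 marks the
 * insn's fall-through edge as visited (RVT_FALL_THROUGH) and bit 63
 * its branch-taken edge (RVT_BRANCH_TAKEN).  See reg_val_propagate().
 */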

static int build_int_body(struct jit_ctx *ctx)
{
	const struct bpf_prog *prog = ctx->skf;
	const struct bpf_insn *insn;
	int i, r;

	for (i = 0; i < prog->len; ) {
		insn = prog->insnsi + i;
		if ((ctx->reg_val_types[i] & RVT_VISITED_MASK) == 0) {
			/* dead instruction, don't emit it. */
			i++;
			continue;
		}

		if (ctx->target == NULL)
			ctx->offsets[i] = (ctx->offsets[i] & OFFSETS_B_CONV) | (ctx->idx * 4);

		r = build_one_insn(insn, ctx, i, prog->len);
		if (r < 0)
			return r;
		i += r;
	}
	/* epilogue offset */
	if (ctx->target == NULL)
		ctx->offsets[i] = ctx->idx * 4;

	/*
	 * All exits have an offset of the epilogue; some offsets may
	 * not have been set due to branch-around threading, so set
	 * them now.
	 */
	if (ctx->target == NULL)
		for (i = 0; i < prog->len; i++) {
			insn = prog->insnsi + i;
			if (insn->code == (BPF_JMP | BPF_EXIT))
				ctx->offsets[i] = ctx->idx * 4;
		}
	return 0;
}
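
/*
 * Note that build_int_body() serves both kinds of pass: in the sizing
 * passes ctx->target is NULL and only instruction counts and
 * ctx->offsets[] are produced; in the final pass ctx->target points
 * at the image and the same walk emits the actual instructions.
 */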

/* return the last idx processed, or negative for error */
static int reg_val_propagate_range(struct jit_ctx *ctx, u64 initial_rvt,
				   int start_idx, bool follow_taken)
{
	const struct bpf_prog *prog = ctx->skf;
	const struct bpf_insn *insn;
	u64 exit_rvt = initial_rvt;
	u64 *rvt = ctx->reg_val_types;
	int idx;
	int reg;

	for (idx = start_idx; idx < prog->len; idx++) {
		rvt[idx] = (rvt[idx] & RVT_VISITED_MASK) | exit_rvt;
		insn = prog->insnsi + idx;
		switch (BPF_CLASS(insn->code)) {
		case BPF_ALU:
			switch (BPF_OP(insn->code)) {
			case BPF_ADD:
			case BPF_SUB:
			case BPF_MUL:
			case BPF_DIV:
			case BPF_OR:
			case BPF_AND:
			case BPF_LSH:
			case BPF_RSH:
			case BPF_NEG:
			case BPF_MOD:
			case BPF_XOR:
				set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT);
				break;
			case BPF_MOV:
				if (BPF_SRC(insn->code)) {
					/* REG to REG move */
					set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT);
				} else {
					/* IMM to REG move */
					if (insn->imm >= 0)
						set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT_POS);
					else
						set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT);
				}
				break;
			case BPF_END:
				if (insn->imm == 64)
					set_reg_val_type(&exit_rvt, insn->dst_reg, REG_64BIT);
				else if (insn->imm == 32)
					set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT);
				else /* insn->imm == 16 */
					set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT_POS);
				break;
			}
			rvt[idx] |= RVT_DONE;
			break;
		case BPF_ALU64:
			switch (BPF_OP(insn->code)) {
			case BPF_MOV:
				if (BPF_SRC(insn->code)) {
					/* REG to REG move */
					set_reg_val_type(&exit_rvt, insn->dst_reg, REG_64BIT);
				} else {
					/* IMM to REG move */
					if (insn->imm >= 0)
						set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT_POS);
					else
						set_reg_val_type(&exit_rvt, insn->dst_reg, REG_64BIT_32BIT);
				}
				break;
			default:
				set_reg_val_type(&exit_rvt, insn->dst_reg, REG_64BIT);
			}
			rvt[idx] |= RVT_DONE;
			break;
		case BPF_LD:
			switch (BPF_SIZE(insn->code)) {
			case BPF_DW:
				if (BPF_MODE(insn->code) == BPF_IMM) {
					s64 val;

					val = (s64)((u32)insn->imm | ((u64)(insn + 1)->imm << 32));
					if (val > 0 && val <= S32_MAX)
						set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT_POS);
					else if (val >= S32_MIN && val <= S32_MAX)
						set_reg_val_type(&exit_rvt, insn->dst_reg, REG_64BIT_32BIT);
					else
						set_reg_val_type(&exit_rvt, insn->dst_reg, REG_64BIT);
					/*
					 * ldimm64 occupies two insn slots;
					 * mark this one done and advance so
					 * the RVT_DONE below lands on the
					 * second slot.
					 */
					rvt[idx] |= RVT_DONE;
					idx++;
				} else {
					set_reg_val_type(&exit_rvt, insn->dst_reg, REG_64BIT);
				}
				break;
			case BPF_B:
			case BPF_H:
				set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT_POS);
				break;
			case BPF_W:
				if (BPF_MODE(insn->code) == BPF_IMM)
					set_reg_val_type(&exit_rvt, insn->dst_reg,
							 insn->imm >= 0 ? REG_32BIT_POS : REG_32BIT);
				else
					set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT);
				break;
			}
			rvt[idx] |= RVT_DONE;
			break;
		case BPF_LDX:
			switch (BPF_SIZE(insn->code)) {
			case BPF_DW:
				set_reg_val_type(&exit_rvt, insn->dst_reg, REG_64BIT);
				break;
			case BPF_B:
			case BPF_H:
				set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT_POS);
				break;
			case BPF_W:
				set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT);
				break;
			}
			rvt[idx] |= RVT_DONE;
			break;
		case BPF_JMP:
			switch (BPF_OP(insn->code)) {
			case BPF_EXIT:
				rvt[idx] = RVT_DONE | exit_rvt;
				rvt[prog->len] = exit_rvt;
				return idx;
			case BPF_JA:
				rvt[idx] |= RVT_DONE;
				idx += insn->off;
				break;
			case BPF_JEQ:
			case BPF_JGT:
			case BPF_JGE:
			case BPF_JSET:
			case BPF_JNE:
			case BPF_JSGT:
			case BPF_JSGE:
				if (follow_taken) {
					rvt[idx] |= RVT_BRANCH_TAKEN;
					idx += insn->off;
					follow_taken = false;
				} else {
					rvt[idx] |= RVT_FALL_THROUGH;
				}
				break;
			case BPF_CALL:
				set_reg_val_type(&exit_rvt, BPF_REG_0, REG_64BIT);
				/* Upon call return, argument registers are clobbered. */
				for (reg = BPF_REG_0; reg <= BPF_REG_5; reg++)
					set_reg_val_type(&exit_rvt, reg, REG_64BIT);
				rvt[idx] |= RVT_DONE;
				break;
			default:
				WARN(1, "Unhandled BPF_JMP case.\n");
				rvt[idx] |= RVT_DONE;
				break;
			}
			break;
		default:
			rvt[idx] |= RVT_DONE;
			break;
		}
	}
	return idx;
}

/*
 * Track the value range (i.e. 32-bit vs. 64-bit) of each register at
 * each eBPF insn.  This allows unneeded sign and zero extension
 * operations to be omitted.
 *
 * It does not yet handle confluence of control paths with conflicting
 * ranges, but it is good enough for most sane code.
 */
static int reg_val_propagate(struct jit_ctx *ctx)
{
	const struct bpf_prog *prog = ctx->skf;
	u64 exit_rvt;
	int reg;
	int i;

	/*
	 * 11 registers * 3 bits/reg leaves top bits free for other
	 * uses.  Bits 62..63 are used to mark whether an insn has
	 * been visited.
	 */
	exit_rvt = 0;

	/* Upon entry, argument registers are 64-bit. */
	for (reg = BPF_REG_1; reg <= BPF_REG_5; reg++)
		set_reg_val_type(&exit_rvt, reg, REG_64BIT);

	/*
	 * First follow all conditional branches on the fall-through
	 * edge of control flow.
	 */
	reg_val_propagate_range(ctx, exit_rvt, 0, false);
restart_search:
	/*
	 * Then repeatedly find the first conditional branch where
	 * both edges of control flow have not been taken, and follow
	 * the branch taken edge.  We will end up restarting the
	 * search once per conditional branch insn.
	 */
	for (i = 0; i < prog->len; i++) {
		u64 rvt = ctx->reg_val_types[i];

		if ((rvt & RVT_VISITED_MASK) == RVT_DONE ||
		    (rvt & RVT_VISITED_MASK) == 0)
			continue;
		if ((rvt & RVT_VISITED_MASK) == RVT_FALL_THROUGH) {
			reg_val_propagate_range(ctx, rvt & ~RVT_VISITED_MASK, i, true);
		} else { /* RVT_BRANCH_TAKEN */
			WARN(1, "Unexpected RVT_BRANCH_TAKEN case.\n");
			reg_val_propagate_range(ctx, rvt & ~RVT_VISITED_MASK, i, false);
		}
		goto restart_search;
	}
	/*
	 * Eventually all conditional branches have been followed on
	 * both branches and we are done.  Any insn that has not been
	 * visited at this point is dead.
	 */
	return 0;
}
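
/*
 * Worked example: in a program with a single conditional branch, the
 * initial call to reg_val_propagate_range() follows the fall-through
 * edge and leaves the branch marked RVT_FALL_THROUGH.  The search
 * loop then finds that insn and re-runs the propagation from it with
 * follow_taken set, which ORs in RVT_BRANCH_TAKEN (making the insn
 * RVT_DONE) and walks the taken edge; the next scan finds no
 * partially-visited branches and we return.
 */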

static void jit_fill_hole(void *area, unsigned int size)
{
	u32 *p;

	/* We are guaranteed to have aligned memory. */
	for (p = area; size >= sizeof(u32); size -= sizeof(u32))
		uasm_i_break(&p, BRK_BUG); /* Increments p */
}
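
/*
 * Filling the hole with break instructions (BRK_BUG) means any stray
 * jump into the padding traps immediately rather than executing
 * whatever bytes happened to be there.
 */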

struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
{
	struct bpf_prog *orig_prog = prog;
	bool tmp_blinded = false;
	struct bpf_prog *tmp;
	struct bpf_binary_header *header = NULL;
	struct jit_ctx ctx;
	unsigned int image_size;
	u8 *image_ptr;

	if (!bpf_jit_enable || !cpu_has_mips64r2)
		return prog;

	tmp = bpf_jit_blind_constants(prog);
	/* If blinding was requested and we failed during blinding,
	 * we must fall back to the interpreter.
	 */
	if (IS_ERR(tmp))
		return orig_prog;
	if (tmp != prog) {
		tmp_blinded = true;
		prog = tmp;
	}

	memset(&ctx, 0, sizeof(ctx));

	ctx.offsets = kcalloc(prog->len + 1, sizeof(*ctx.offsets), GFP_KERNEL);
	if (ctx.offsets == NULL)
		goto out_err;

	ctx.reg_val_types = kcalloc(prog->len + 1, sizeof(*ctx.reg_val_types), GFP_KERNEL);
	if (ctx.reg_val_types == NULL)
		goto out_err;

	ctx.skf = prog;

	if (reg_val_propagate(&ctx))
		goto out_err;

	/*
	 * First pass discovers used resources and instruction offsets
	 * assuming short branches are used.
	 */
	if (build_int_body(&ctx))
		goto out_err;

	/*
	 * If no calls are made (EBPF_SAVE_RA not set), the tail call
	 * count can live in $v1; otherwise we must save it in $s4.
	 */
	if (ctx.flags & EBPF_SEEN_TC) {
		if (ctx.flags & EBPF_SAVE_RA)
			ctx.flags |= EBPF_SAVE_S4;
		else
			ctx.flags |= EBPF_TCC_IN_V1;
	}

	/*
	 * Second pass generates offsets.  If any branch is out of
	 * range, a long jump-around sequence is generated for it, and
	 * we have to try again from the beginning to generate the new
	 * offsets.  This is done until no additional conversions are
	 * necessary.
	 */
	do {
		ctx.idx = 0;
		ctx.gen_b_offsets = 1;
		ctx.long_b_conversion = 0;
		if (gen_int_prologue(&ctx))
			goto out_err;
		if (build_int_body(&ctx))
			goto out_err;
		if (build_int_epilogue(&ctx, MIPS_R_RA))
			goto out_err;
	} while (ctx.long_b_conversion);
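
	/*
	 * The retry loop above settles because conversions appear to
	 * be one-way: the OFFSETS_B_CONV bit is preserved across
	 * passes (see build_int_body()), so each iteration can only
	 * grow the set of branches using the long jump-around form.
	 */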
	image_size = 4 * ctx.idx;

	header = bpf_jit_binary_alloc(image_size, &image_ptr,
				      sizeof(u32), jit_fill_hole);
	if (header == NULL)
		goto out_err;

	ctx.target = (u32 *)image_ptr;

	/* Third pass generates the code */
	ctx.idx = 0;
	if (gen_int_prologue(&ctx))
		goto out_err;
	if (build_int_body(&ctx))
		goto out_err;
	if (build_int_epilogue(&ctx, MIPS_R_RA))
		goto out_err;

	/* Update the icache; ctx.target is a u32 *, so index, don't scale by sizeof(u32) again. */
	flush_icache_range((unsigned long)ctx.target,
			   (unsigned long)&ctx.target[ctx.idx]);

	if (bpf_jit_enable > 1)
		/* Dump JIT code */
		bpf_jit_dump(prog->len, image_size, 2, ctx.target);

	bpf_jit_binary_lock_ro(header);
	prog->bpf_func = (void *)ctx.target;
	prog->jited = 1;
	prog->jited_len = image_size;
out_normal:
	if (tmp_blinded)
		bpf_jit_prog_release_other(prog, prog == orig_prog ?
					   tmp : orig_prog);
	kfree(ctx.offsets);
	kfree(ctx.reg_val_types);
	return prog;

out_err:
	prog = orig_prog;
	if (header)
		bpf_jit_binary_free(header);
	goto out_normal;
}