nfp_bpf_jit.c

/*
 * Copyright (C) 2016 Netronome Systems, Inc.
 *
 * This software is dual licensed under the GNU General Public License
 * Version 2, June 1991 as shown in the file COPYING in the top-level
 * directory of this source tree or the BSD 2-Clause License provided
 * below.  You have the option to license this software under the
 * complete terms of either license.
 *
 * The BSD 2-Clause License:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      1. Redistributions of source code must retain the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer.
 *
 *      2. Redistributions in binary form must reproduce the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer in the documentation and/or other materials
 *         provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#define pr_fmt(fmt)	"NFP net bpf: " fmt

#include <linux/kernel.h>
#include <linux/bpf.h>
#include <linux/filter.h>
#include <linux/pkt_cls.h>
#include <linux/unistd.h>

#include "nfp_asm.h"
#include "nfp_bpf.h"

/* --- NFP prog --- */
/* The for-each macros below walk "multiple" entries at once, providing pos
 * and next<n> pointers.  It's safe to modify the next pointers (but not pos).
 */
#define nfp_for_each_insn_walk2(nfp_prog, pos, next) \
        for (pos = list_first_entry(&(nfp_prog)->insns, typeof(*pos), l), \
             next = list_next_entry(pos, l); \
             &(nfp_prog)->insns != &pos->l && \
             &(nfp_prog)->insns != &next->l; \
             pos = nfp_meta_next(pos), \
             next = nfp_meta_next(pos))

#define nfp_for_each_insn_walk3(nfp_prog, pos, next, next2) \
        for (pos = list_first_entry(&(nfp_prog)->insns, typeof(*pos), l), \
             next = list_next_entry(pos, l), \
             next2 = list_next_entry(next, l); \
             &(nfp_prog)->insns != &pos->l && \
             &(nfp_prog)->insns != &next->l && \
             &(nfp_prog)->insns != &next2->l; \
             pos = nfp_meta_next(pos), \
             next = nfp_meta_next(pos), \
             next2 = nfp_meta_next(next))
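
/* A small usage sketch, taken from this file rather than assumed:
 * nfp_fixup_branches() below iterates with
 *
 *        nfp_for_each_insn_walk2(nfp_prog, meta, next)
 *
 * using @next both to locate where @meta's translation ends and as a
 * cursor it advances while resolving jump targets.
 */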

static bool
nfp_meta_has_next(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
        return meta->l.next != &nfp_prog->insns;
}

static bool
nfp_meta_has_prev(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
        return meta->l.prev != &nfp_prog->insns;
}

static void nfp_prog_free(struct nfp_prog *nfp_prog)
{
        struct nfp_insn_meta *meta, *tmp;

        list_for_each_entry_safe(meta, tmp, &nfp_prog->insns, l) {
                list_del(&meta->l);
                kfree(meta);
        }
        kfree(nfp_prog);
}

static void nfp_prog_push(struct nfp_prog *nfp_prog, u64 insn)
{
        if (nfp_prog->__prog_alloc_len == nfp_prog->prog_len) {
                nfp_prog->error = -ENOSPC;
                return;
        }

        nfp_prog->prog[nfp_prog->prog_len] = insn;
        nfp_prog->prog_len++;
}
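
/* Note: the emitters in this file don't return errors.  Like
 * nfp_prog_push() above with -ENOSPC, they latch the first failure in
 * nfp_prog->error and return; nfp_translate() checks that field rather
 * than checking every single emit.
 */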

static unsigned int nfp_prog_current_offset(struct nfp_prog *nfp_prog)
{
        return nfp_prog->start_off + nfp_prog->prog_len;
}

static unsigned int
nfp_prog_offset_to_index(struct nfp_prog *nfp_prog, unsigned int offset)
{
        return offset - nfp_prog->start_off;
}

/* --- SW reg --- */
struct nfp_insn_ur_regs {
        enum alu_dst_ab dst_ab;
        u16 dst;
        u16 areg, breg;
        bool swap;
        bool wr_both;
};

struct nfp_insn_re_regs {
        enum alu_dst_ab dst_ab;
        u8 dst;
        u8 areg, breg;
        bool swap;
        bool wr_both;
        bool i8;
};

static u16 nfp_swreg_to_unreg(u32 swreg, bool is_dst)
{
        u16 val = FIELD_GET(NN_REG_VAL, swreg);

        switch (FIELD_GET(NN_REG_TYPE, swreg)) {
        case NN_REG_GPR_A:
        case NN_REG_GPR_B:
        case NN_REG_GPR_BOTH:
                return val;
        case NN_REG_NNR:
                return UR_REG_NN | val;
        case NN_REG_XFER:
                return UR_REG_XFR | val;
        case NN_REG_IMM:
                if (val & ~0xff) {
                        pr_err("immediate too large\n");
                        return 0;
                }
                return UR_REG_IMM_encode(val);
        case NN_REG_NONE:
                return is_dst ? UR_REG_NO_DST : REG_NONE;
        default:
                pr_err("unrecognized reg encoding %08x\n", swreg);
                return 0;
        }
}

static int
swreg_to_unrestricted(u32 dst, u32 lreg, u32 rreg, struct nfp_insn_ur_regs *reg)
{
        memset(reg, 0, sizeof(*reg));

        /* Decode destination */
        if (FIELD_GET(NN_REG_TYPE, dst) == NN_REG_IMM)
                return -EFAULT;

        if (FIELD_GET(NN_REG_TYPE, dst) == NN_REG_GPR_B)
                reg->dst_ab = ALU_DST_B;
        if (FIELD_GET(NN_REG_TYPE, dst) == NN_REG_GPR_BOTH)
                reg->wr_both = true;
        reg->dst = nfp_swreg_to_unreg(dst, true);

        /* Decode source operands */
        if (FIELD_GET(NN_REG_TYPE, lreg) == FIELD_GET(NN_REG_TYPE, rreg))
                return -EFAULT;

        if (FIELD_GET(NN_REG_TYPE, lreg) == NN_REG_GPR_B ||
            FIELD_GET(NN_REG_TYPE, rreg) == NN_REG_GPR_A) {
                reg->areg = nfp_swreg_to_unreg(rreg, false);
                reg->breg = nfp_swreg_to_unreg(lreg, false);
                reg->swap = true;
        } else {
                reg->areg = nfp_swreg_to_unreg(lreg, false);
                reg->breg = nfp_swreg_to_unreg(rreg, false);
        }

        return 0;
}
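
/* NFP ALU operands are read from two banks, the A and B ports.  The
 * swreg_to_*() helpers assign each software register to a port; when the
 * requested order would put an operand on the wrong port they exchange
 * areg/breg and set @swap so the emitter can ask the hardware to swap the
 * operands back.  Two sources of the same type can't be encoded, hence
 * the -EFAULT when lreg and rreg carry equal NN_REG_TYPE fields.
 */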

static u16 nfp_swreg_to_rereg(u32 swreg, bool is_dst, bool has_imm8, bool *i8)
{
        u16 val = FIELD_GET(NN_REG_VAL, swreg);

        switch (FIELD_GET(NN_REG_TYPE, swreg)) {
        case NN_REG_GPR_A:
        case NN_REG_GPR_B:
        case NN_REG_GPR_BOTH:
                return val;
        case NN_REG_XFER:
                return RE_REG_XFR | val;
        case NN_REG_IMM:
                if (val & ~(0x7f | has_imm8 << 7)) {
                        pr_err("immediate too large\n");
                        return 0;
                }
                *i8 = val & 0x80;
                return RE_REG_IMM_encode(val & 0x7f);
        case NN_REG_NONE:
                return is_dst ? RE_REG_NO_DST : REG_NONE;
        default:
                pr_err("unrecognized reg encoding\n");
                return 0;
        }
}

static int
swreg_to_restricted(u32 dst, u32 lreg, u32 rreg, struct nfp_insn_re_regs *reg,
                    bool has_imm8)
{
        memset(reg, 0, sizeof(*reg));

        /* Decode destination */
        if (FIELD_GET(NN_REG_TYPE, dst) == NN_REG_IMM)
                return -EFAULT;

        if (FIELD_GET(NN_REG_TYPE, dst) == NN_REG_GPR_B)
                reg->dst_ab = ALU_DST_B;
        if (FIELD_GET(NN_REG_TYPE, dst) == NN_REG_GPR_BOTH)
                reg->wr_both = true;
        reg->dst = nfp_swreg_to_rereg(dst, true, false, NULL);

        /* Decode source operands */
        if (FIELD_GET(NN_REG_TYPE, lreg) == FIELD_GET(NN_REG_TYPE, rreg))
                return -EFAULT;

        if (FIELD_GET(NN_REG_TYPE, lreg) == NN_REG_GPR_B ||
            FIELD_GET(NN_REG_TYPE, rreg) == NN_REG_GPR_A) {
                reg->areg = nfp_swreg_to_rereg(rreg, false, has_imm8, &reg->i8);
                reg->breg = nfp_swreg_to_rereg(lreg, false, has_imm8, &reg->i8);
                reg->swap = true;
        } else {
                reg->areg = nfp_swreg_to_rereg(lreg, false, has_imm8, &reg->i8);
                reg->breg = nfp_swreg_to_rereg(rreg, false, has_imm8, &reg->i8);
        }

        return 0;
}

/* --- Emitters --- */
static const struct cmd_tgt_act cmd_tgt_act[__CMD_TGT_MAP_SIZE] = {
        [CMD_TGT_WRITE8] =       { 0x00, 0x42 },
        [CMD_TGT_READ8] =        { 0x01, 0x43 },
        [CMD_TGT_READ_LE] =      { 0x01, 0x40 },
        [CMD_TGT_READ_SWAP_LE] = { 0x03, 0x40 },
};

static void
__emit_cmd(struct nfp_prog *nfp_prog, enum cmd_tgt_map op,
           u8 mode, u8 xfer, u8 areg, u8 breg, u8 size, bool sync)
{
        enum cmd_ctx_swap ctx;
        u64 insn;

        if (sync)
                ctx = CMD_CTX_SWAP;
        else
                ctx = CMD_CTX_NO_SWAP;

        insn =  FIELD_PREP(OP_CMD_A_SRC, areg) |
                FIELD_PREP(OP_CMD_CTX, ctx) |
                FIELD_PREP(OP_CMD_B_SRC, breg) |
                FIELD_PREP(OP_CMD_TOKEN, cmd_tgt_act[op].token) |
                FIELD_PREP(OP_CMD_XFER, xfer) |
                FIELD_PREP(OP_CMD_CNT, size) |
                FIELD_PREP(OP_CMD_SIG, sync) |
                FIELD_PREP(OP_CMD_TGT_CMD, cmd_tgt_act[op].tgt_cmd) |
                FIELD_PREP(OP_CMD_MODE, mode);

        nfp_prog_push(nfp_prog, insn);
}

static void
emit_cmd(struct nfp_prog *nfp_prog, enum cmd_tgt_map op,
         u8 mode, u8 xfer, u32 lreg, u32 rreg, u8 size, bool sync)
{
        struct nfp_insn_re_regs reg;
        int err;

        err = swreg_to_restricted(reg_none(), lreg, rreg, &reg, false);
        if (err) {
                nfp_prog->error = err;
                return;
        }
        if (reg.swap) {
                pr_err("cmd can't swap arguments\n");
                nfp_prog->error = -EFAULT;
                return;
        }

        __emit_cmd(nfp_prog, op, mode, xfer, reg.areg, reg.breg, size, sync);
}

static void
__emit_br(struct nfp_prog *nfp_prog, enum br_mask mask, enum br_ev_pip ev_pip,
          enum br_ctx_signal_state css, u16 addr, u8 defer)
{
        u16 addr_lo, addr_hi;
        u64 insn;

        addr_lo = addr & (OP_BR_ADDR_LO >> __bf_shf(OP_BR_ADDR_LO));
        addr_hi = addr != addr_lo;

        insn = OP_BR_BASE |
                FIELD_PREP(OP_BR_MASK, mask) |
                FIELD_PREP(OP_BR_EV_PIP, ev_pip) |
                FIELD_PREP(OP_BR_CSS, css) |
                FIELD_PREP(OP_BR_DEFBR, defer) |
                FIELD_PREP(OP_BR_ADDR_LO, addr_lo) |
                FIELD_PREP(OP_BR_ADDR_HI, addr_hi);

        nfp_prog_push(nfp_prog, insn);
}

static void emit_br_def(struct nfp_prog *nfp_prog, u16 addr, u8 defer)
{
        if (defer > 2) {
                pr_err("BUG: branch defer out of bounds %d\n", defer);
                nfp_prog->error = -EFAULT;
                return;
        }
        __emit_br(nfp_prog, BR_UNC, BR_EV_PIP_UNCOND, BR_CSS_NONE, addr, defer);
}

static void
emit_br(struct nfp_prog *nfp_prog, enum br_mask mask, u16 addr, u8 defer)
{
        __emit_br(nfp_prog, mask,
                  mask != BR_UNC ? BR_EV_PIP_COND : BR_EV_PIP_UNCOND,
                  BR_CSS_NONE, addr, defer);
}

static void
__emit_br_byte(struct nfp_prog *nfp_prog, u8 areg, u8 breg, bool imm8,
               u8 byte, bool equal, u16 addr, u8 defer)
{
        u16 addr_lo, addr_hi;
        u64 insn;

        addr_lo = addr & (OP_BB_ADDR_LO >> __bf_shf(OP_BB_ADDR_LO));
        addr_hi = addr != addr_lo;

        insn = OP_BBYTE_BASE |
                FIELD_PREP(OP_BB_A_SRC, areg) |
                FIELD_PREP(OP_BB_BYTE, byte) |
                FIELD_PREP(OP_BB_B_SRC, breg) |
                FIELD_PREP(OP_BB_I8, imm8) |
                FIELD_PREP(OP_BB_EQ, equal) |
                FIELD_PREP(OP_BB_DEFBR, defer) |
                FIELD_PREP(OP_BB_ADDR_LO, addr_lo) |
                FIELD_PREP(OP_BB_ADDR_HI, addr_hi);

        nfp_prog_push(nfp_prog, insn);
}

static void
emit_br_byte_neq(struct nfp_prog *nfp_prog,
                 u32 dst, u8 imm, u8 byte, u16 addr, u8 defer)
{
        struct nfp_insn_re_regs reg;
        int err;

        err = swreg_to_restricted(reg_none(), dst, reg_imm(imm), &reg, true);
        if (err) {
                nfp_prog->error = err;
                return;
        }

        __emit_br_byte(nfp_prog, reg.areg, reg.breg, reg.i8, byte, false, addr,
                       defer);
}

static void
__emit_immed(struct nfp_prog *nfp_prog, u16 areg, u16 breg, u16 imm_hi,
             enum immed_width width, bool invert,
             enum immed_shift shift, bool wr_both)
{
        u64 insn;

        insn = OP_IMMED_BASE |
                FIELD_PREP(OP_IMMED_A_SRC, areg) |
                FIELD_PREP(OP_IMMED_B_SRC, breg) |
                FIELD_PREP(OP_IMMED_IMM, imm_hi) |
                FIELD_PREP(OP_IMMED_WIDTH, width) |
                FIELD_PREP(OP_IMMED_INV, invert) |
                FIELD_PREP(OP_IMMED_SHIFT, shift) |
                FIELD_PREP(OP_IMMED_WR_AB, wr_both);

        nfp_prog_push(nfp_prog, insn);
}

static void
emit_immed(struct nfp_prog *nfp_prog, u32 dst, u16 imm,
           enum immed_width width, bool invert, enum immed_shift shift)
{
        struct nfp_insn_ur_regs reg;
        int err;

        if (FIELD_GET(NN_REG_TYPE, dst) == NN_REG_IMM) {
                nfp_prog->error = -EFAULT;
                return;
        }

        err = swreg_to_unrestricted(dst, dst, reg_imm(imm & 0xff), &reg);
        if (err) {
                nfp_prog->error = err;
                return;
        }

        __emit_immed(nfp_prog, reg.areg, reg.breg, imm >> 8, width,
                     invert, shift, reg.wr_both);
}

static void
__emit_shf(struct nfp_prog *nfp_prog, u16 dst, enum alu_dst_ab dst_ab,
           enum shf_sc sc, u8 shift,
           u16 areg, enum shf_op op, u16 breg, bool i8, bool sw, bool wr_both)
{
        u64 insn;

        if (!FIELD_FIT(OP_SHF_SHIFT, shift)) {
                nfp_prog->error = -EFAULT;
                return;
        }

        if (sc == SHF_SC_L_SHF)
                shift = 32 - shift;

        insn = OP_SHF_BASE |
                FIELD_PREP(OP_SHF_A_SRC, areg) |
                FIELD_PREP(OP_SHF_SC, sc) |
                FIELD_PREP(OP_SHF_B_SRC, breg) |
                FIELD_PREP(OP_SHF_I8, i8) |
                FIELD_PREP(OP_SHF_SW, sw) |
                FIELD_PREP(OP_SHF_DST, dst) |
                FIELD_PREP(OP_SHF_SHIFT, shift) |
                FIELD_PREP(OP_SHF_OP, op) |
                FIELD_PREP(OP_SHF_DST_AB, dst_ab) |
                FIELD_PREP(OP_SHF_WR_AB, wr_both);

        nfp_prog_push(nfp_prog, insn);
}

static void
emit_shf(struct nfp_prog *nfp_prog, u32 dst, u32 lreg, enum shf_op op, u32 rreg,
         enum shf_sc sc, u8 shift)
{
        struct nfp_insn_re_regs reg;
        int err;

        err = swreg_to_restricted(dst, lreg, rreg, &reg, true);
        if (err) {
                nfp_prog->error = err;
                return;
        }

        __emit_shf(nfp_prog, reg.dst, reg.dst_ab, sc, shift,
                   reg.areg, op, reg.breg, reg.i8, reg.swap, reg.wr_both);
}

static void
__emit_alu(struct nfp_prog *nfp_prog, u16 dst, enum alu_dst_ab dst_ab,
           u16 areg, enum alu_op op, u16 breg, bool swap, bool wr_both)
{
        u64 insn;

        insn = OP_ALU_BASE |
                FIELD_PREP(OP_ALU_A_SRC, areg) |
                FIELD_PREP(OP_ALU_B_SRC, breg) |
                FIELD_PREP(OP_ALU_DST, dst) |
                FIELD_PREP(OP_ALU_SW, swap) |
                FIELD_PREP(OP_ALU_OP, op) |
                FIELD_PREP(OP_ALU_DST_AB, dst_ab) |
                FIELD_PREP(OP_ALU_WR_AB, wr_both);

        nfp_prog_push(nfp_prog, insn);
}

static void
emit_alu(struct nfp_prog *nfp_prog, u32 dst, u32 lreg, enum alu_op op, u32 rreg)
{
        struct nfp_insn_ur_regs reg;
        int err;

        err = swreg_to_unrestricted(dst, lreg, rreg, &reg);
        if (err) {
                nfp_prog->error = err;
                return;
        }

        __emit_alu(nfp_prog, reg.dst, reg.dst_ab,
                   reg.areg, op, reg.breg, reg.swap, reg.wr_both);
}

static void
__emit_ld_field(struct nfp_prog *nfp_prog, enum shf_sc sc,
                u8 areg, u8 bmask, u8 breg, u8 shift, bool imm8,
                bool zero, bool swap, bool wr_both)
{
        u64 insn;

        insn = OP_LDF_BASE |
                FIELD_PREP(OP_LDF_A_SRC, areg) |
                FIELD_PREP(OP_LDF_SC, sc) |
                FIELD_PREP(OP_LDF_B_SRC, breg) |
                FIELD_PREP(OP_LDF_I8, imm8) |
                FIELD_PREP(OP_LDF_SW, swap) |
                FIELD_PREP(OP_LDF_ZF, zero) |
                FIELD_PREP(OP_LDF_BMASK, bmask) |
                FIELD_PREP(OP_LDF_SHF, shift) |
                FIELD_PREP(OP_LDF_WR_AB, wr_both);

        nfp_prog_push(nfp_prog, insn);
}

static void
emit_ld_field_any(struct nfp_prog *nfp_prog, enum shf_sc sc, u8 shift,
                  u32 dst, u8 bmask, u32 src, bool zero)
{
        struct nfp_insn_re_regs reg;
        int err;

        err = swreg_to_restricted(reg_none(), dst, src, &reg, true);
        if (err) {
                nfp_prog->error = err;
                return;
        }

        __emit_ld_field(nfp_prog, sc, reg.areg, bmask, reg.breg, shift,
                        reg.i8, zero, reg.swap, reg.wr_both);
}

static void
emit_ld_field(struct nfp_prog *nfp_prog, u32 dst, u8 bmask, u32 src,
              enum shf_sc sc, u8 shift)
{
        emit_ld_field_any(nfp_prog, sc, shift, dst, bmask, src, false);
}

/* --- Wrappers --- */
static bool pack_immed(u32 imm, u16 *val, enum immed_shift *shift)
{
        if (!(imm & 0xffff0000)) {
                *val = imm;
                *shift = IMMED_SHIFT_0B;
        } else if (!(imm & 0xff0000ff)) {
                *val = imm >> 8;
                *shift = IMMED_SHIFT_1B;
        } else if (!(imm & 0x0000ffff)) {
                *val = imm >> 16;
                *shift = IMMED_SHIFT_2B;
        } else {
                return false;
        }

        return true;
}
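
/* pack_immed() succeeds when the 32-bit value fits in a 16-bit window at
 * a byte-aligned position, for example:
 *        0x0000beef -> val 0xbeef, shift IMMED_SHIFT_0B
 *        0x00abcd00 -> val 0xabcd, shift IMMED_SHIFT_1B
 *        0xdead0000 -> val 0xdead, shift IMMED_SHIFT_2B
 * A value such as 0x00112233 spans more than one window and returns false,
 * making wrp_immed() below fall back to two immed instructions.
 */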

static void wrp_immed(struct nfp_prog *nfp_prog, u32 dst, u32 imm)
{
        enum immed_shift shift;
        u16 val;

        if (pack_immed(imm, &val, &shift)) {
                emit_immed(nfp_prog, dst, val, IMMED_WIDTH_ALL, false, shift);
        } else if (pack_immed(~imm, &val, &shift)) {
                emit_immed(nfp_prog, dst, val, IMMED_WIDTH_ALL, true, shift);
        } else {
                emit_immed(nfp_prog, dst, imm & 0xffff, IMMED_WIDTH_ALL,
                           false, IMMED_SHIFT_0B);
                emit_immed(nfp_prog, dst, imm >> 16, IMMED_WIDTH_WORD,
                           false, IMMED_SHIFT_2B);
        }
}

/* ur_load_imm_any() - encode immediate or use tmp register (unrestricted)
 * If @imm is small enough, encode it directly in the operand and return,
 * otherwise load @imm to a spare register and return its encoding.
 */
static u32 ur_load_imm_any(struct nfp_prog *nfp_prog, u32 imm, u32 tmp_reg)
{
        if (FIELD_FIT(UR_REG_IMM_MAX, imm))
                return reg_imm(imm);

        wrp_immed(nfp_prog, tmp_reg, imm);
        return tmp_reg;
}

/* re_load_imm_any() - encode immediate or use tmp register (restricted)
 * If @imm is small enough, encode it directly in the operand and return,
 * otherwise load @imm to a spare register and return its encoding.
 */
static u32 re_load_imm_any(struct nfp_prog *nfp_prog, u32 imm, u32 tmp_reg)
{
        if (FIELD_FIT(RE_REG_IMM_MAX, imm))
                return reg_imm(imm);

        wrp_immed(nfp_prog, tmp_reg, imm);
        return tmp_reg;
}

static void
wrp_br_special(struct nfp_prog *nfp_prog, enum br_mask mask,
               enum br_special special)
{
        emit_br(nfp_prog, mask, 0, 0);
        nfp_prog->prog[nfp_prog->prog_len - 1] |=
                FIELD_PREP(OP_BR_SPECIAL, special);
}

static void wrp_reg_mov(struct nfp_prog *nfp_prog, u16 dst, u16 src)
{
        emit_alu(nfp_prog, reg_both(dst), reg_none(), ALU_OP_NONE, reg_b(src));
}
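
/* construct_data_ind_ld() backs the BPF_LD [ABS/IND] paths: it checks
 * @offset (plus @src when @src_valid) + @size against the packet length in
 * NFP_BPF_ABI_LEN, branches to the abort handler on overflow, reads the
 * bytes from packet memory into xfer registers, and then shifts or copies
 * the (big endian) result into the GPR pair backing r0, zeroing the high
 * word since at most 4 bytes are loaded.
 */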

static int
construct_data_ind_ld(struct nfp_prog *nfp_prog, u16 offset,
                      u16 src, bool src_valid, u8 size)
{
        unsigned int i;
        u16 shift, sz;
        u32 tmp_reg;

        /* We load the value from the address indicated in @offset and then
         * shift out the data we don't need.  Note: this is big endian!
         */
        sz = size < 4 ? 4 : size;
        shift = size < 4 ? 4 - size : 0;

        if (src_valid) {
                /* Calculate the true offset (src_reg + imm) */
                tmp_reg = ur_load_imm_any(nfp_prog, offset, imm_b(nfp_prog));
                emit_alu(nfp_prog, imm_both(nfp_prog),
                         reg_a(src), ALU_OP_ADD, tmp_reg);
                /* Check packet length (size guaranteed to fit b/c it's u8) */
                emit_alu(nfp_prog, imm_a(nfp_prog),
                         imm_a(nfp_prog), ALU_OP_ADD, reg_imm(size));
                emit_alu(nfp_prog, reg_none(),
                         NFP_BPF_ABI_LEN, ALU_OP_SUB, imm_a(nfp_prog));
                wrp_br_special(nfp_prog, BR_BLO, OP_BR_GO_ABORT);
                /* Load data */
                emit_cmd(nfp_prog, CMD_TGT_READ8, CMD_MODE_32b, 0,
                         pkt_reg(nfp_prog), imm_b(nfp_prog), sz - 1, true);
        } else {
                /* Check packet length */
                tmp_reg = ur_load_imm_any(nfp_prog, offset + size,
                                          imm_a(nfp_prog));
                emit_alu(nfp_prog, reg_none(),
                         NFP_BPF_ABI_LEN, ALU_OP_SUB, tmp_reg);
                wrp_br_special(nfp_prog, BR_BLO, OP_BR_GO_ABORT);
                /* Load data */
                tmp_reg = re_load_imm_any(nfp_prog, offset, imm_b(nfp_prog));
                emit_cmd(nfp_prog, CMD_TGT_READ8, CMD_MODE_32b, 0,
                         pkt_reg(nfp_prog), tmp_reg, sz - 1, true);
        }

        i = 0;
        if (shift)
                emit_shf(nfp_prog, reg_both(0), reg_none(), SHF_OP_NONE,
                         reg_xfer(0), SHF_SC_R_SHF, shift * 8);
        else
                for (; i * 4 < size; i++)
                        emit_alu(nfp_prog, reg_both(i),
                                 reg_none(), ALU_OP_NONE, reg_xfer(i));

        if (i < 2)
                wrp_immed(nfp_prog, reg_both(1), 0);

        return 0;
}

static int construct_data_ld(struct nfp_prog *nfp_prog, u16 offset, u8 size)
{
        return construct_data_ind_ld(nfp_prog, offset, 0, false, size);
}

static int wrp_set_mark(struct nfp_prog *nfp_prog, u8 src)
{
        emit_alu(nfp_prog, NFP_BPF_ABI_MARK,
                 reg_none(), ALU_OP_NONE, reg_b(src));
        emit_alu(nfp_prog, NFP_BPF_ABI_FLAGS,
                 NFP_BPF_ABI_FLAGS, ALU_OP_OR, reg_imm(NFP_BPF_ABI_FLAG_MARK));

        return 0;
}
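
/* wrp_alu_imm() folds trivial immediates rather than emitting a real ALU
 * op: AND 0 just stores zero, OR ~0 stores all ones, XOR ~0 turns into an
 * ALU_OP_NEG of the destination, and AND ~0 / OR 0 / XOR 0 emit nothing.
 * Only the remaining cases load the immediate and perform the operation.
 */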

static void
wrp_alu_imm(struct nfp_prog *nfp_prog, u8 dst, enum alu_op alu_op, u32 imm)
{
        u32 tmp_reg;

        if (alu_op == ALU_OP_AND) {
                if (!imm)
                        wrp_immed(nfp_prog, reg_both(dst), 0);
                if (!imm || !~imm)
                        return;
        }
        if (alu_op == ALU_OP_OR) {
                if (!~imm)
                        wrp_immed(nfp_prog, reg_both(dst), ~0U);
                if (!imm || !~imm)
                        return;
        }
        if (alu_op == ALU_OP_XOR) {
                if (!~imm)
                        emit_alu(nfp_prog, reg_both(dst), reg_none(),
                                 ALU_OP_NEG, reg_b(dst));
                if (!imm || !~imm)
                        return;
        }

        tmp_reg = ur_load_imm_any(nfp_prog, imm, imm_b(nfp_prog));
        emit_alu(nfp_prog, reg_both(dst), reg_a(dst), alu_op, tmp_reg);
}

static int
wrp_alu64_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
              enum alu_op alu_op, bool skip)
{
        const struct bpf_insn *insn = &meta->insn;
        u64 imm = insn->imm; /* sign extend */

        if (skip) {
                meta->skip = true;
                return 0;
        }

        wrp_alu_imm(nfp_prog, insn->dst_reg * 2, alu_op, imm & ~0U);
        wrp_alu_imm(nfp_prog, insn->dst_reg * 2 + 1, alu_op, imm >> 32);

        return 0;
}

static int
wrp_alu64_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
              enum alu_op alu_op)
{
        u8 dst = meta->insn.dst_reg * 2, src = meta->insn.src_reg * 2;

        emit_alu(nfp_prog, reg_both(dst), reg_a(dst), alu_op, reg_b(src));
        emit_alu(nfp_prog, reg_both(dst + 1),
                 reg_a(dst + 1), alu_op, reg_b(src + 1));

        return 0;
}

static int
wrp_alu32_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
              enum alu_op alu_op, bool skip)
{
        const struct bpf_insn *insn = &meta->insn;

        if (skip) {
                meta->skip = true;
                return 0;
        }

        wrp_alu_imm(nfp_prog, insn->dst_reg * 2, alu_op, insn->imm);
        wrp_immed(nfp_prog, reg_both(insn->dst_reg * 2 + 1), 0);

        return 0;
}

static int
wrp_alu32_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
              enum alu_op alu_op)
{
        u8 dst = meta->insn.dst_reg * 2, src = meta->insn.src_reg * 2;

        emit_alu(nfp_prog, reg_both(dst), reg_a(dst), alu_op, reg_b(src));
        wrp_immed(nfp_prog, reg_both(meta->insn.dst_reg * 2 + 1), 0);

        return 0;
}

static void
wrp_test_reg_one(struct nfp_prog *nfp_prog, u8 dst, enum alu_op alu_op, u8 src,
                 enum br_mask br_mask, u16 off)
{
        emit_alu(nfp_prog, reg_none(), reg_a(dst), alu_op, reg_b(src));
        emit_br(nfp_prog, br_mask, off, 0);
}

static int
wrp_test_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
             enum alu_op alu_op, enum br_mask br_mask)
{
        const struct bpf_insn *insn = &meta->insn;

        if (insn->off < 0) /* TODO */
                return -ENOTSUPP;

        wrp_test_reg_one(nfp_prog, insn->dst_reg * 2, alu_op,
                         insn->src_reg * 2, br_mask, insn->off);
        wrp_test_reg_one(nfp_prog, insn->dst_reg * 2 + 1, alu_op,
                         insn->src_reg * 2 + 1, br_mask, insn->off);

        return 0;
}
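
/* The 64-bit compare helpers below subtract the low words with ALU_OP_SUB
 * and the high words with ALU_OP_SUB_C (subtract with carry), discarding
 * the results (reg_none() destination) so that only the condition codes
 * survive for the branch that follows.  @swap reverses the operand order,
 * which lets the two branch masks used here (BR_BLO and BR_BHS) cover both
 * the JGT and JGE callbacks.
 */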

static int
wrp_cmp_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
            enum br_mask br_mask, bool swap)
{
        const struct bpf_insn *insn = &meta->insn;
        u64 imm = insn->imm; /* sign extend */
        u8 reg = insn->dst_reg * 2;
        u32 tmp_reg;

        if (insn->off < 0) /* TODO */
                return -ENOTSUPP;

        tmp_reg = ur_load_imm_any(nfp_prog, imm & ~0U, imm_b(nfp_prog));
        if (!swap)
                emit_alu(nfp_prog, reg_none(), reg_a(reg), ALU_OP_SUB, tmp_reg);
        else
                emit_alu(nfp_prog, reg_none(), tmp_reg, ALU_OP_SUB, reg_a(reg));

        tmp_reg = ur_load_imm_any(nfp_prog, imm >> 32, imm_b(nfp_prog));
        if (!swap)
                emit_alu(nfp_prog, reg_none(),
                         reg_a(reg + 1), ALU_OP_SUB_C, tmp_reg);
        else
                emit_alu(nfp_prog, reg_none(),
                         tmp_reg, ALU_OP_SUB_C, reg_a(reg + 1));

        emit_br(nfp_prog, br_mask, insn->off, 0);

        return 0;
}

static int
wrp_cmp_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
            enum br_mask br_mask, bool swap)
{
        const struct bpf_insn *insn = &meta->insn;
        u8 areg = insn->src_reg * 2, breg = insn->dst_reg * 2;

        if (insn->off < 0) /* TODO */
                return -ENOTSUPP;

        if (swap) {
                areg ^= breg;
                breg ^= areg;
                areg ^= breg;
        }

        emit_alu(nfp_prog, reg_none(), reg_a(areg), ALU_OP_SUB, reg_b(breg));
        emit_alu(nfp_prog, reg_none(),
                 reg_a(areg + 1), ALU_OP_SUB_C, reg_b(breg + 1));
        emit_br(nfp_prog, br_mask, insn->off, 0);

        return 0;
}

/* --- Callbacks --- */
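/* Register mapping used by all the callbacks: each 64-bit eBPF register
 * Rn lives in a pair of 32-bit NFP GPRs, 2 * n holding the low word and
 * 2 * n + 1 the high word.  32-bit (BPF_ALU) operations write the low GPR
 * and explicitly zero the high one to match eBPF's zero-extension
 * semantics.
 */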
static int mov_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
        const struct bpf_insn *insn = &meta->insn;

        wrp_reg_mov(nfp_prog, insn->dst_reg * 2, insn->src_reg * 2);
        wrp_reg_mov(nfp_prog, insn->dst_reg * 2 + 1, insn->src_reg * 2 + 1);

        return 0;
}

static int mov_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
        u64 imm = meta->insn.imm; /* sign extend */

        wrp_immed(nfp_prog, reg_both(meta->insn.dst_reg * 2), imm & ~0U);
        wrp_immed(nfp_prog, reg_both(meta->insn.dst_reg * 2 + 1), imm >> 32);

        return 0;
}

static int xor_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
        return wrp_alu64_reg(nfp_prog, meta, ALU_OP_XOR);
}

static int xor_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
        return wrp_alu64_imm(nfp_prog, meta, ALU_OP_XOR, !meta->insn.imm);
}

static int and_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
        return wrp_alu64_reg(nfp_prog, meta, ALU_OP_AND);
}

static int and_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
        return wrp_alu64_imm(nfp_prog, meta, ALU_OP_AND, !~meta->insn.imm);
}

static int or_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
        return wrp_alu64_reg(nfp_prog, meta, ALU_OP_OR);
}

static int or_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
        return wrp_alu64_imm(nfp_prog, meta, ALU_OP_OR, !meta->insn.imm);
}

static int add_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
        const struct bpf_insn *insn = &meta->insn;

        emit_alu(nfp_prog, reg_both(insn->dst_reg * 2),
                 reg_a(insn->dst_reg * 2), ALU_OP_ADD,
                 reg_b(insn->src_reg * 2));
        emit_alu(nfp_prog, reg_both(insn->dst_reg * 2 + 1),
                 reg_a(insn->dst_reg * 2 + 1), ALU_OP_ADD_C,
                 reg_b(insn->src_reg * 2 + 1));

        return 0;
}

static int add_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
        const struct bpf_insn *insn = &meta->insn;
        u64 imm = insn->imm; /* sign extend */

        wrp_alu_imm(nfp_prog, insn->dst_reg * 2, ALU_OP_ADD, imm & ~0U);
        wrp_alu_imm(nfp_prog, insn->dst_reg * 2 + 1, ALU_OP_ADD_C, imm >> 32);

        return 0;
}

static int sub_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
        const struct bpf_insn *insn = &meta->insn;

        emit_alu(nfp_prog, reg_both(insn->dst_reg * 2),
                 reg_a(insn->dst_reg * 2), ALU_OP_SUB,
                 reg_b(insn->src_reg * 2));
        emit_alu(nfp_prog, reg_both(insn->dst_reg * 2 + 1),
                 reg_a(insn->dst_reg * 2 + 1), ALU_OP_SUB_C,
                 reg_b(insn->src_reg * 2 + 1));

        return 0;
}

static int sub_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
        const struct bpf_insn *insn = &meta->insn;
        u64 imm = insn->imm; /* sign extend */

        wrp_alu_imm(nfp_prog, insn->dst_reg * 2, ALU_OP_SUB, imm & ~0U);
        wrp_alu_imm(nfp_prog, insn->dst_reg * 2 + 1, ALU_OP_SUB_C, imm >> 32);

        return 0;
}

static int shl_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
        const struct bpf_insn *insn = &meta->insn;

        if (insn->imm != 32)
                return 1; /* TODO */

        wrp_reg_mov(nfp_prog, insn->dst_reg * 2 + 1, insn->dst_reg * 2);
        wrp_immed(nfp_prog, reg_both(insn->dst_reg * 2), 0);

        return 0;
}

static int shr_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
        const struct bpf_insn *insn = &meta->insn;

        if (insn->imm != 32)
                return 1; /* TODO */

        wrp_reg_mov(nfp_prog, insn->dst_reg * 2, insn->dst_reg * 2 + 1);
        wrp_immed(nfp_prog, reg_both(insn->dst_reg * 2 + 1), 0);

        return 0;
}

static int mov_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
        const struct bpf_insn *insn = &meta->insn;

        wrp_reg_mov(nfp_prog, insn->dst_reg * 2, insn->src_reg * 2);
        wrp_immed(nfp_prog, reg_both(insn->dst_reg * 2 + 1), 0);

        return 0;
}

static int mov_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
        const struct bpf_insn *insn = &meta->insn;

        wrp_immed(nfp_prog, reg_both(insn->dst_reg * 2), insn->imm);
        wrp_immed(nfp_prog, reg_both(insn->dst_reg * 2 + 1), 0);

        return 0;
}

static int xor_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
        return wrp_alu32_reg(nfp_prog, meta, ALU_OP_XOR);
}

static int xor_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
        return wrp_alu32_imm(nfp_prog, meta, ALU_OP_XOR, !~meta->insn.imm);
}

static int and_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
        return wrp_alu32_reg(nfp_prog, meta, ALU_OP_AND);
}

static int and_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
        return wrp_alu32_imm(nfp_prog, meta, ALU_OP_AND, !~meta->insn.imm);
}

static int or_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
        return wrp_alu32_reg(nfp_prog, meta, ALU_OP_OR);
}

static int or_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
        return wrp_alu32_imm(nfp_prog, meta, ALU_OP_OR, !meta->insn.imm);
}

static int add_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
        return wrp_alu32_reg(nfp_prog, meta, ALU_OP_ADD);
}

static int add_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
        return wrp_alu32_imm(nfp_prog, meta, ALU_OP_ADD, !meta->insn.imm);
}

static int sub_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
        return wrp_alu32_reg(nfp_prog, meta, ALU_OP_SUB);
}

static int sub_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
        return wrp_alu32_imm(nfp_prog, meta, ALU_OP_SUB, !meta->insn.imm);
}

static int shl_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
        const struct bpf_insn *insn = &meta->insn;

        if (!insn->imm)
                return 1; /* TODO: zero shift means indirect */

        emit_shf(nfp_prog, reg_both(insn->dst_reg * 2),
                 reg_none(), SHF_OP_NONE, reg_b(insn->dst_reg * 2),
                 SHF_SC_L_SHF, insn->imm);
        wrp_immed(nfp_prog, reg_both(insn->dst_reg * 2 + 1), 0);

        return 0;
}
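
/* BPF_LD | BPF_IMM | BPF_DW is a two-slot BPF instruction whose second
 * slot carries the upper 32 bits in its imm field.  imm_ld8() loads the
 * low word and registers imm_ld8_part2() as meta->double_cb;
 * nfp_translate() then calls that callback for the following instruction
 * to load the high word.
 */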
static int imm_ld8_part2(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
        wrp_immed(nfp_prog, reg_both(nfp_meta_prev(meta)->insn.dst_reg * 2 + 1),
                  meta->insn.imm);

        return 0;
}

static int imm_ld8(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
        const struct bpf_insn *insn = &meta->insn;

        meta->double_cb = imm_ld8_part2;
        wrp_immed(nfp_prog, reg_both(insn->dst_reg * 2), insn->imm);

        return 0;
}

static int data_ld1(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
        return construct_data_ld(nfp_prog, meta->insn.imm, 1);
}

static int data_ld2(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
        return construct_data_ld(nfp_prog, meta->insn.imm, 2);
}

static int data_ld4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
        return construct_data_ld(nfp_prog, meta->insn.imm, 4);
}

static int data_ind_ld1(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
        return construct_data_ind_ld(nfp_prog, meta->insn.imm,
                                     meta->insn.src_reg * 2, true, 1);
}

static int data_ind_ld2(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
        return construct_data_ind_ld(nfp_prog, meta->insn.imm,
                                     meta->insn.src_reg * 2, true, 2);
}

static int data_ind_ld4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
        return construct_data_ind_ld(nfp_prog, meta->insn.imm,
                                     meta->insn.src_reg * 2, true, 4);
}

static int mem_ldx4_skb(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
        if (meta->insn.off == offsetof(struct sk_buff, len))
                emit_alu(nfp_prog, reg_both(meta->insn.dst_reg * 2),
                         reg_none(), ALU_OP_NONE, NFP_BPF_ABI_LEN);
        else
                return -ENOTSUPP;

        return 0;
}

static int mem_ldx4_xdp(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
        u32 dst = reg_both(meta->insn.dst_reg * 2);

        if (meta->insn.off != offsetof(struct xdp_md, data) &&
            meta->insn.off != offsetof(struct xdp_md, data_end))
                return -ENOTSUPP;

        emit_alu(nfp_prog, dst, reg_none(), ALU_OP_NONE, NFP_BPF_ABI_PKT);

        if (meta->insn.off == offsetof(struct xdp_md, data))
                return 0;

        emit_alu(nfp_prog, dst, dst, ALU_OP_ADD, NFP_BPF_ABI_LEN);

        return 0;
}

static int mem_ldx4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
        int ret;

        if (nfp_prog->act == NN_ACT_XDP)
                ret = mem_ldx4_xdp(nfp_prog, meta);
        else
                ret = mem_ldx4_skb(nfp_prog, meta);

        wrp_immed(nfp_prog, reg_both(meta->insn.dst_reg * 2 + 1), 0);

        return ret;
}

static int mem_stx4_skb(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
        if (meta->insn.off == offsetof(struct sk_buff, mark))
                return wrp_set_mark(nfp_prog, meta->insn.src_reg * 2);

        return -ENOTSUPP;
}

static int mem_stx4_xdp(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
        return -ENOTSUPP;
}

static int mem_stx4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
        if (nfp_prog->act == NN_ACT_XDP)
                return mem_stx4_xdp(nfp_prog, meta);
        return mem_stx4_skb(nfp_prog, meta);
}

static int jump(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
        if (meta->insn.off < 0) /* TODO */
                return -ENOTSUPP;
        emit_br(nfp_prog, BR_UNC, meta->insn.off, 0);

        return 0;
}
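
/* jeq_imm() below tests 64-bit equality without a 64-bit ALU: XOR each
 * half of dst with the matching half of the immediate (a half that is
 * zero needs no XOR, the register itself is tested), OR the two results
 * together and branch if the outcome is zero (BR_BEQ).
 */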
static int jeq_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
        const struct bpf_insn *insn = &meta->insn;
        u64 imm = insn->imm; /* sign extend */
        u32 or1 = reg_a(insn->dst_reg * 2), or2 = reg_b(insn->dst_reg * 2 + 1);
        u32 tmp_reg;

        if (insn->off < 0) /* TODO */
                return -ENOTSUPP;

        if (imm & ~0U) {
                tmp_reg = ur_load_imm_any(nfp_prog, imm & ~0U, imm_b(nfp_prog));
                emit_alu(nfp_prog, imm_a(nfp_prog),
                         reg_a(insn->dst_reg * 2), ALU_OP_XOR, tmp_reg);
                or1 = imm_a(nfp_prog);
        }

        if (imm >> 32) {
                tmp_reg = ur_load_imm_any(nfp_prog, imm >> 32, imm_b(nfp_prog));
                emit_alu(nfp_prog, imm_b(nfp_prog),
                         reg_a(insn->dst_reg * 2 + 1), ALU_OP_XOR, tmp_reg);
                or2 = imm_b(nfp_prog);
        }

        emit_alu(nfp_prog, reg_none(), or1, ALU_OP_OR, or2);
        emit_br(nfp_prog, BR_BEQ, insn->off, 0);

        return 0;
}

static int jgt_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
        return wrp_cmp_imm(nfp_prog, meta, BR_BLO, false);
}

static int jge_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
        return wrp_cmp_imm(nfp_prog, meta, BR_BHS, true);
}

static int jset_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
        const struct bpf_insn *insn = &meta->insn;
        u64 imm = insn->imm; /* sign extend */
        u32 tmp_reg;

        if (insn->off < 0) /* TODO */
                return -ENOTSUPP;

        if (!imm) {
                meta->skip = true;
                return 0;
        }

        if (imm & ~0U) {
                tmp_reg = ur_load_imm_any(nfp_prog, imm & ~0U, imm_b(nfp_prog));
                emit_alu(nfp_prog, reg_none(),
                         reg_a(insn->dst_reg * 2), ALU_OP_AND, tmp_reg);
                emit_br(nfp_prog, BR_BNE, insn->off, 0);
        }

        if (imm >> 32) {
                tmp_reg = ur_load_imm_any(nfp_prog, imm >> 32, imm_b(nfp_prog));
                emit_alu(nfp_prog, reg_none(),
                         reg_a(insn->dst_reg * 2 + 1), ALU_OP_AND, tmp_reg);
                emit_br(nfp_prog, BR_BNE, insn->off, 0);
        }

        return 0;
}

static int jne_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
        const struct bpf_insn *insn = &meta->insn;
        u64 imm = insn->imm; /* sign extend */
        u32 tmp_reg;

        if (insn->off < 0) /* TODO */
                return -ENOTSUPP;

        if (!imm) {
                emit_alu(nfp_prog, reg_none(), reg_a(insn->dst_reg * 2),
                         ALU_OP_OR, reg_b(insn->dst_reg * 2 + 1));
                emit_br(nfp_prog, BR_BNE, insn->off, 0);
        }

        tmp_reg = ur_load_imm_any(nfp_prog, imm & ~0U, imm_b(nfp_prog));
        emit_alu(nfp_prog, reg_none(),
                 reg_a(insn->dst_reg * 2), ALU_OP_XOR, tmp_reg);
        emit_br(nfp_prog, BR_BNE, insn->off, 0);

        tmp_reg = ur_load_imm_any(nfp_prog, imm >> 32, imm_b(nfp_prog));
        emit_alu(nfp_prog, reg_none(),
                 reg_a(insn->dst_reg * 2 + 1), ALU_OP_XOR, tmp_reg);
        emit_br(nfp_prog, BR_BNE, insn->off, 0);

        return 0;
}

static int jeq_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
        const struct bpf_insn *insn = &meta->insn;

        if (insn->off < 0) /* TODO */
                return -ENOTSUPP;

        emit_alu(nfp_prog, imm_a(nfp_prog), reg_a(insn->dst_reg * 2),
                 ALU_OP_XOR, reg_b(insn->src_reg * 2));
        emit_alu(nfp_prog, imm_b(nfp_prog), reg_a(insn->dst_reg * 2 + 1),
                 ALU_OP_XOR, reg_b(insn->src_reg * 2 + 1));
        emit_alu(nfp_prog, reg_none(),
                 imm_a(nfp_prog), ALU_OP_OR, imm_b(nfp_prog));
        emit_br(nfp_prog, BR_BEQ, insn->off, 0);

        return 0;
}

static int jgt_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
        return wrp_cmp_reg(nfp_prog, meta, BR_BLO, false);
}

static int jge_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
        return wrp_cmp_reg(nfp_prog, meta, BR_BHS, true);
}

static int jset_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
        return wrp_test_reg(nfp_prog, meta, ALU_OP_AND, BR_BNE);
}

static int jne_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
        return wrp_test_reg(nfp_prog, meta, ALU_OP_XOR, BR_BNE);
}

static int goto_out(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
        wrp_br_special(nfp_prog, BR_UNC, OP_BR_GO_OUT);

        return 0;
}

static const instr_cb_t instr_cb[256] = {
        [BPF_ALU64 | BPF_MOV | BPF_X] =  mov_reg64,
        [BPF_ALU64 | BPF_MOV | BPF_K] =  mov_imm64,
        [BPF_ALU64 | BPF_XOR | BPF_X] =  xor_reg64,
        [BPF_ALU64 | BPF_XOR | BPF_K] =  xor_imm64,
        [BPF_ALU64 | BPF_AND | BPF_X] =  and_reg64,
        [BPF_ALU64 | BPF_AND | BPF_K] =  and_imm64,
        [BPF_ALU64 | BPF_OR | BPF_X] =   or_reg64,
        [BPF_ALU64 | BPF_OR | BPF_K] =   or_imm64,
        [BPF_ALU64 | BPF_ADD | BPF_X] =  add_reg64,
        [BPF_ALU64 | BPF_ADD | BPF_K] =  add_imm64,
        [BPF_ALU64 | BPF_SUB | BPF_X] =  sub_reg64,
        [BPF_ALU64 | BPF_SUB | BPF_K] =  sub_imm64,
        [BPF_ALU64 | BPF_LSH | BPF_K] =  shl_imm64,
        [BPF_ALU64 | BPF_RSH | BPF_K] =  shr_imm64,
        [BPF_ALU | BPF_MOV | BPF_X] =    mov_reg,
        [BPF_ALU | BPF_MOV | BPF_K] =    mov_imm,
        [BPF_ALU | BPF_XOR | BPF_X] =    xor_reg,
        [BPF_ALU | BPF_XOR | BPF_K] =    xor_imm,
        [BPF_ALU | BPF_AND | BPF_X] =    and_reg,
        [BPF_ALU | BPF_AND | BPF_K] =    and_imm,
        [BPF_ALU | BPF_OR | BPF_X] =     or_reg,
        [BPF_ALU | BPF_OR | BPF_K] =     or_imm,
        [BPF_ALU | BPF_ADD | BPF_X] =    add_reg,
        [BPF_ALU | BPF_ADD | BPF_K] =    add_imm,
        [BPF_ALU | BPF_SUB | BPF_X] =    sub_reg,
        [BPF_ALU | BPF_SUB | BPF_K] =    sub_imm,
        [BPF_ALU | BPF_LSH | BPF_K] =    shl_imm,
        [BPF_LD | BPF_IMM | BPF_DW] =    imm_ld8,
        [BPF_LD | BPF_ABS | BPF_B] =     data_ld1,
        [BPF_LD | BPF_ABS | BPF_H] =     data_ld2,
        [BPF_LD | BPF_ABS | BPF_W] =     data_ld4,
        [BPF_LD | BPF_IND | BPF_B] =     data_ind_ld1,
        [BPF_LD | BPF_IND | BPF_H] =     data_ind_ld2,
        [BPF_LD | BPF_IND | BPF_W] =     data_ind_ld4,
        [BPF_LDX | BPF_MEM | BPF_W] =    mem_ldx4,
        [BPF_STX | BPF_MEM | BPF_W] =    mem_stx4,
        [BPF_JMP | BPF_JA | BPF_K] =     jump,
        [BPF_JMP | BPF_JEQ | BPF_K] =    jeq_imm,
        [BPF_JMP | BPF_JGT | BPF_K] =    jgt_imm,
        [BPF_JMP | BPF_JGE | BPF_K] =    jge_imm,
        [BPF_JMP | BPF_JSET | BPF_K] =   jset_imm,
        [BPF_JMP | BPF_JNE | BPF_K] =    jne_imm,
        [BPF_JMP | BPF_JEQ | BPF_X] =    jeq_reg,
        [BPF_JMP | BPF_JGT | BPF_X] =    jgt_reg,
        [BPF_JMP | BPF_JGE | BPF_X] =    jge_reg,
        [BPF_JMP | BPF_JSET | BPF_X] =   jset_reg,
        [BPF_JMP | BPF_JNE | BPF_X] =    jne_reg,
        [BPF_JMP | BPF_EXIT] =           goto_out,
};
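
/* instr_cb is indexed directly by the BPF opcode byte (insn.code).
 * Entries left NULL make nfp_translate() fail with -ENOENT, so an
 * unsupported opcode rejects the whole program instead of mistranslating.
 */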

/* --- Misc code --- */
static void br_set_offset(u64 *instr, u16 offset)
{
        u16 addr_lo, addr_hi;

        addr_lo = offset & (OP_BR_ADDR_LO >> __bf_shf(OP_BR_ADDR_LO));
        addr_hi = offset != addr_lo;
        *instr &= ~(OP_BR_ADDR_HI | OP_BR_ADDR_LO);
        *instr |= FIELD_PREP(OP_BR_ADDR_HI, addr_hi);
        *instr |= FIELD_PREP(OP_BR_ADDR_LO, addr_lo);
}

/* --- Assembler logic --- */
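/* Branches are emitted with the raw BPF jump offset (counted in BPF
 * instructions) as a placeholder address.  nfp_fixup_branches() walks each
 * jump, steps @off entries forward through the meta list to find the
 * target, and rewrites the branch words of the translated instruction with
 * the target's NFP offset via br_set_offset().  "Special" branches
 * (go out / abort) are resolved in a second pass from their OP_BR_SPECIAL
 * marker.
 */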
  1161. static int nfp_fixup_branches(struct nfp_prog *nfp_prog)
  1162. {
  1163. struct nfp_insn_meta *meta, *next;
  1164. u32 off, br_idx;
  1165. u32 idx;
  1166. nfp_for_each_insn_walk2(nfp_prog, meta, next) {
  1167. if (meta->skip)
  1168. continue;
  1169. if (BPF_CLASS(meta->insn.code) != BPF_JMP)
  1170. continue;
  1171. br_idx = nfp_prog_offset_to_index(nfp_prog, next->off) - 1;
  1172. if (!nfp_is_br(nfp_prog->prog[br_idx])) {
  1173. pr_err("Fixup found block not ending in branch %d %02x %016llx!!\n",
  1174. br_idx, meta->insn.code, nfp_prog->prog[br_idx]);
  1175. return -ELOOP;
  1176. }
  1177. /* Leave special branches for later */
  1178. if (FIELD_GET(OP_BR_SPECIAL, nfp_prog->prog[br_idx]))
  1179. continue;
  1180. /* Find the target offset in assembler realm */
  1181. off = meta->insn.off;
  1182. if (!off) {
  1183. pr_err("Fixup found zero offset!!\n");
  1184. return -ELOOP;
  1185. }
  1186. while (off && nfp_meta_has_next(nfp_prog, next)) {
  1187. next = nfp_meta_next(next);
  1188. off--;
  1189. }
  1190. if (off) {
  1191. pr_err("Fixup found too large jump!! %d\n", off);
  1192. return -ELOOP;
  1193. }
  1194. if (next->skip) {
  1195. pr_err("Branch landing on removed instruction!!\n");
  1196. return -ELOOP;
  1197. }
  1198. for (idx = nfp_prog_offset_to_index(nfp_prog, meta->off);
  1199. idx <= br_idx; idx++) {
  1200. if (!nfp_is_br(nfp_prog->prog[idx]))
  1201. continue;
  1202. br_set_offset(&nfp_prog->prog[idx], next->off);
  1203. }
  1204. }
  1205. /* Fixup 'goto out's separately, they can be scattered around */
  1206. for (br_idx = 0; br_idx < nfp_prog->prog_len; br_idx++) {
  1207. enum br_special special;
  1208. if ((nfp_prog->prog[br_idx] & OP_BR_BASE_MASK) != OP_BR_BASE)
  1209. continue;
  1210. special = FIELD_GET(OP_BR_SPECIAL, nfp_prog->prog[br_idx]);
  1211. switch (special) {
  1212. case OP_BR_NORMAL:
  1213. break;
  1214. case OP_BR_GO_OUT:
  1215. br_set_offset(&nfp_prog->prog[br_idx],
  1216. nfp_prog->tgt_out);
  1217. break;
  1218. case OP_BR_GO_ABORT:
  1219. br_set_offset(&nfp_prog->prog[br_idx],
  1220. nfp_prog->tgt_abort);
  1221. break;
  1222. }
  1223. nfp_prog->prog[br_idx] &= ~OP_BR_SPECIAL;
  1224. }
  1225. return 0;
  1226. }
  1227. static void nfp_intro(struct nfp_prog *nfp_prog)
  1228. {
  1229. emit_alu(nfp_prog, pkt_reg(nfp_prog),
  1230. reg_none(), ALU_OP_NONE, NFP_BPF_ABI_PKT);
  1231. }
  1232. static void nfp_outro_tc_legacy(struct nfp_prog *nfp_prog)
  1233. {
  1234. const u8 act2code[] = {
  1235. [NN_ACT_TC_DROP] = 0x22,
  1236. [NN_ACT_TC_REDIR] = 0x24
  1237. };
  1238. /* Target for aborts */
  1239. nfp_prog->tgt_abort = nfp_prog_current_offset(nfp_prog);
  1240. wrp_immed(nfp_prog, reg_both(0), 0);
  1241. /* Target for normal exits */
  1242. nfp_prog->tgt_out = nfp_prog_current_offset(nfp_prog);
  1243. /* Legacy TC mode:
  1244. * 0 0x11 -> pass, count as stat0
  1245. * -1 drop 0x22 -> drop, count as stat1
  1246. * redir 0x24 -> redir, count as stat1
  1247. * ife mark 0x21 -> pass, count as stat1
  1248. * ife + tx 0x24 -> redir, count as stat1
  1249. */
  1250. emit_br_byte_neq(nfp_prog, reg_b(0), 0xff, 0, nfp_prog->tgt_done, 2);
  1251. emit_alu(nfp_prog, reg_a(0),
  1252. reg_none(), ALU_OP_NONE, NFP_BPF_ABI_FLAGS);
  1253. emit_ld_field(nfp_prog, reg_a(0), 0xc, reg_imm(0x11), SHF_SC_L_SHF, 16);
  1254. emit_br(nfp_prog, BR_UNC, nfp_prog->tgt_done, 1);
  1255. emit_ld_field(nfp_prog, reg_a(0), 0xc, reg_imm(act2code[nfp_prog->act]),
  1256. SHF_SC_L_SHF, 16);
  1257. }
  1258. static void nfp_outro_tc_da(struct nfp_prog *nfp_prog)
  1259. {
  1260. /* TC direct-action mode:
  1261. * 0,1 ok NOT SUPPORTED[1]
  1262. * 2 drop 0x22 -> drop, count as stat1
  1263. * 4,5 nuke 0x02 -> drop
  1264. * 7 redir 0x44 -> redir, count as stat2
  1265. * * unspec 0x11 -> pass, count as stat0
  1266. *
  1267. * [1] We can't support OK and RECLASSIFY because we can't tell TC
  1268. * the exact decision made. We are forced to support UNSPEC
  1269. * to handle aborts so that's the only one we handle for passing
  1270. * packets up the stack.
  1271. */
  1272. /* Target for aborts */
  1273. nfp_prog->tgt_abort = nfp_prog_current_offset(nfp_prog);
  1274. emit_br_def(nfp_prog, nfp_prog->tgt_done, 2);
  1275. emit_alu(nfp_prog, reg_a(0),
  1276. reg_none(), ALU_OP_NONE, NFP_BPF_ABI_FLAGS);
  1277. emit_ld_field(nfp_prog, reg_a(0), 0xc, reg_imm(0x11), SHF_SC_L_SHF, 16);
  1278. /* Target for normal exits */
  1279. nfp_prog->tgt_out = nfp_prog_current_offset(nfp_prog);
  1280. /* if R0 > 7 jump to abort */
  1281. emit_alu(nfp_prog, reg_none(), reg_imm(7), ALU_OP_SUB, reg_b(0));
  1282. emit_br(nfp_prog, BR_BLO, nfp_prog->tgt_abort, 0);
  1283. emit_alu(nfp_prog, reg_a(0),
  1284. reg_none(), ALU_OP_NONE, NFP_BPF_ABI_FLAGS);
  1285. wrp_immed(nfp_prog, reg_b(2), 0x41221211);
  1286. wrp_immed(nfp_prog, reg_b(3), 0x41001211);
  1287. emit_shf(nfp_prog, reg_a(1),
  1288. reg_none(), SHF_OP_NONE, reg_b(0), SHF_SC_L_SHF, 2);
  1289. emit_alu(nfp_prog, reg_none(), reg_a(1), ALU_OP_OR, reg_imm(0));
  1290. emit_shf(nfp_prog, reg_a(2),
  1291. reg_imm(0xf), SHF_OP_AND, reg_b(2), SHF_SC_R_SHF, 0);
  1292. emit_alu(nfp_prog, reg_none(), reg_a(1), ALU_OP_OR, reg_imm(0));
  1293. emit_shf(nfp_prog, reg_b(2),
  1294. reg_imm(0xf), SHF_OP_AND, reg_b(3), SHF_SC_R_SHF, 0);
  1295. emit_br_def(nfp_prog, nfp_prog->tgt_done, 2);
  1296. emit_shf(nfp_prog, reg_b(2),
  1297. reg_a(2), SHF_OP_OR, reg_b(2), SHF_SC_L_SHF, 4);
  1298. emit_ld_field(nfp_prog, reg_a(0), 0xc, reg_b(2), SHF_SC_L_SHF, 16);
  1299. }
  1300. static void nfp_outro_xdp(struct nfp_prog *nfp_prog)
  1301. {
  1302. /* XDP return codes:
  1303. * 0 aborted 0x82 -> drop, count as stat3
  1304. * 1 drop 0x22 -> drop, count as stat1
  1305. * 2 pass 0x11 -> pass, count as stat0
  1306. * 3 tx 0x44 -> redir, count as stat2
  1307. * * unknown 0x82 -> drop, count as stat3
  1308. */
	/* Target for aborts */
	nfp_prog->tgt_abort = nfp_prog_current_offset(nfp_prog);

	emit_br_def(nfp_prog, nfp_prog->tgt_done, 2);
	emit_alu(nfp_prog, reg_a(0),
		 reg_none(), ALU_OP_NONE, NFP_BPF_ABI_FLAGS);
	emit_ld_field(nfp_prog, reg_a(0), 0xc, reg_imm(0x82), SHF_SC_L_SHF, 16);

	/* Target for normal exits */
	nfp_prog->tgt_out = nfp_prog_current_offset(nfp_prog);

	/* if R0 > 3 jump to abort */
	emit_alu(nfp_prog, reg_none(), reg_imm(3), ALU_OP_SUB, reg_b(0));
	emit_br(nfp_prog, BR_BLO, nfp_prog->tgt_abort, 0);
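
	/* 0x44112282 is a 4-entry byte lookup table indexed by R0:
	 * byte 0 is 0x82 (aborted), byte 1 0x22 (drop), byte 2 0x11
	 * (pass) and byte 3 0x44 (tx).  R0 is shifted left by 3 to turn
	 * it into the bit offset used by the indirect-shift extraction.
	 */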
	wrp_immed(nfp_prog, reg_b(2), 0x44112282);

	emit_shf(nfp_prog, reg_a(1),
		 reg_none(), SHF_OP_NONE, reg_b(0), SHF_SC_L_SHF, 3);

	emit_alu(nfp_prog, reg_none(), reg_a(1), ALU_OP_OR, reg_imm(0));
	emit_shf(nfp_prog, reg_b(2),
		 reg_imm(0xff), SHF_OP_AND, reg_b(2), SHF_SC_R_SHF, 0);

	emit_br_def(nfp_prog, nfp_prog->tgt_done, 2);

	emit_alu(nfp_prog, reg_a(0),
		 reg_none(), ALU_OP_NONE, NFP_BPF_ABI_FLAGS);
	emit_ld_field(nfp_prog, reg_a(0), 0xc, reg_b(2), SHF_SC_L_SHF, 16);
}

static void nfp_outro(struct nfp_prog *nfp_prog)
{
	switch (nfp_prog->act) {
	case NN_ACT_DIRECT:
		nfp_outro_tc_da(nfp_prog);
		break;
	case NN_ACT_TC_DROP:
	case NN_ACT_TC_REDIR:
		nfp_outro_tc_legacy(nfp_prog);
		break;
	case NN_ACT_XDP:
		nfp_outro_xdp(nfp_prog);
		break;
	}
}

static int nfp_translate(struct nfp_prog *nfp_prog)
{
	struct nfp_insn_meta *meta;
	int err;

	nfp_intro(nfp_prog);
	if (nfp_prog->error)
		return nfp_prog->error;

	list_for_each_entry(meta, &nfp_prog->insns, l) {
		instr_cb_t cb = instr_cb[meta->insn.code];

		meta->off = nfp_prog_current_offset(nfp_prog);

		if (meta->skip) {
			nfp_prog->n_translated++;
			continue;
		}
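
		/* A previous instruction may claim this one as the second
		 * half of a pair via double_cb, e.g. the two-slot
		 * BPF_LD | BPF_IMM | BPF_DW immediate load.
		 */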
		if (nfp_meta_has_prev(nfp_prog, meta) &&
		    nfp_meta_prev(meta)->double_cb)
			cb = nfp_meta_prev(meta)->double_cb;
		if (!cb)
			return -ENOENT;
		err = cb(nfp_prog, meta);
		if (err)
			return err;

		nfp_prog->n_translated++;
	}

	nfp_outro(nfp_prog);
	if (nfp_prog->error)
		return nfp_prog->error;

	return nfp_fixup_branches(nfp_prog);
}

static int
nfp_prog_prepare(struct nfp_prog *nfp_prog, const struct bpf_insn *prog,
		 unsigned int cnt)
{
	unsigned int i;

	for (i = 0; i < cnt; i++) {
		struct nfp_insn_meta *meta;

		meta = kzalloc(sizeof(*meta), GFP_KERNEL);
		if (!meta)
			return -ENOMEM;

		meta->insn = prog[i];
		meta->n = i;

		list_add_tail(&meta->l, &nfp_prog->insns);
	}

	return 0;
}

/* --- Optimizations --- */
static void nfp_bpf_opt_reg_init(struct nfp_prog *nfp_prog)
{
	struct nfp_insn_meta *meta;

	list_for_each_entry(meta, &nfp_prog->insns, l) {
		struct bpf_insn insn = meta->insn;

		/* Programs converted from cBPF start with register xoring */
		if (insn.code == (BPF_ALU64 | BPF_XOR | BPF_X) &&
		    insn.src_reg == insn.dst_reg)
			continue;

		/* Programs start with R6 = R1 but we ignore the skb pointer */
		if (insn.code == (BPF_ALU64 | BPF_MOV | BPF_X) &&
		    insn.src_reg == 1 && insn.dst_reg == 6)
			meta->skip = true;

		/* Return as soon as something doesn't match */
		if (!meta->skip)
			return;
	}
}

/* Try to rename registers so that program uses only low ones */
static int nfp_bpf_opt_reg_rename(struct nfp_prog *nfp_prog)
{
	bool reg_used[MAX_BPF_REG] = {};
	u8 tgt_reg[MAX_BPF_REG] = {};
	struct nfp_insn_meta *meta;
	unsigned int i, j;

	list_for_each_entry(meta, &nfp_prog->insns, l) {
		if (meta->skip)
			continue;

		reg_used[meta->insn.src_reg] = true;
		reg_used[meta->insn.dst_reg] = true;
	}
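
	/* Compact the used registers onto the lowest indexes, e.g. a
	 * program touching only R0 and R5 is rewritten to use R0 and R1
	 * and ends up with num_regs == 2.
	 */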
	for (i = 0, j = 0; i < ARRAY_SIZE(tgt_reg); i++) {
		if (!reg_used[i])
			continue;

		tgt_reg[i] = j++;
	}
	nfp_prog->num_regs = j;

	list_for_each_entry(meta, &nfp_prog->insns, l) {
		meta->insn.src_reg = tgt_reg[meta->insn.src_reg];
		meta->insn.dst_reg = tgt_reg[meta->insn.dst_reg];
	}

	return 0;
}

/* Remove masking after load since our load guarantees this is not needed */
static void nfp_bpf_opt_ld_mask(struct nfp_prog *nfp_prog)
{
	struct nfp_insn_meta *meta1, *meta2;
	const s32 exp_mask[] = {
		[BPF_B] = 0x000000ffU,
		[BPF_H] = 0x0000ffffU,
		[BPF_W] = 0xffffffffU,
	};
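
	/* Match a direct packet load followed immediately by an AND with
	 * exactly the mask the load already guarantees, e.g.
	 * BPF_LD_ABS(BPF_B, off) followed by r0 &= 0xff, and mark the
	 * redundant AND for skipping.
	 */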
	nfp_for_each_insn_walk2(nfp_prog, meta1, meta2) {
		struct bpf_insn insn, next;

		insn = meta1->insn;
		next = meta2->insn;

		if (BPF_CLASS(insn.code) != BPF_LD)
			continue;
		if (BPF_MODE(insn.code) != BPF_ABS &&
		    BPF_MODE(insn.code) != BPF_IND)
			continue;

		if (next.code != (BPF_ALU64 | BPF_AND | BPF_K))
			continue;

		if (!exp_mask[BPF_SIZE(insn.code)])
			continue;
		if (exp_mask[BPF_SIZE(insn.code)] != next.imm)
			continue;

		if (next.src_reg || next.dst_reg)
			continue;

		meta2->skip = true;
	}
}
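
/* Remove a pair of opposing 32-bit shifts of r0 that only zero-extends
 * the result of a 32-bit packet load; our load leaves the upper word
 * clear already, so e.g. r0 <<= 32; r0 >>= 32 can be dropped.
 */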
static void nfp_bpf_opt_ld_shift(struct nfp_prog *nfp_prog)
{
	struct nfp_insn_meta *meta1, *meta2, *meta3;

	nfp_for_each_insn_walk3(nfp_prog, meta1, meta2, meta3) {
		struct bpf_insn insn, next1, next2;

		insn = meta1->insn;
		next1 = meta2->insn;
		next2 = meta3->insn;

		if (BPF_CLASS(insn.code) != BPF_LD)
			continue;
		if (BPF_MODE(insn.code) != BPF_ABS &&
		    BPF_MODE(insn.code) != BPF_IND)
			continue;
		if (BPF_SIZE(insn.code) != BPF_W)
			continue;

		if (!(next1.code == (BPF_LSH | BPF_K | BPF_ALU64) &&
		      next2.code == (BPF_RSH | BPF_K | BPF_ALU64)) &&
		    !(next1.code == (BPF_RSH | BPF_K | BPF_ALU64) &&
		      next2.code == (BPF_LSH | BPF_K | BPF_ALU64)))
			continue;

		if (next1.src_reg || next1.dst_reg ||
		    next2.src_reg || next2.dst_reg)
			continue;

		if (next1.imm != 0x20 || next2.imm != 0x20)
			continue;

		meta2->skip = true;
		meta3->skip = true;
	}
}

static int nfp_bpf_optimize(struct nfp_prog *nfp_prog)
{
	int ret;

	nfp_bpf_opt_reg_init(nfp_prog);

	ret = nfp_bpf_opt_reg_rename(nfp_prog);
	if (ret)
		return ret;

	nfp_bpf_opt_ld_mask(nfp_prog);
	nfp_bpf_opt_ld_shift(nfp_prog);

	return 0;
}

/**
 * nfp_bpf_jit() - translate BPF code into NFP assembly
 * @filter:	kernel BPF filter struct
 * @prog_mem:	memory to store assembler instructions
 * @act:	action attached to this eBPF program
 * @prog_start:	offset of the first instruction when loaded
 * @prog_done:	where to jump on exit
 * @prog_sz:	size of @prog_mem in instructions
 * @res:	parameters of the resulting translation (length, dense mode)
 */
int
nfp_bpf_jit(struct bpf_prog *filter, void *prog_mem,
	    enum nfp_bpf_action_type act,
	    unsigned int prog_start, unsigned int prog_done,
	    unsigned int prog_sz, struct nfp_bpf_result *res)
{
	struct nfp_prog *nfp_prog;
	int ret;

	nfp_prog = kzalloc(sizeof(*nfp_prog), GFP_KERNEL);
	if (!nfp_prog)
		return -ENOMEM;

	INIT_LIST_HEAD(&nfp_prog->insns);
	nfp_prog->act = act;
	nfp_prog->start_off = prog_start;
	nfp_prog->tgt_done = prog_done;

	ret = nfp_prog_prepare(nfp_prog, filter->insnsi, filter->len);
	if (ret)
		goto out;

	ret = nfp_prog_verify(nfp_prog, filter);
	if (ret)
		goto out;

	ret = nfp_bpf_optimize(nfp_prog);
	if (ret)
		goto out;
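
	/* Each 64-bit eBPF register occupies a pair of 32-bit NFP
	 * registers, so a program renamed down to at most seven registers
	 * fits a 16-register per-thread budget ("dense mode"); larger
	 * programs reserve 32 registers per thread.
	 */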
	if (nfp_prog->num_regs <= 7)
		nfp_prog->regs_per_thread = 16;
	else
		nfp_prog->regs_per_thread = 32;

	nfp_prog->prog = prog_mem;
	nfp_prog->__prog_alloc_len = prog_sz;

	ret = nfp_translate(nfp_prog);
	if (ret) {
		pr_err("Translation failed with error %d (translated: %u)\n",
		       ret, nfp_prog->n_translated);
		ret = -EINVAL;
	}

	res->n_instr = nfp_prog->prog_len;
	res->dense_mode = nfp_prog->num_regs <= 7;

out:
	nfp_prog_free(nfp_prog);

	return ret;
}