verifier.c

/*
 * Copyright (C) 2016-2017 Netronome Systems, Inc.
 *
 * This software is dual licensed under the GNU General License Version 2,
 * June 1991 as shown in the file COPYING in the top-level directory of this
 * source tree or the BSD 2-Clause License provided below. You have the
 * option to license this software under the complete terms of either license.
 *
 * The BSD 2-Clause License:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      1. Redistributions of source code must retain the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer.
 *
 *      2. Redistributions in binary form must reproduce the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer in the documentation and/or other materials
 *         provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/bpf.h>
#include <linux/bpf_verifier.h>
#include <linux/kernel.h>
#include <linux/pkt_cls.h>

#include "fw.h"
#include "main.h"

#define pr_vlog(env, fmt, ...) \
	bpf_verifier_log_write(env, "[nfp] " fmt, ##__VA_ARGS__)
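
/* Walk the ordered list of instruction metadata from the cached position
 * @meta to the meta for @insn_idx.  forward/backward use unsigned
 * arithmetic, so the "wrong" direction underflows to a huge value and
 * loses the min() comparisons below.  If the head or the tail of the
 * list is closer to @insn_idx than the cached position, restart the
 * walk from there instead.  E.g. with n_insns == 100, meta->n == 90 and
 * insn_idx == 5: forward underflows, backward == 85 > insn_idx, so we
 * restart at the first meta and step forward five times.
 */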
struct nfp_insn_meta *
nfp_bpf_goto_meta(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
		  unsigned int insn_idx, unsigned int n_insns)
{
	unsigned int forward, backward, i;

	backward = meta->n - insn_idx;
	forward = insn_idx - meta->n;

	if (min(forward, backward) > n_insns - insn_idx - 1) {
		backward = n_insns - insn_idx - 1;
		meta = nfp_prog_last_meta(nfp_prog);
	}
	if (min(forward, backward) > insn_idx && backward > insn_idx) {
		forward = insn_idx;
		meta = nfp_prog_first_meta(nfp_prog);
	}

	if (forward < backward)
		for (i = 0; i < forward; i++)
			meta = nfp_meta_next(meta);
	else
		for (i = 0; i < backward; i++)
			meta = nfp_meta_prev(meta);

	return meta;
}

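/* Remember where the one-and-only constant bpf_xdp_adjust_head() call
 * sits so the translator can skip the runtime checks for it (the FW
 * guarantees a range of head adjustments it can always honour).  Any
 * condition that defeats the optimization - a non-constant offset, an
 * offset outside the guaranteed range, or a second call site - falls
 * through to exit_set_location and resets the location to UINT_MAX.
 */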
static void
nfp_record_adjust_head(struct nfp_app_bpf *bpf, struct nfp_prog *nfp_prog,
		       struct nfp_insn_meta *meta,
		       const struct bpf_reg_state *reg2)
{
	unsigned int location = UINT_MAX;
	int imm;

	/* Datapath usually can give us guarantees on how much adjust head
	 * can be done without the need for any checks.  Optimize the simple
	 * case where there is only one adjust head by a constant.
	 */
	if (reg2->type != SCALAR_VALUE || !tnum_is_const(reg2->var_off))
		goto exit_set_location;
	imm = reg2->var_off.value;
	/* Translator will skip all checks, we need to guarantee min pkt len */
	if (imm > ETH_ZLEN - ETH_HLEN)
		goto exit_set_location;
	if (imm > (int)bpf->adjust_head.guaranteed_add ||
	    imm < -bpf->adjust_head.guaranteed_sub)
		goto exit_set_location;

	if (nfp_prog->adjust_head_location) {
		/* Only one call per program allowed */
		if (nfp_prog->adjust_head_location != meta->n)
			goto exit_set_location;

		if (meta->arg2.reg.var_off.value != imm)
			goto exit_set_location;
	}

	location = meta->n;
exit_set_location:
	nfp_prog->adjust_head_location = location;
}

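/* Stack arguments to map helpers must be pointers with a constant,
 * 4-byte-aligned offset.  On a re-parse of the same instruction (when
 * the verifier revisits it on another path) record in @old_arg whether
 * the offset differed between paths, so later passes know the pointer
 * is not a single compile-time constant.
 */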
static bool
nfp_bpf_stack_arg_ok(const char *fname, struct bpf_verifier_env *env,
		     const struct bpf_reg_state *reg,
		     struct nfp_bpf_reg_state *old_arg)
{
	s64 off, old_off;

	if (reg->type != PTR_TO_STACK) {
		pr_vlog(env, "%s: unsupported ptr type %d\n",
			fname, reg->type);
		return false;
	}
	if (!tnum_is_const(reg->var_off)) {
		pr_vlog(env, "%s: variable pointer\n", fname);
		return false;
	}

	off = reg->var_off.value + reg->off;
	if (-off % 4) {
		pr_vlog(env, "%s: unaligned stack pointer %lld\n", fname, -off);
		return false;
	}

	/* The rest of the checks are only done if we re-parse the same insn */
	if (!old_arg)
		return true;

	old_off = old_arg->reg.var_off.value + old_arg->reg.off;
	old_arg->var_off |= off != old_off;

	return true;
}

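/* @helper_tgt is the FW's entry point for this helper (one of the
 * bpf->helpers.* values); zero means the FW does not implement it.
 * A given call instruction must also keep targeting the same map on
 * every verifier path.
 */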
static bool
nfp_bpf_map_call_ok(const char *fname, struct bpf_verifier_env *env,
		    struct nfp_insn_meta *meta,
		    u32 helper_tgt, const struct bpf_reg_state *reg1)
{
	if (!helper_tgt) {
		pr_vlog(env, "%s: not supported by FW\n", fname);
		return false;
	}

	/* The rest of the checks are only done if we re-parse the same insn */
	if (!meta->func_id)
		return true;

	if (meta->arg1.map_ptr != reg1->map_ptr) {
		pr_vlog(env, "%s: called for different map\n", fname);
		return false;
	}

	return true;
}

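/* Validate a helper call against what the FW advertises.  On success
 * the helper id and the argument register states are cached in @meta,
 * both so a later re-parse can confirm the call is stable across
 * verifier paths and so later passes can consult them.
 */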
static int
nfp_bpf_check_call(struct nfp_prog *nfp_prog, struct bpf_verifier_env *env,
		   struct nfp_insn_meta *meta)
{
	const struct bpf_reg_state *reg1 = cur_regs(env) + BPF_REG_1;
	const struct bpf_reg_state *reg2 = cur_regs(env) + BPF_REG_2;
	const struct bpf_reg_state *reg3 = cur_regs(env) + BPF_REG_3;
	struct nfp_app_bpf *bpf = nfp_prog->bpf;
	u32 func_id = meta->insn.imm;

	switch (func_id) {
	case BPF_FUNC_xdp_adjust_head:
		if (!bpf->adjust_head.off_max) {
			pr_vlog(env, "adjust_head not supported by FW\n");
			return -EOPNOTSUPP;
		}
		if (!(bpf->adjust_head.flags & NFP_BPF_ADJUST_HEAD_NO_META)) {
			pr_vlog(env, "adjust_head: FW requires shifting metadata, not supported by the driver\n");
			return -EOPNOTSUPP;
		}

		nfp_record_adjust_head(bpf, nfp_prog, meta, reg2);
		break;

	case BPF_FUNC_map_lookup_elem:
		if (!nfp_bpf_map_call_ok("map_lookup", env, meta,
					 bpf->helpers.map_lookup, reg1) ||
		    !nfp_bpf_stack_arg_ok("map_lookup", env, reg2,
					  meta->func_id ? &meta->arg2 : NULL))
			return -EOPNOTSUPP;
		break;

	case BPF_FUNC_map_update_elem:
		if (!nfp_bpf_map_call_ok("map_update", env, meta,
					 bpf->helpers.map_update, reg1) ||
		    !nfp_bpf_stack_arg_ok("map_update", env, reg2,
					  meta->func_id ? &meta->arg2 : NULL) ||
		    !nfp_bpf_stack_arg_ok("map_update", env, reg3, NULL))
			return -EOPNOTSUPP;
		break;

	case BPF_FUNC_map_delete_elem:
		if (!nfp_bpf_map_call_ok("map_delete", env, meta,
					 bpf->helpers.map_delete, reg1) ||
		    !nfp_bpf_stack_arg_ok("map_delete", env, reg2,
					  meta->func_id ? &meta->arg2 : NULL))
			return -EOPNOTSUPP;
		break;

	case BPF_FUNC_get_prandom_u32:
		if (bpf->pseudo_random)
			break;
		pr_vlog(env, "bpf_get_prandom_u32(): FW doesn't support random number generation\n");
		return -EOPNOTSUPP;

	default:
		pr_vlog(env, "unsupported function id: %d\n", func_id);
		return -EOPNOTSUPP;
	}

	meta->func_id = func_id;
	meta->arg1 = *reg1;
	meta->arg2.reg = *reg2;

	return 0;
}

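/* Exit states are only constrained for TC programs.  The return value
 * of a cls_bpf program in direct-action mode is the TC action itself;
 * the check below accepts TC_ACT_SHOT, TC_ACT_STOLEN, TC_ACT_QUEUED and
 * anything above TC_ACT_REDIRECT - which includes TC_ACT_UNSPEC, since
 * it is negative and therefore huge as a u64.  Other low actions such
 * as TC_ACT_OK are rejected, presumably because the datapath cannot
 * implement them faithfully (an inference from the check itself, not
 * from FW documentation).
 */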
static int
nfp_bpf_check_exit(struct nfp_prog *nfp_prog,
		   struct bpf_verifier_env *env)
{
	const struct bpf_reg_state *reg0 = cur_regs(env) + BPF_REG_0;
	u64 imm;

	if (nfp_prog->type == BPF_PROG_TYPE_XDP)
		return 0;

	if (!(reg0->type == SCALAR_VALUE && tnum_is_const(reg0->var_off))) {
		char tn_buf[48];

		tnum_strn(tn_buf, sizeof(tn_buf), reg0->var_off);
		pr_vlog(env, "unsupported exit state: %d, var_off: %s\n",
			reg0->type, tn_buf);
		return -EINVAL;
	}

	imm = reg0->var_off.value;
	if (nfp_prog->type == BPF_PROG_TYPE_SCHED_CLS &&
	    imm <= TC_ACT_REDIRECT &&
	    imm != TC_ACT_SHOT && imm != TC_ACT_STOLEN &&
	    imm != TC_ACT_QUEUED) {
		pr_vlog(env, "unsupported exit state: %d, imm: %llx\n",
			reg0->type, imm);
		return -EINVAL;
	}

	return 0;
}

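/* A stack load/store must hit the same offset on every verifier path,
 * or at least keep the same alignment within a 32-bit word.  Once the
 * offsets diverge (ptr_not_const) the address presumably has to be
 * computed at runtime, which only works if the sub-word shift is
 * identical on all paths.
 */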
static int
nfp_bpf_check_stack_access(struct nfp_prog *nfp_prog,
			   struct nfp_insn_meta *meta,
			   const struct bpf_reg_state *reg,
			   struct bpf_verifier_env *env)
{
	s32 old_off, new_off;

	if (!tnum_is_const(reg->var_off)) {
		pr_vlog(env, "variable ptr stack access\n");
		return -EINVAL;
	}

	if (meta->ptr.type == NOT_INIT)
		return 0;

	old_off = meta->ptr.off + meta->ptr.var_off.value;
	new_off = reg->off + reg->var_off.value;

	meta->ptr_not_const |= old_off != new_off;

	if (!meta->ptr_not_const)
		return 0;

	if (old_off % 4 == new_off % 4)
		return 0;

	pr_vlog(env, "stack access changed location was:%d is:%d\n",
		old_off, new_off);
	return -EINVAL;
}

static const char *nfp_bpf_map_use_name(enum nfp_bpf_map_use use)
{
	static const char * const names[] = {
		[NFP_MAP_UNUSED]	= "unused",
		[NFP_MAP_USE_READ]	= "read",
		[NFP_MAP_USE_WRITE]	= "write",
		[NFP_MAP_USE_ATOMIC_CNT] = "atomic",
	};

	if (use >= ARRAY_SIZE(names) || !names[use])
		return "unknown";
	return names[use];
}

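/* Each 32-bit word of an offloaded map's value may only ever be used in
 * one way (read, write or atomic counter) across the whole program; the
 * use_map array records the first use of every word and any conflicting
 * later use is rejected.
 */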
static int
nfp_bpf_map_mark_used_one(struct bpf_verifier_env *env,
			  struct nfp_bpf_map *nfp_map,
			  unsigned int off, enum nfp_bpf_map_use use)
{
	if (nfp_map->use_map[off / 4] != NFP_MAP_UNUSED &&
	    nfp_map->use_map[off / 4] != use) {
		pr_vlog(env, "map value use type conflict %s vs %s off: %u\n",
			nfp_bpf_map_use_name(nfp_map->use_map[off / 4]),
			nfp_bpf_map_use_name(use), off);
		return -EOPNOTSUPP;
	}

	nfp_map->use_map[off / 4] = use;

	return 0;
}

static int
nfp_bpf_map_mark_used(struct bpf_verifier_env *env, struct nfp_insn_meta *meta,
		      const struct bpf_reg_state *reg,
		      enum nfp_bpf_map_use use)
{
	struct bpf_offloaded_map *offmap;
	struct nfp_bpf_map *nfp_map;
	unsigned int size, off;
	int i, err;

	if (!tnum_is_const(reg->var_off)) {
		pr_vlog(env, "map value offset is variable\n");
		return -EOPNOTSUPP;
	}

	off = reg->var_off.value + meta->insn.off + reg->off;
	size = BPF_LDST_BYTES(&meta->insn);
	offmap = map_to_offmap(reg->map_ptr);
	nfp_map = offmap->dev_priv;

	if (off + size > offmap->map.value_size) {
		pr_vlog(env, "map value access out-of-bounds\n");
		return -EINVAL;
	}

	/* Mark every 32-bit word the access touches; the increment steps
	 * from the current byte to the next word boundary.
	 */
	for (i = 0; i < size; i += 4 - (off + i) % 4) {
		err = nfp_bpf_map_mark_used_one(env, nfp_map, off + i, use);
		if (err)
			return err;
	}

	return 0;
}

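/* Central pointer check for loads, stores and atomics.  Beyond the
 * supported pointer types, map value accesses are recorded per word
 * (reads and atomic adds; plain writes to map values are not
 * offloaded), and the pointer type seen at an instruction must not
 * change between verifier paths.
 */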
static int
nfp_bpf_check_ptr(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
		  struct bpf_verifier_env *env, u8 reg_no)
{
	const struct bpf_reg_state *reg = cur_regs(env) + reg_no;
	int err;

	if (reg->type != PTR_TO_CTX &&
	    reg->type != PTR_TO_STACK &&
	    reg->type != PTR_TO_MAP_VALUE &&
	    reg->type != PTR_TO_PACKET) {
		pr_vlog(env, "unsupported ptr type: %d\n", reg->type);
		return -EINVAL;
	}

	if (reg->type == PTR_TO_STACK) {
		err = nfp_bpf_check_stack_access(nfp_prog, meta, reg, env);
		if (err)
			return err;
	}

	if (reg->type == PTR_TO_MAP_VALUE) {
		if (is_mbpf_load(meta)) {
			err = nfp_bpf_map_mark_used(env, meta, reg,
						    NFP_MAP_USE_READ);
			if (err)
				return err;
		}
		if (is_mbpf_store(meta)) {
			pr_vlog(env, "map writes not supported\n");
			return -EOPNOTSUPP;
		}
		if (is_mbpf_xadd(meta)) {
			err = nfp_bpf_map_mark_used(env, meta, reg,
						    NFP_MAP_USE_ATOMIC_CNT);
			if (err)
				return err;
		}
	}

	if (meta->ptr.type != NOT_INIT && meta->ptr.type != reg->type) {
		pr_vlog(env, "ptr type changed for instruction %d -> %d\n",
			meta->ptr.type, reg->type);
		return -EINVAL;
	}

	meta->ptr = *reg;

	return 0;
}

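/* BPF_XADD is only offloaded as an atomic add of a scalar to a map
 * value.  sreg->var_off is a tnum: "value" holds the known bits and
 * "mask" the unknown ones (value & mask == 0), so the operand may
 * exceed 16 bits if either field has bits above bit 15, and may still
 * fit in 16 bits if its known bits do.  The flags are sticky across
 * verifier paths, letting later passes decide which add variants must
 * be emitted (assumption: the 16-bit split maps to a cheaper encoding).
 */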
static int
nfp_bpf_check_xadd(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
		   struct bpf_verifier_env *env)
{
	const struct bpf_reg_state *sreg = cur_regs(env) + meta->insn.src_reg;
	const struct bpf_reg_state *dreg = cur_regs(env) + meta->insn.dst_reg;

	if (dreg->type != PTR_TO_MAP_VALUE) {
		pr_vlog(env, "atomic add not to a map value pointer: %d\n",
			dreg->type);
		return -EOPNOTSUPP;
	}
	if (sreg->type != SCALAR_VALUE) {
		pr_vlog(env, "atomic add not of a scalar: %d\n", sreg->type);
		return -EOPNOTSUPP;
	}

	meta->xadd_over_16bit |=
		sreg->var_off.value > 0xffff || sreg->var_off.mask > 0xffff;
	meta->xadd_maybe_16bit |=
		(sreg->var_off.value & ~sreg->var_off.mask) <= 0xffff;

	return nfp_bpf_check_ptr(nfp_prog, meta, env, meta->insn.dst_reg);
}

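/* Per-instruction hook called by the kernel verifier through
 * nfp_bpf_analyzer_ops below.  It resolves the instruction's metadata,
 * filters out opcodes and register numbers the datapath cannot handle,
 * and dispatches calls, exits and memory accesses to the checks above.
 */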
static int
nfp_verify_insn(struct bpf_verifier_env *env, int insn_idx, int prev_insn_idx)
{
	struct nfp_prog *nfp_prog = env->prog->aux->offload->dev_priv;
	struct nfp_insn_meta *meta = nfp_prog->verifier_meta;

	meta = nfp_bpf_goto_meta(nfp_prog, meta, insn_idx, env->prog->len);
	nfp_prog->verifier_meta = meta;

	if (!nfp_bpf_supported_opcode(meta->insn.code)) {
		pr_vlog(env, "instruction %#02x not supported\n",
			meta->insn.code);
		return -EINVAL;
	}

	if (meta->insn.src_reg >= MAX_BPF_REG ||
	    meta->insn.dst_reg >= MAX_BPF_REG) {
		pr_vlog(env, "program uses extended registers - jit hardening?\n");
		return -EINVAL;
	}

	if (meta->insn.code == (BPF_JMP | BPF_CALL))
		return nfp_bpf_check_call(nfp_prog, env, meta);
	if (meta->insn.code == (BPF_JMP | BPF_EXIT))
		return nfp_bpf_check_exit(nfp_prog, env);

	if (is_mbpf_load(meta))
		return nfp_bpf_check_ptr(nfp_prog, meta, env,
					 meta->insn.src_reg);
	if (is_mbpf_store(meta))
		return nfp_bpf_check_ptr(nfp_prog, meta, env,
					 meta->insn.dst_reg);
	if (is_mbpf_xadd(meta))
		return nfp_bpf_check_xadd(nfp_prog, meta, env);

	return 0;
}

const struct bpf_prog_offload_ops nfp_bpf_analyzer_ops = {
	.insn_hook = nfp_verify_insn,
};