cls_bpf.c

/*
 * Berkeley Packet Filter based traffic classifier
 *
 * Might be used to classify traffic through flexible, user-defined and
 * possibly JIT-ed BPF filters for traffic control as an alternative to
 * ematches.
 *
 * (C) 2013 Daniel Borkmann <dborkman@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

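/* Example usage from userspace (iproute2 syntax; the device and object
 * file names are illustrative):
 *
 *      tc qdisc add dev eth0 clsact
 *      tc filter add dev eth0 ingress bpf obj cls.o sec classifier da
 *
 * "da" requests direct-action mode (TCA_BPF_FLAG_ACT_DIRECT below), in
 * which the program's return code is used directly as the TC verdict.
 */
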
#include <linux/module.h>
#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/filter.h>
#include <linux/bpf.h>
#include <linux/idr.h>

#include <net/rtnetlink.h>
#include <net/pkt_cls.h>
#include <net/sock.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Daniel Borkmann <dborkman@redhat.com>");
MODULE_DESCRIPTION("TC BPF based classifier");

#define CLS_BPF_NAME_LEN        256

#define CLS_BPF_SUPPORTED_GEN_FLAGS \
        (TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW)

/* Per-classifier-instance state: the list of attached programs and the
 * IDR that hands out filter handles.
 */
struct cls_bpf_head {
        struct list_head plist;
        struct idr handle_idr;
        struct rcu_head rcu;
};

/* One attached program, either classic BPF (bpf_ops != NULL) or eBPF
 * (loaded via fd, optionally named), plus its classification result and
 * extended actions.
 */
struct cls_bpf_prog {
        struct bpf_prog *filter;
        struct list_head link;
        struct tcf_result res;
        bool exts_integrated;
        u32 gen_flags;
        struct tcf_exts exts;
        u32 handle;
        u16 bpf_num_ops;
        struct sock_filter *bpf_ops;
        const char *bpf_name;
        struct tcf_proto *tp;
        struct rcu_work rwork;
};

static const struct nla_policy bpf_policy[TCA_BPF_MAX + 1] = {
        [TCA_BPF_CLASSID]       = { .type = NLA_U32 },
        [TCA_BPF_FLAGS]         = { .type = NLA_U32 },
        [TCA_BPF_FLAGS_GEN]     = { .type = NLA_U32 },
        [TCA_BPF_FD]            = { .type = NLA_U32 },
        [TCA_BPF_NAME]          = { .type = NLA_NUL_STRING,
                                    .len = CLS_BPF_NAME_LEN },
        [TCA_BPF_OPS_LEN]       = { .type = NLA_U16 },
        [TCA_BPF_OPS]           = { .type = NLA_BINARY,
                                    .len = sizeof(struct sock_filter) * BPF_MAXINSNS },
};

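/* Map the return code of a direct-action program onto a valid TC verdict.
 * Anything the kernel does not recognize is treated as TC_ACT_UNSPEC,
 * i.e. "no match, continue with the next filter".
 */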
static int cls_bpf_exec_opcode(int code)
{
        switch (code) {
        case TC_ACT_OK:
        case TC_ACT_SHOT:
        case TC_ACT_STOLEN:
        case TC_ACT_TRAP:
        case TC_ACT_REDIRECT:
        case TC_ACT_UNSPEC:
                return code;
        default:
                return TC_ACT_UNSPEC;
        }
}

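/* The classification path: run each attached program in order under RCU.
 * At ingress the MAC header has already been pulled, so it is pushed back
 * first so that programs see the packet as they would at egress. For
 * direct-action (exts_integrated) programs the return code is the TC
 * verdict and the minor classid is taken from the skb's cb; otherwise a
 * return of 0 means "no match", -1 selects the configured default
 * classid, and any other value is used as the classid itself.
 */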
static int cls_bpf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
                            struct tcf_result *res)
{
        struct cls_bpf_head *head = rcu_dereference_bh(tp->root);
        bool at_ingress = skb_at_tc_ingress(skb);
        struct cls_bpf_prog *prog;
        int ret = -1;

        /* Needed here for accessing maps. */
        rcu_read_lock();
        list_for_each_entry_rcu(prog, &head->plist, link) {
                int filter_res;

                qdisc_skb_cb(skb)->tc_classid = prog->res.classid;

                if (tc_skip_sw(prog->gen_flags)) {
                        filter_res = prog->exts_integrated ? TC_ACT_UNSPEC : 0;
                } else if (at_ingress) {
                        /* It is safe to push/pull even if skb_shared() */
                        __skb_push(skb, skb->mac_len);
                        bpf_compute_data_pointers(skb);
                        filter_res = BPF_PROG_RUN(prog->filter, skb);
                        __skb_pull(skb, skb->mac_len);
                } else {
                        bpf_compute_data_pointers(skb);
                        filter_res = BPF_PROG_RUN(prog->filter, skb);
                }

                if (prog->exts_integrated) {
                        res->class = 0;
                        res->classid = TC_H_MAJ(prog->res.classid) |
                                       qdisc_skb_cb(skb)->tc_classid;

                        ret = cls_bpf_exec_opcode(filter_res);
                        if (ret == TC_ACT_UNSPEC)
                                continue;
                        break;
                }

                if (filter_res == 0)
                        continue;
                if (filter_res != -1) {
                        res->class = 0;
                        res->classid = filter_res;
                } else {
                        *res = prog->res;
                }

                ret = tcf_exts_exec(skb, &prog->exts, res);
                if (ret < 0)
                        continue;
                break;
        }
        rcu_read_unlock();

        return ret;
}

/* Classic BPF keeps a copy of the original sock_filter ops around for
 * dumping; eBPF programs do not.
 */
static bool cls_bpf_is_ebpf(const struct cls_bpf_prog *prog)
{
        return !prog->bpf_ops;
}

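/* Issue a TC_CLSBPF_OFFLOAD command to the block's hardware callbacks.
 * prog/oldprog select between add, replace and destroy. If installing
 * the new program fails, the call is replayed with the arguments swapped
 * to restore the old one; if TCA_CLS_FLAGS_SKIP_SW was requested but no
 * device accepted the program, the operation fails with -EINVAL.
 */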
static int cls_bpf_offload_cmd(struct tcf_proto *tp, struct cls_bpf_prog *prog,
                               struct cls_bpf_prog *oldprog,
                               struct netlink_ext_ack *extack)
{
        struct tcf_block *block = tp->chain->block;
        struct tc_cls_bpf_offload cls_bpf = {};
        struct cls_bpf_prog *obj;
        bool skip_sw;
        int err;

        skip_sw = prog && tc_skip_sw(prog->gen_flags);
        obj = prog ?: oldprog;

        tc_cls_common_offload_init(&cls_bpf.common, tp, obj->gen_flags,
                                   extack);
        cls_bpf.command = TC_CLSBPF_OFFLOAD;
        cls_bpf.exts = &obj->exts;
        cls_bpf.prog = prog ? prog->filter : NULL;
        cls_bpf.oldprog = oldprog ? oldprog->filter : NULL;
        cls_bpf.name = obj->bpf_name;
        cls_bpf.exts_integrated = obj->exts_integrated;

        if (oldprog)
                tcf_block_offload_dec(block, &oldprog->gen_flags);

        err = tc_setup_cb_call(block, NULL, TC_SETUP_CLSBPF, &cls_bpf, skip_sw);
        if (prog) {
                if (err < 0) {
                        cls_bpf_offload_cmd(tp, oldprog, prog, extack);
                        return err;
                } else if (err > 0) {
                        tcf_block_offload_inc(block, &prog->gen_flags);
                }
        }

        if (prog && skip_sw && !(prog->gen_flags & TCA_CLS_FLAGS_IN_HW))
                return -EINVAL;

        return 0;
}

static u32 cls_bpf_flags(u32 flags)
{
        return flags & CLS_BPF_SUPPORTED_GEN_FLAGS;
}

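/* Decide whether an add/replace needs to touch hardware at all: skip_hw
 * programs are never offloaded, and a replace may not change the
 * offload-related gen_flags of the entry.
 */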
static int cls_bpf_offload(struct tcf_proto *tp, struct cls_bpf_prog *prog,
                           struct cls_bpf_prog *oldprog,
                           struct netlink_ext_ack *extack)
{
        if (prog && oldprog &&
            cls_bpf_flags(prog->gen_flags) !=
            cls_bpf_flags(oldprog->gen_flags))
                return -EINVAL;

        if (prog && tc_skip_hw(prog->gen_flags))
                prog = NULL;
        if (oldprog && tc_skip_hw(oldprog->gen_flags))
                oldprog = NULL;
        if (!prog && !oldprog)
                return 0;

        return cls_bpf_offload_cmd(tp, prog, oldprog, extack);
}

static void cls_bpf_stop_offload(struct tcf_proto *tp,
                                 struct cls_bpf_prog *prog,
                                 struct netlink_ext_ack *extack)
{
        int err;

        err = cls_bpf_offload_cmd(tp, NULL, prog, extack);
        if (err)
                pr_err("Stopping hardware offload failed: %d\n", err);
}

static void cls_bpf_offload_update_stats(struct tcf_proto *tp,
                                         struct cls_bpf_prog *prog)
{
        struct tcf_block *block = tp->chain->block;
        struct tc_cls_bpf_offload cls_bpf = {};

        tc_cls_common_offload_init(&cls_bpf.common, tp, prog->gen_flags, NULL);
        cls_bpf.command = TC_CLSBPF_STATS;
        cls_bpf.exts = &prog->exts;
        cls_bpf.prog = prog->filter;
        cls_bpf.name = prog->bpf_name;
        cls_bpf.exts_integrated = prog->exts_integrated;

        tc_setup_cb_call(block, NULL, TC_SETUP_CLSBPF, &cls_bpf, false);
}

static int cls_bpf_init(struct tcf_proto *tp)
{
        struct cls_bpf_head *head;

        head = kzalloc(sizeof(*head), GFP_KERNEL);
        if (head == NULL)
                return -ENOBUFS;

        INIT_LIST_HEAD_RCU(&head->plist);
        idr_init(&head->handle_idr);
        rcu_assign_pointer(tp->root, head);

        return 0;
}

static void cls_bpf_free_parms(struct cls_bpf_prog *prog)
{
        if (cls_bpf_is_ebpf(prog))
                bpf_prog_put(prog->filter);
        else
                bpf_prog_destroy(prog->filter);

        kfree(prog->bpf_name);
        kfree(prog->bpf_ops);
}

static void __cls_bpf_delete_prog(struct cls_bpf_prog *prog)
{
        tcf_exts_destroy(&prog->exts);
        tcf_exts_put_net(&prog->exts);
        cls_bpf_free_parms(prog);
        kfree(prog);
}

static void cls_bpf_delete_prog_work(struct work_struct *work)
{
        struct cls_bpf_prog *prog = container_of(to_rcu_work(work),
                                                 struct cls_bpf_prog,
                                                 rwork);
        rtnl_lock();
        __cls_bpf_delete_prog(prog);
        rtnl_unlock();
}

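/* Unlink a program from the classifier. The actual free is deferred
 * through an RCU work item (which re-takes the RTNL lock) so that
 * readers still traversing the list under RCU cannot see freed memory.
 */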
static void __cls_bpf_delete(struct tcf_proto *tp, struct cls_bpf_prog *prog,
                             struct netlink_ext_ack *extack)
{
        struct cls_bpf_head *head = rtnl_dereference(tp->root);

        idr_remove(&head->handle_idr, prog->handle);
        cls_bpf_stop_offload(tp, prog, extack);
        list_del_rcu(&prog->link);
        tcf_unbind_filter(tp, &prog->res);
        if (tcf_exts_get_net(&prog->exts))
                tcf_queue_work(&prog->rwork, cls_bpf_delete_prog_work);
        else
                __cls_bpf_delete_prog(prog);
}

static int cls_bpf_delete(struct tcf_proto *tp, void *arg, bool *last,
                          struct netlink_ext_ack *extack)
{
        struct cls_bpf_head *head = rtnl_dereference(tp->root);

        __cls_bpf_delete(tp, arg, extack);
        *last = list_empty(&head->plist);
        return 0;
}

static void cls_bpf_destroy(struct tcf_proto *tp,
                            struct netlink_ext_ack *extack)
{
        struct cls_bpf_head *head = rtnl_dereference(tp->root);
        struct cls_bpf_prog *prog, *tmp;

        list_for_each_entry_safe(prog, tmp, &head->plist, link)
                __cls_bpf_delete(tp, prog, extack);

        idr_destroy(&head->handle_idr);
        kfree_rcu(head, rcu);
}

static void *cls_bpf_get(struct tcf_proto *tp, u32 handle)
{
        struct cls_bpf_head *head = rtnl_dereference(tp->root);
        struct cls_bpf_prog *prog;

        list_for_each_entry(prog, &head->plist, link) {
                if (prog->handle == handle)
                        return prog;
        }

        return NULL;
}

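/* Classic BPF setup: copy the sock_filter array supplied over netlink,
 * validate its size against TCA_BPF_OPS_LEN, and let bpf_prog_create()
 * translate it into an executable (possibly JITed) program.
 */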
static int cls_bpf_prog_from_ops(struct nlattr **tb, struct cls_bpf_prog *prog)
{
        struct sock_filter *bpf_ops;
        struct sock_fprog_kern fprog_tmp;
        struct bpf_prog *fp;
        u16 bpf_size, bpf_num_ops;
        int ret;

        bpf_num_ops = nla_get_u16(tb[TCA_BPF_OPS_LEN]);
        if (bpf_num_ops > BPF_MAXINSNS || bpf_num_ops == 0)
                return -EINVAL;

        bpf_size = bpf_num_ops * sizeof(*bpf_ops);
        if (bpf_size != nla_len(tb[TCA_BPF_OPS]))
                return -EINVAL;

        bpf_ops = kzalloc(bpf_size, GFP_KERNEL);
        if (bpf_ops == NULL)
                return -ENOMEM;

        memcpy(bpf_ops, nla_data(tb[TCA_BPF_OPS]), bpf_size);

        fprog_tmp.len = bpf_num_ops;
        fprog_tmp.filter = bpf_ops;

        ret = bpf_prog_create(&fp, &fprog_tmp);
        if (ret < 0) {
                kfree(bpf_ops);
                return ret;
        }

        prog->bpf_ops = bpf_ops;
        prog->bpf_num_ops = bpf_num_ops;
        prog->bpf_name = NULL;
        prog->filter = fp;

        return 0;
}

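/* eBPF setup: take a reference on the program behind the user-supplied
 * fd, checking that it is of type BPF_PROG_TYPE_SCHED_CLS (and, when
 * skip_sw is set, allowing a device-bound program). The optional name is
 * only kept so it can be dumped back to userspace.
 */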
static int cls_bpf_prog_from_efd(struct nlattr **tb, struct cls_bpf_prog *prog,
                                 u32 gen_flags, const struct tcf_proto *tp)
{
        struct bpf_prog *fp;
        char *name = NULL;
        bool skip_sw;
        u32 bpf_fd;

        bpf_fd = nla_get_u32(tb[TCA_BPF_FD]);
        skip_sw = gen_flags & TCA_CLS_FLAGS_SKIP_SW;

        fp = bpf_prog_get_type_dev(bpf_fd, BPF_PROG_TYPE_SCHED_CLS, skip_sw);
        if (IS_ERR(fp))
                return PTR_ERR(fp);

        if (tb[TCA_BPF_NAME]) {
                name = nla_memdup(tb[TCA_BPF_NAME], GFP_KERNEL);
                if (!name) {
                        bpf_prog_put(fp);
                        return -ENOMEM;
                }
        }

        prog->bpf_ops = NULL;
        prog->bpf_name = name;
        prog->filter = fp;

        if (fp->dst_needed)
                tcf_block_netif_keep_dst(tp->chain->block);

        return 0;
}

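/* Parse and apply the netlink parameters for a new or replaced program.
 * Exactly one of classic BPF ops (TCA_BPF_OPS_LEN/TCA_BPF_OPS) and an
 * eBPF fd (TCA_BPF_FD) must be present; TCA_BPF_FLAGS may enable
 * direct-action mode and TCA_BPF_FLAGS_GEN carries the skip_hw/skip_sw
 * offload flags.
 */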
static int cls_bpf_set_parms(struct net *net, struct tcf_proto *tp,
                             struct cls_bpf_prog *prog, unsigned long base,
                             struct nlattr **tb, struct nlattr *est, bool ovr,
                             struct netlink_ext_ack *extack)
{
        bool is_bpf, is_ebpf, have_exts = false;
        u32 gen_flags = 0;
        int ret;

        is_bpf = tb[TCA_BPF_OPS_LEN] && tb[TCA_BPF_OPS];
        is_ebpf = tb[TCA_BPF_FD];
        if ((!is_bpf && !is_ebpf) || (is_bpf && is_ebpf))
                return -EINVAL;

        ret = tcf_exts_validate(net, tp, tb, est, &prog->exts, ovr, extack);
        if (ret < 0)
                return ret;

        if (tb[TCA_BPF_FLAGS]) {
                u32 bpf_flags = nla_get_u32(tb[TCA_BPF_FLAGS]);

                if (bpf_flags & ~TCA_BPF_FLAG_ACT_DIRECT)
                        return -EINVAL;

                have_exts = bpf_flags & TCA_BPF_FLAG_ACT_DIRECT;
        }
        if (tb[TCA_BPF_FLAGS_GEN]) {
                gen_flags = nla_get_u32(tb[TCA_BPF_FLAGS_GEN]);
                if (gen_flags & ~CLS_BPF_SUPPORTED_GEN_FLAGS ||
                    !tc_flags_valid(gen_flags))
                        return -EINVAL;
        }

        prog->exts_integrated = have_exts;
        prog->gen_flags = gen_flags;

        ret = is_bpf ? cls_bpf_prog_from_ops(tb, prog) :
                       cls_bpf_prog_from_efd(tb, prog, gen_flags, tp);
        if (ret < 0)
                return ret;

        if (tb[TCA_BPF_CLASSID]) {
                prog->res.classid = nla_get_u32(tb[TCA_BPF_CLASSID]);
                tcf_bind_filter(tp, &prog->res, base);
        }

        return 0;
}

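/* Create a new filter or replace an existing one. A fresh handle is
 * allocated from the IDR when none was given; on replace the new program
 * takes over the old one's slot via idr_replace()/list_replace_rcu() and
 * the old program is freed after an RCU grace period.
 */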
static int cls_bpf_change(struct net *net, struct sk_buff *in_skb,
                          struct tcf_proto *tp, unsigned long base,
                          u32 handle, struct nlattr **tca,
                          void **arg, bool ovr, struct netlink_ext_ack *extack)
{
        struct cls_bpf_head *head = rtnl_dereference(tp->root);
        struct cls_bpf_prog *oldprog = *arg;
        struct nlattr *tb[TCA_BPF_MAX + 1];
        struct cls_bpf_prog *prog;
        int ret;

        if (tca[TCA_OPTIONS] == NULL)
                return -EINVAL;

        ret = nla_parse_nested(tb, TCA_BPF_MAX, tca[TCA_OPTIONS], bpf_policy,
                               NULL);
        if (ret < 0)
                return ret;

        prog = kzalloc(sizeof(*prog), GFP_KERNEL);
        if (!prog)
                return -ENOBUFS;

        ret = tcf_exts_init(&prog->exts, TCA_BPF_ACT, TCA_BPF_POLICE);
        if (ret < 0)
                goto errout;

        if (oldprog) {
                if (handle && oldprog->handle != handle) {
                        ret = -EINVAL;
                        goto errout;
                }
        }

        if (handle == 0) {
                handle = 1;
                ret = idr_alloc_u32(&head->handle_idr, prog, &handle,
                                    INT_MAX, GFP_KERNEL);
        } else if (!oldprog) {
                ret = idr_alloc_u32(&head->handle_idr, prog, &handle,
                                    handle, GFP_KERNEL);
        }

        if (ret)
                goto errout;
        prog->handle = handle;

        ret = cls_bpf_set_parms(net, tp, prog, base, tb, tca[TCA_RATE], ovr,
                                extack);
        if (ret < 0)
                goto errout_idr;

        ret = cls_bpf_offload(tp, prog, oldprog, extack);
        if (ret)
                goto errout_parms;

        if (!tc_in_hw(prog->gen_flags))
                prog->gen_flags |= TCA_CLS_FLAGS_NOT_IN_HW;

        if (oldprog) {
                idr_replace(&head->handle_idr, prog, handle);
                list_replace_rcu(&oldprog->link, &prog->link);
                tcf_unbind_filter(tp, &oldprog->res);
                tcf_exts_get_net(&oldprog->exts);
                tcf_queue_work(&oldprog->rwork, cls_bpf_delete_prog_work);
        } else {
                list_add_rcu(&prog->link, &head->plist);
        }

        *arg = prog;
        return 0;

errout_parms:
        cls_bpf_free_parms(prog);
errout_idr:
        if (!oldprog)
                idr_remove(&head->handle_idr, prog->handle);
errout:
        tcf_exts_destroy(&prog->exts);
        kfree(prog);
        return ret;
}

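/* Netlink dump helpers: classic BPF programs are dumped as their raw
 * sock_filter ops; eBPF programs as name, program id and tag, which
 * userspace can use to identify the loaded program.
 */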
static int cls_bpf_dump_bpf_info(const struct cls_bpf_prog *prog,
                                 struct sk_buff *skb)
{
        struct nlattr *nla;

        if (nla_put_u16(skb, TCA_BPF_OPS_LEN, prog->bpf_num_ops))
                return -EMSGSIZE;

        nla = nla_reserve(skb, TCA_BPF_OPS, prog->bpf_num_ops *
                          sizeof(struct sock_filter));
        if (nla == NULL)
                return -EMSGSIZE;

        memcpy(nla_data(nla), prog->bpf_ops, nla_len(nla));

        return 0;
}

static int cls_bpf_dump_ebpf_info(const struct cls_bpf_prog *prog,
                                  struct sk_buff *skb)
{
        struct nlattr *nla;

        if (prog->bpf_name &&
            nla_put_string(skb, TCA_BPF_NAME, prog->bpf_name))
                return -EMSGSIZE;

        if (nla_put_u32(skb, TCA_BPF_ID, prog->filter->aux->id))
                return -EMSGSIZE;

        nla = nla_reserve(skb, TCA_BPF_TAG, sizeof(prog->filter->tag));
        if (nla == NULL)
                return -EMSGSIZE;

        memcpy(nla_data(nla), prog->filter->tag, nla_len(nla));

        return 0;
}

static int cls_bpf_dump(struct net *net, struct tcf_proto *tp, void *fh,
                        struct sk_buff *skb, struct tcmsg *tm)
{
        struct cls_bpf_prog *prog = fh;
        struct nlattr *nest;
        u32 bpf_flags = 0;
        int ret;

        if (prog == NULL)
                return skb->len;

        tm->tcm_handle = prog->handle;

        cls_bpf_offload_update_stats(tp, prog);

        nest = nla_nest_start(skb, TCA_OPTIONS);
        if (nest == NULL)
                goto nla_put_failure;

        if (prog->res.classid &&
            nla_put_u32(skb, TCA_BPF_CLASSID, prog->res.classid))
                goto nla_put_failure;

        if (cls_bpf_is_ebpf(prog))
                ret = cls_bpf_dump_ebpf_info(prog, skb);
        else
                ret = cls_bpf_dump_bpf_info(prog, skb);
        if (ret)
                goto nla_put_failure;

        if (tcf_exts_dump(skb, &prog->exts) < 0)
                goto nla_put_failure;

        if (prog->exts_integrated)
                bpf_flags |= TCA_BPF_FLAG_ACT_DIRECT;
        if (bpf_flags && nla_put_u32(skb, TCA_BPF_FLAGS, bpf_flags))
                goto nla_put_failure;
        if (prog->gen_flags &&
            nla_put_u32(skb, TCA_BPF_FLAGS_GEN, prog->gen_flags))
                goto nla_put_failure;

        nla_nest_end(skb, nest);

        if (tcf_exts_dump_stats(skb, &prog->exts) < 0)
                goto nla_put_failure;

        return skb->len;

nla_put_failure:
        nla_nest_cancel(skb, nest);
        return -1;
}

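/* Called by the TC core to refresh the cached class pointer of filters
 * bound to @classid, keeping prog->res valid across class changes.
 */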
static void cls_bpf_bind_class(void *fh, u32 classid, unsigned long cl)
{
        struct cls_bpf_prog *prog = fh;

        if (prog && prog->res.classid == classid)
                prog->res.class = cl;
}

static void cls_bpf_walk(struct tcf_proto *tp, struct tcf_walker *arg)
{
        struct cls_bpf_head *head = rtnl_dereference(tp->root);
        struct cls_bpf_prog *prog;

        list_for_each_entry(prog, &head->plist, link) {
                if (arg->count < arg->skip)
                        goto skip;
                if (arg->fn(tp, prog, arg) < 0) {
                        arg->stop = 1;
                        break;
                }
skip:
                arg->count++;
        }
}

static struct tcf_proto_ops cls_bpf_ops __read_mostly = {
        .kind           = "bpf",
        .owner          = THIS_MODULE,
        .classify       = cls_bpf_classify,
        .init           = cls_bpf_init,
        .destroy        = cls_bpf_destroy,
        .get            = cls_bpf_get,
        .change         = cls_bpf_change,
        .delete         = cls_bpf_delete,
        .walk           = cls_bpf_walk,
        .dump           = cls_bpf_dump,
        .bind_class     = cls_bpf_bind_class,
};

static int __init cls_bpf_init_mod(void)
{
        return register_tcf_proto_ops(&cls_bpf_ops);
}

static void __exit cls_bpf_exit_mod(void)
{
        unregister_tcf_proto_ops(&cls_bpf_ops);
}

module_init(cls_bpf_init_mod);
module_exit(cls_bpf_exit_mod);