cls_bpf.c

/*
 * Berkeley Packet Filter based traffic classifier
 *
 * Might be used to classify traffic through flexible, user-defined and
 * possibly JIT-ed BPF filters for traffic control as an alternative to
 * ematches.
 *
 * (C) 2013 Daniel Borkmann <dborkman@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/filter.h>

#include <net/rtnetlink.h>
#include <net/pkt_cls.h>
#include <net/sock.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Daniel Borkmann <dborkman@redhat.com>");
MODULE_DESCRIPTION("TC BPF based classifier");
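
/* cls_bpf_head is the per-classifier state: an RCU-protected list of
 * attached programs plus a generator for automatically assigned handles.
 * cls_bpf_prog is one attached filter: the (possibly JIT-ed) BPF program,
 * the original sock_filter ops kept around for dumping, and the TC bindings.
 */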
struct cls_bpf_head {
	struct list_head plist;
	u32 hgen;
	struct rcu_head rcu;
};

struct cls_bpf_prog {
	struct bpf_prog *filter;
	struct sock_filter *bpf_ops;
	struct tcf_exts exts;
	struct tcf_result res;
	struct list_head link;
	u32 handle;
	u16 bpf_num_ops;
	struct tcf_proto *tp;
	struct rcu_head rcu;
};
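
/* Netlink policy for the classifier attributes: the classid, the number of
 * classic BPF instructions, and the raw instruction array itself.
 */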
static const struct nla_policy bpf_policy[TCA_BPF_MAX + 1] = {
	[TCA_BPF_CLASSID]	= { .type = NLA_U32 },
	[TCA_BPF_OPS_LEN]	= { .type = NLA_U16 },
	[TCA_BPF_OPS]		= { .type = NLA_BINARY,
				    .len = sizeof(struct sock_filter) * BPF_MAXINSNS },
};
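
/* Main classification path, run under RCU in softirq context. Each attached
 * program is executed on the skb: a return value of 0 means no match, -1
 * means "use the configured classid", and any other value overrides the
 * classid. Attached actions are then executed via tcf_exts_exec(); a negative
 * action result falls through to the next program.
 */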
static int cls_bpf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
			    struct tcf_result *res)
{
	struct cls_bpf_head *head = rcu_dereference_bh(tp->root);
	struct cls_bpf_prog *prog;
	int ret;

	list_for_each_entry_rcu(prog, &head->plist, link) {
		int filter_res = BPF_PROG_RUN(prog->filter, skb);

		if (filter_res == 0)
			continue;

		*res = prog->res;
		if (filter_res != -1)
			res->classid = filter_res;

		ret = tcf_exts_exec(skb, &prog->exts, res);
		if (ret < 0)
			continue;

		return ret;
	}

	return -1;
}

static int cls_bpf_init(struct tcf_proto *tp)
{
	struct cls_bpf_head *head;

	head = kzalloc(sizeof(*head), GFP_KERNEL);
	if (head == NULL)
		return -ENOBUFS;

	INIT_LIST_HEAD_RCU(&head->plist);
	rcu_assign_pointer(tp->root, head);

	return 0;
}
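
/* Free everything a program owns: its actions, the BPF filter itself and
 * the copied classic BPF ops. __cls_bpf_delete_prog() is the RCU callback
 * wrapper used once no reader can still see the program.
 */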
static void cls_bpf_delete_prog(struct tcf_proto *tp, struct cls_bpf_prog *prog)
{
	tcf_exts_destroy(&prog->exts);

	bpf_prog_destroy(prog->filter);

	kfree(prog->bpf_ops);
	kfree(prog);
}

static void __cls_bpf_delete_prog(struct rcu_head *rcu)
{
	struct cls_bpf_prog *prog = container_of(rcu, struct cls_bpf_prog, rcu);

	cls_bpf_delete_prog(prog->tp, prog);
}
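
/* Unlink a single filter from the list and defer its destruction to an RCU
 * grace period so concurrent classification is not disturbed.
 */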
static int cls_bpf_delete(struct tcf_proto *tp, unsigned long arg)
{
	struct cls_bpf_prog *prog = (struct cls_bpf_prog *) arg;

	list_del_rcu(&prog->link);
	tcf_unbind_filter(tp, &prog->res);
	call_rcu(&prog->rcu, __cls_bpf_delete_prog);

	return 0;
}
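
/* Tear down the whole classifier instance: unlink and free every program,
 * then release the head once readers are gone.
 */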
static void cls_bpf_destroy(struct tcf_proto *tp)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);
	struct cls_bpf_prog *prog, *tmp;

	list_for_each_entry_safe(prog, tmp, &head->plist, link) {
		list_del_rcu(&prog->link);
		tcf_unbind_filter(tp, &prog->res);
		call_rcu(&prog->rcu, __cls_bpf_delete_prog);
	}

	RCU_INIT_POINTER(tp->root, NULL);
	kfree_rcu(head, rcu);
}
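
/* Look up a filter by handle; returns the program cast to unsigned long,
 * or 0 if no filter with that handle exists.
 */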
static unsigned long cls_bpf_get(struct tcf_proto *tp, u32 handle)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);
	struct cls_bpf_prog *prog;
	unsigned long ret = 0UL;

	if (head == NULL)
		return 0UL;

	list_for_each_entry(prog, &head->plist, link) {
		if (prog->handle == handle) {
			ret = (unsigned long) prog;
			break;
		}
	}

	return ret;
}
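
/* Fill in a program from the netlink attributes: validate the actions, copy
 * the classic BPF instructions supplied by user space, build the kernel BPF
 * program via bpf_prog_create() (which may JIT it), and bind the classid.
 */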
static int cls_bpf_modify_existing(struct net *net, struct tcf_proto *tp,
				   struct cls_bpf_prog *prog,
				   unsigned long base, struct nlattr **tb,
				   struct nlattr *est, bool ovr)
{
	struct sock_filter *bpf_ops;
	struct tcf_exts exts;
	struct sock_fprog_kern tmp;
	struct bpf_prog *fp;
	u16 bpf_size, bpf_num_ops;
	u32 classid;
	int ret;

	if (!tb[TCA_BPF_OPS_LEN] || !tb[TCA_BPF_OPS] || !tb[TCA_BPF_CLASSID])
		return -EINVAL;

	tcf_exts_init(&exts, TCA_BPF_ACT, TCA_BPF_POLICE);
	ret = tcf_exts_validate(net, tp, tb, est, &exts, ovr);
	if (ret < 0)
		return ret;

	classid = nla_get_u32(tb[TCA_BPF_CLASSID]);
	bpf_num_ops = nla_get_u16(tb[TCA_BPF_OPS_LEN]);
	if (bpf_num_ops > BPF_MAXINSNS || bpf_num_ops == 0) {
		ret = -EINVAL;
		goto errout;
	}

	bpf_size = bpf_num_ops * sizeof(*bpf_ops);
	if (bpf_size != nla_len(tb[TCA_BPF_OPS])) {
		ret = -EINVAL;
		goto errout;
	}

	bpf_ops = kzalloc(bpf_size, GFP_KERNEL);
	if (bpf_ops == NULL) {
		ret = -ENOMEM;
		goto errout;
	}

	memcpy(bpf_ops, nla_data(tb[TCA_BPF_OPS]), bpf_size);

	tmp.len = bpf_num_ops;
	tmp.filter = bpf_ops;

	ret = bpf_prog_create(&fp, &tmp);
	if (ret)
		goto errout_free;

	prog->bpf_num_ops = bpf_num_ops;
	prog->bpf_ops = bpf_ops;
	prog->filter = fp;
	prog->res.classid = classid;

	tcf_bind_filter(tp, &prog->res, base);
	tcf_exts_change(tp, &prog->exts, &exts);

	return 0;
errout_free:
	kfree(bpf_ops);
errout:
	tcf_exts_destroy(&exts);
	return ret;
}
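
/* Generate an unused handle when the user did not supply one. The counter
 * wraps back to 1 at 0x7FFFFFFF; 0 is returned if no free handle was found.
 */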
static u32 cls_bpf_grab_new_handle(struct tcf_proto *tp,
				   struct cls_bpf_head *head)
{
	unsigned int i = 0x80000000;
	u32 handle;

	do {
		if (++head->hgen == 0x7FFFFFFF)
			head->hgen = 1;
	} while (--i > 0 && cls_bpf_get(tp, head->hgen));

	if (unlikely(i == 0)) {
		pr_err("Insufficient number of handles\n");
		handle = 0;
	} else {
		handle = head->hgen;
	}

	return handle;
}
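
/* Create or replace a filter. A new program is always allocated and set up
 * from the netlink attributes; on success it either replaces the old entry
 * in the list (freeing the old one after an RCU grace period) or is added
 * as a new entry.
 */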
static int cls_bpf_change(struct net *net, struct sk_buff *in_skb,
			  struct tcf_proto *tp, unsigned long base,
			  u32 handle, struct nlattr **tca,
			  unsigned long *arg, bool ovr)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);
	struct cls_bpf_prog *oldprog = (struct cls_bpf_prog *) *arg;
	struct nlattr *tb[TCA_BPF_MAX + 1];
	struct cls_bpf_prog *prog;
	int ret;

	if (tca[TCA_OPTIONS] == NULL)
		return -EINVAL;

	ret = nla_parse_nested(tb, TCA_BPF_MAX, tca[TCA_OPTIONS], bpf_policy);
	if (ret < 0)
		return ret;

	prog = kzalloc(sizeof(*prog), GFP_KERNEL);
	if (!prog)
		return -ENOBUFS;

	tcf_exts_init(&prog->exts, TCA_BPF_ACT, TCA_BPF_POLICE);

	if (oldprog) {
		if (handle && oldprog->handle != handle) {
			ret = -EINVAL;
			goto errout;
		}
	}

	if (handle == 0)
		prog->handle = cls_bpf_grab_new_handle(tp, head);
	else
		prog->handle = handle;
	if (prog->handle == 0) {
		ret = -EINVAL;
		goto errout;
	}

	ret = cls_bpf_modify_existing(net, tp, prog, base, tb, tca[TCA_RATE], ovr);
	if (ret < 0)
		goto errout;

	if (oldprog) {
		list_replace_rcu(&oldprog->link, &prog->link);
		tcf_unbind_filter(tp, &oldprog->res);
		call_rcu(&oldprog->rcu, __cls_bpf_delete_prog);
	} else {
		list_add_rcu(&prog->link, &head->plist);
	}

	*arg = (unsigned long) prog;
	return 0;
errout:
	kfree(prog);

	return ret;
}
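
/* Dump one filter's configuration back to user space: classid, number of
 * classic BPF instructions, the raw instruction array, and the attached
 * extensions/actions with their statistics.
 */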
static int cls_bpf_dump(struct net *net, struct tcf_proto *tp, unsigned long fh,
			struct sk_buff *skb, struct tcmsg *tm)
{
	struct cls_bpf_prog *prog = (struct cls_bpf_prog *) fh;
	struct nlattr *nest, *nla;

	if (prog == NULL)
		return skb->len;

	tm->tcm_handle = prog->handle;

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	if (nla_put_u32(skb, TCA_BPF_CLASSID, prog->res.classid))
		goto nla_put_failure;
	if (nla_put_u16(skb, TCA_BPF_OPS_LEN, prog->bpf_num_ops))
		goto nla_put_failure;

	nla = nla_reserve(skb, TCA_BPF_OPS, prog->bpf_num_ops *
			  sizeof(struct sock_filter));
	if (nla == NULL)
		goto nla_put_failure;

	memcpy(nla_data(nla), prog->bpf_ops, nla_len(nla));

	if (tcf_exts_dump(skb, &prog->exts) < 0)
		goto nla_put_failure;

	nla_nest_end(skb, nest);

	if (tcf_exts_dump_stats(skb, &prog->exts) < 0)
		goto nla_put_failure;

	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}
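
/* Iterate over all filters on behalf of a dump, honouring the walker's
 * skip/count bookkeeping.
 */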
static void cls_bpf_walk(struct tcf_proto *tp, struct tcf_walker *arg)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);
	struct cls_bpf_prog *prog;

	list_for_each_entry(prog, &head->plist, link) {
		if (arg->count < arg->skip)
			goto skip;
		if (arg->fn(tp, (unsigned long) prog, arg) < 0) {
			arg->stop = 1;
			break;
		}
skip:
		arg->count++;
	}
}

static struct tcf_proto_ops cls_bpf_ops __read_mostly = {
	.kind		= "bpf",
	.owner		= THIS_MODULE,
	.classify	= cls_bpf_classify,
	.init		= cls_bpf_init,
	.destroy	= cls_bpf_destroy,
	.get		= cls_bpf_get,
	.change		= cls_bpf_change,
	.delete		= cls_bpf_delete,
	.walk		= cls_bpf_walk,
	.dump		= cls_bpf_dump,
};

static int __init cls_bpf_init_mod(void)
{
	return register_tcf_proto_ops(&cls_bpf_ops);
}

static void __exit cls_bpf_exit_mod(void)
{
	unregister_tcf_proto_ops(&cls_bpf_ops);
}

module_init(cls_bpf_init_mod);
module_exit(cls_bpf_exit_mod);