cls_bpf.c

/*
 * Berkeley Packet Filter based traffic classifier
 *
 * Might be used to classify traffic through flexible, user-defined and
 * possibly JIT-ed BPF filters for traffic control as an alternative to
 * ematches.
 *
 * (C) 2013 Daniel Borkmann <dborkman@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

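/*
 * Example usage (illustrative only; the exact tc syntax may differ
 * between iproute2 versions): attach a classic BPF program that matches
 * IPv4 frames (EtherType 0x0800) and classifies them into class 1:1,
 * with the bytecode given in the "insns,opcode jt jf k,..." format
 * emitted by bpf_asm:
 *
 *   tc filter add dev em1 parent 1: bpf classid 1:1 \
 *      bytecode '4,40 0 0 12,21 0 1 2048,6 0 0 4294967295,6 0 0 0'
 *
 * The program loads the half-word at offset 12, compares it against
 * 2048 (ETH_P_IP), returns -1 (match, keep the configured classid) on
 * success and 0 (no match) otherwise.
 */
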
#include <linux/module.h>
#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/filter.h>

#include <net/rtnetlink.h>
#include <net/pkt_cls.h>
#include <net/sock.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Daniel Borkmann <dborkman@redhat.com>");
MODULE_DESCRIPTION("TC BPF based classifier");

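/*
 * Per-classifier state: a list of attached programs plus the last
 * auto-generated handle (hgen). Each program carries the compiled
 * filter, a copy of the raw classic BPF ops for dumping it back to
 * userspace, its extended actions and the bound class.
 */
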
struct cls_bpf_head {
        struct list_head plist;
        u32 hgen;
};

struct cls_bpf_prog {
        struct sk_filter *filter;
        struct sock_filter *bpf_ops;
        struct tcf_exts exts;
        struct tcf_result res;
        struct list_head link;
        u32 handle;
        u16 bpf_len;
};

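/* Netlink policy: the binary ops blob is capped at BPF_MAXINSNS instructions. */
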
static const struct nla_policy bpf_policy[TCA_BPF_MAX + 1] = {
        [TCA_BPF_CLASSID]       = { .type = NLA_U32 },
        [TCA_BPF_OPS_LEN]       = { .type = NLA_U16 },
        [TCA_BPF_OPS]           = { .type = NLA_BINARY,
                                    .len = sizeof(struct sock_filter) * BPF_MAXINSNS },
};

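/*
 * Run each attached program over the skb until one matches: a filter
 * return value of 0 means no match, -1 means match with the program's
 * configured classid, and any other value overrides the classid.
 */
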
static int cls_bpf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
                            struct tcf_result *res)
{
        struct cls_bpf_head *head = tp->root;
        struct cls_bpf_prog *prog;
        int ret;

        list_for_each_entry(prog, &head->plist, link) {
                int filter_res = SK_RUN_FILTER(prog->filter, skb);

                if (filter_res == 0)
                        continue;

                *res = prog->res;
                if (filter_res != -1)
                        res->classid = filter_res;

                ret = tcf_exts_exec(skb, &prog->exts, res);
                if (ret < 0)
                        continue;

                return ret;
        }

        return -1;
}

static int cls_bpf_init(struct tcf_proto *tp)
{
        struct cls_bpf_head *head;

        head = kzalloc(sizeof(*head), GFP_KERNEL);
        if (head == NULL)
                return -ENOBUFS;

        INIT_LIST_HEAD(&head->plist);
        tp->root = head;

        return 0;
}

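/*
 * Release everything a program owns: the class binding, the extended
 * actions, the attached filter and the copied classic BPF ops.
 */
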
static void cls_bpf_delete_prog(struct tcf_proto *tp, struct cls_bpf_prog *prog)
{
        tcf_unbind_filter(tp, &prog->res);
        tcf_exts_destroy(tp, &prog->exts);

        sk_unattached_filter_destroy(prog->filter);

        kfree(prog->bpf_ops);
        kfree(prog);
}

static int cls_bpf_delete(struct tcf_proto *tp, unsigned long arg)
{
        struct cls_bpf_head *head = tp->root;
        struct cls_bpf_prog *prog, *todel = (struct cls_bpf_prog *) arg;

        list_for_each_entry(prog, &head->plist, link) {
                if (prog == todel) {
                        tcf_tree_lock(tp);
                        list_del(&prog->link);
                        tcf_tree_unlock(tp);

                        cls_bpf_delete_prog(tp, prog);
                        return 0;
                }
        }

        return -ENOENT;
}

static void cls_bpf_destroy(struct tcf_proto *tp)
{
        struct cls_bpf_head *head = tp->root;
        struct cls_bpf_prog *prog, *tmp;

        list_for_each_entry_safe(prog, tmp, &head->plist, link) {
                list_del(&prog->link);
                cls_bpf_delete_prog(tp, prog);
        }

        kfree(head);
}

static unsigned long cls_bpf_get(struct tcf_proto *tp, u32 handle)
{
        struct cls_bpf_head *head = tp->root;
        struct cls_bpf_prog *prog;
        unsigned long ret = 0UL;

        if (head == NULL)
                return 0UL;

        list_for_each_entry(prog, &head->plist, link) {
                if (prog->handle == handle) {
                        ret = (unsigned long) prog;
                        break;
                }
        }

        return ret;
}

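/* Programs are not reference counted, hence ->put is a no-op. */
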
static void cls_bpf_put(struct tcf_proto *tp, unsigned long f)
{
}

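/*
 * Build the replacement filter first, then swap it in under
 * tcf_tree_lock() so that cls_bpf_classify() never sees a half-updated
 * program; the old filter and ops are released only after the swap.
 */
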
static int cls_bpf_modify_existing(struct net *net, struct tcf_proto *tp,
                                   struct cls_bpf_prog *prog,
                                   unsigned long base, struct nlattr **tb,
                                   struct nlattr *est, bool ovr)
{
        struct sock_filter *bpf_ops, *bpf_old;
        struct tcf_exts exts;
        struct sock_fprog_kern tmp;
        struct sk_filter *fp, *fp_old;
        u16 bpf_size, bpf_len;
        u32 classid;
        int ret;

        if (!tb[TCA_BPF_OPS_LEN] || !tb[TCA_BPF_OPS] || !tb[TCA_BPF_CLASSID])
                return -EINVAL;

        tcf_exts_init(&exts, TCA_BPF_ACT, TCA_BPF_POLICE);
        ret = tcf_exts_validate(net, tp, tb, est, &exts, ovr);
        if (ret < 0)
                return ret;

        classid = nla_get_u32(tb[TCA_BPF_CLASSID]);
        bpf_len = nla_get_u16(tb[TCA_BPF_OPS_LEN]);
        if (bpf_len > BPF_MAXINSNS || bpf_len == 0) {
                ret = -EINVAL;
                goto errout;
        }

        bpf_size = bpf_len * sizeof(*bpf_ops);
        /* Reject a length attribute that disagrees with the actual blob
         * size, which would otherwise let the memcpy() below read past
         * the end of the netlink attribute.
         */
        if (bpf_size != nla_len(tb[TCA_BPF_OPS])) {
                ret = -EINVAL;
                goto errout;
        }

        bpf_ops = kzalloc(bpf_size, GFP_KERNEL);
        if (bpf_ops == NULL) {
                ret = -ENOMEM;
                goto errout;
        }

        memcpy(bpf_ops, nla_data(tb[TCA_BPF_OPS]), bpf_size);

        tmp.len = bpf_len;
        tmp.filter = bpf_ops;

        ret = sk_unattached_filter_create(&fp, &tmp);
        if (ret)
                goto errout_free;

        tcf_tree_lock(tp);
        fp_old = prog->filter;
        bpf_old = prog->bpf_ops;

        prog->bpf_len = bpf_len;
        prog->bpf_ops = bpf_ops;
        prog->filter = fp;
        prog->res.classid = classid;
        tcf_tree_unlock(tp);

        tcf_bind_filter(tp, &prog->res, base);
        tcf_exts_change(tp, &prog->exts, &exts);

        if (fp_old)
                sk_unattached_filter_destroy(fp_old);
        if (bpf_old)
                kfree(bpf_old);

        return 0;

errout_free:
        kfree(bpf_ops);
errout:
        tcf_exts_destroy(tp, &exts);
        return ret;
}

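/*
 * Pick an unused handle by cycling hgen through 1..0x7FFFFFFE, probing
 * up to 2^31 candidates; returns 0 if no free handle could be found.
 */
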
static u32 cls_bpf_grab_new_handle(struct tcf_proto *tp,
                                   struct cls_bpf_head *head)
{
        unsigned int i = 0x80000000;
        u32 handle;

        do {
                if (++head->hgen == 0x7FFFFFFF)
                        head->hgen = 1;
        } while (--i > 0 && cls_bpf_get(tp, head->hgen));

        if (i == 0) {
                pr_err("Insufficient number of handles\n");
                handle = 0;
        } else {
                /* Return the generated handle itself, not the probe counter. */
                handle = head->hgen;
        }

        return handle;
}

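/*
 * Create a new program or modify an existing one; *arg carries the
 * program previously looked up via ->get. New programs take the handle
 * supplied by userspace or, if none, one from cls_bpf_grab_new_handle().
 */
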
static int cls_bpf_change(struct net *net, struct sk_buff *in_skb,
                          struct tcf_proto *tp, unsigned long base,
                          u32 handle, struct nlattr **tca,
                          unsigned long *arg, bool ovr)
{
        struct cls_bpf_head *head = tp->root;
        struct cls_bpf_prog *prog = (struct cls_bpf_prog *) *arg;
        struct nlattr *tb[TCA_BPF_MAX + 1];
        int ret;

        if (tca[TCA_OPTIONS] == NULL)
                return -EINVAL;

        ret = nla_parse_nested(tb, TCA_BPF_MAX, tca[TCA_OPTIONS], bpf_policy);
        if (ret < 0)
                return ret;

        if (prog != NULL) {
                if (handle && prog->handle != handle)
                        return -EINVAL;
                return cls_bpf_modify_existing(net, tp, prog, base, tb,
                                               tca[TCA_RATE], ovr);
        }

        prog = kzalloc(sizeof(*prog), GFP_KERNEL);
        if (prog == NULL)
                return -ENOBUFS;

        tcf_exts_init(&prog->exts, TCA_BPF_ACT, TCA_BPF_POLICE);

        if (handle == 0)
                prog->handle = cls_bpf_grab_new_handle(tp, head);
        else
                prog->handle = handle;
        if (prog->handle == 0) {
                ret = -EINVAL;
                goto errout;
        }

        ret = cls_bpf_modify_existing(net, tp, prog, base, tb, tca[TCA_RATE], ovr);
        if (ret < 0)
                goto errout;

        tcf_tree_lock(tp);
        list_add(&prog->link, &head->plist);
        tcf_tree_unlock(tp);

        *arg = (unsigned long) prog;
        return 0;

errout:
        if (*arg == 0UL && prog)
                kfree(prog);

        return ret;
}

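/*
 * Dump a program to userspace: its handle, classid, instruction count,
 * the raw classic BPF ops and any extended actions plus their stats.
 */
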
static int cls_bpf_dump(struct net *net, struct tcf_proto *tp, unsigned long fh,
                        struct sk_buff *skb, struct tcmsg *tm)
{
        struct cls_bpf_prog *prog = (struct cls_bpf_prog *) fh;
        struct nlattr *nest, *nla;

        if (prog == NULL)
                return skb->len;

        tm->tcm_handle = prog->handle;

        nest = nla_nest_start(skb, TCA_OPTIONS);
        if (nest == NULL)
                goto nla_put_failure;

        if (nla_put_u32(skb, TCA_BPF_CLASSID, prog->res.classid))
                goto nla_put_failure;
        if (nla_put_u16(skb, TCA_BPF_OPS_LEN, prog->bpf_len))
                goto nla_put_failure;

        nla = nla_reserve(skb, TCA_BPF_OPS, prog->bpf_len *
                          sizeof(struct sock_filter));
        if (nla == NULL)
                goto nla_put_failure;

        memcpy(nla_data(nla), prog->bpf_ops, nla_len(nla));

        if (tcf_exts_dump(skb, &prog->exts) < 0)
                goto nla_put_failure;

        nla_nest_end(skb, nest);

        if (tcf_exts_dump_stats(skb, &prog->exts) < 0)
                goto nla_put_failure;

        return skb->len;

nla_put_failure:
        nla_nest_cancel(skb, nest);
        return -1;
}

static void cls_bpf_walk(struct tcf_proto *tp, struct tcf_walker *arg)
{
        struct cls_bpf_head *head = tp->root;
        struct cls_bpf_prog *prog;

        list_for_each_entry(prog, &head->plist, link) {
                if (arg->count < arg->skip)
                        goto skip;
                if (arg->fn(tp, (unsigned long) prog, arg) < 0) {
                        arg->stop = 1;
                        break;
                }
skip:
                arg->count++;
        }
}

static struct tcf_proto_ops cls_bpf_ops __read_mostly = {
        .kind           =       "bpf",
        .owner          =       THIS_MODULE,
        .classify       =       cls_bpf_classify,
        .init           =       cls_bpf_init,
        .destroy        =       cls_bpf_destroy,
        .get            =       cls_bpf_get,
        .put            =       cls_bpf_put,
        .change         =       cls_bpf_change,
        .delete         =       cls_bpf_delete,
        .walk           =       cls_bpf_walk,
        .dump           =       cls_bpf_dump,
};

static int __init cls_bpf_init_mod(void)
{
        return register_tcf_proto_ops(&cls_bpf_ops);
}

static void __exit cls_bpf_exit_mod(void)
{
        unregister_tcf_proto_ops(&cls_bpf_ops);
}

module_init(cls_bpf_init_mod);
module_exit(cls_bpf_exit_mod);