
/*
 * net/sched/cls_cgroup.c	Control Group Classifier
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Authors:	Thomas Graf <tgraf@suug.ch>
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/cgroup.h>
#include <linux/rcupdate.h>
#include <linux/fdtable.h>
#include <net/rtnetlink.h>
#include <net/pkt_cls.h>
#include <net/sock.h>
#include <net/cls_cgroup.h>
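
/*
 * Accessors for the net_cls state embedded in a cgroup's subsystem state.
 * css_cls_state() maps a NULL css to NULL, so that asking for the
 * (nonexistent) parent of the root cgroup in cgrp_css_online() below
 * works transparently.
 */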
static inline struct cgroup_cls_state *css_cls_state(struct cgroup_subsys_state *css)
{
	return css ? container_of(css, struct cgroup_cls_state, css) : NULL;
}

static inline struct cgroup_cls_state *cgrp_cls_state(struct cgroup *cgrp)
{
	return css_cls_state(cgroup_css(cgrp, net_cls_subsys_id));
}

static inline struct cgroup_cls_state *task_cls_state(struct task_struct *p)
{
	return css_cls_state(task_css(p, net_cls_subsys_id));
}
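
/*
 * cgroup lifecycle callbacks: one cgroup_cls_state is allocated per cgroup,
 * and once the new css is linked into the hierarchy (css_online) it
 * inherits the parent's classid, so child cgroups classify like their
 * parent by default.
 */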
static struct cgroup_subsys_state *cgrp_css_alloc(struct cgroup *cgrp)
{
	struct cgroup_cls_state *cs;

	cs = kzalloc(sizeof(*cs), GFP_KERNEL);
	if (!cs)
		return ERR_PTR(-ENOMEM);
	return &cs->css;
}

static int cgrp_css_online(struct cgroup *cgrp)
{
	struct cgroup_cls_state *cs = cgrp_cls_state(cgrp);
	struct cgroup_cls_state *parent = css_cls_state(css_parent(&cs->css));

	if (parent)
		cs->classid = parent->classid;
	return 0;
}

static void cgrp_css_free(struct cgroup *cgrp)
{
	kfree(cgrp_cls_state(cgrp));
}
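
/*
 * When tasks migrate into a net_cls cgroup, walk each task's open files
 * and restamp every socket with the task's (new) classid, so that
 * already-open sockets pick up the cgroup change as well.
 */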
static int update_classid(const void *v, struct file *file, unsigned n)
{
	int err;
	struct socket *sock = sock_from_file(file, &err);

	if (sock)
		sock->sk->sk_classid = (u32)(unsigned long)v;
	return 0;
}

static void cgrp_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
{
	struct task_struct *p;
	void *v;

	cgroup_taskset_for_each(p, cgrp, tset) {
		task_lock(p);
		v = (void *)(unsigned long)task_cls_classid(p);
		iterate_fd(p->files, 0, update_classid, v);
		task_unlock(p);
	}
}
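
/*
 * The "net_cls.classid" control file.  The value is a u64 of which only
 * the low 32 bits are kept; it encodes a tc class handle as 0xAAAABBBB
 * (major:minor), e.g. 0x100001 for 10:1.
 */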
static u64 read_classid(struct cgroup *cgrp, struct cftype *cft)
{
	return cgrp_cls_state(cgrp)->classid;
}

static int write_classid(struct cgroup *cgrp, struct cftype *cft, u64 value)
{
	cgrp_cls_state(cgrp)->classid = (u32) value;
	return 0;
}

static struct cftype ss_files[] = {
	{
		.name		= "classid",
		.read_u64	= read_classid,
		.write_u64	= write_classid,
	},
	{ }	/* terminate */
};

struct cgroup_subsys net_cls_subsys = {
	.name		= "net_cls",
	.css_alloc	= cgrp_css_alloc,
	.css_online	= cgrp_css_online,
	.css_free	= cgrp_css_free,
	.attach		= cgrp_attach,
	.subsys_id	= net_cls_subsys_id,
	.base_cftypes	= ss_files,
	.module		= THIS_MODULE,
};
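
/*
 * Per-filter state of the "cgroup" classifier: a single instance per
 * tcf_proto, carrying the usual extensions (actions/policing) and an
 * optional ematch tree for additional matching.
 */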
struct cls_cgroup_head {
	u32			handle;
	struct tcf_exts		exts;
	struct tcf_ematch_tree	ematches;
};
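
/*
 * The actual classification: packets are mapped to the classid configured
 * for the transmitting task's cgroup.  Illustrative setup (device names
 * and handles are examples only):
 *
 *	mkdir /sys/fs/cgroup/net_cls/0
 *	echo 0x100001 > /sys/fs/cgroup/net_cls/0/net_cls.classid
 *	tc qdisc add dev eth0 root handle 10: htb
 *	tc class add dev eth0 parent 10: classid 10:1 htb rate 40mbit
 *	tc filter add dev eth0 parent 10: protocol ip prio 10 handle 1: cgroup
 */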
static int cls_cgroup_classify(struct sk_buff *skb, const struct tcf_proto *tp,
			       struct tcf_result *res)
{
	struct cls_cgroup_head *head = tp->root;
	u32 classid;

	rcu_read_lock();
	classid = task_cls_state(current)->classid;
	rcu_read_unlock();

	/*
	 * Due to the nature of the classifier it is required to ignore all
	 * packets originating from softirq context as accessing `current'
	 * would lead to false results.
	 *
	 * This test assumes that all callers of dev_queue_xmit() explicitly
	 * disable bh. Knowing this, it is possible to detect softirq based
	 * calls by looking at the number of nested bh disable calls because
	 * softirqs always disable bh.
	 */
	if (in_serving_softirq()) {
		/* If there is an sk_classid we'll use that. */
		if (!skb->sk)
			return -1;
		classid = skb->sk->sk_classid;
	}

	if (!classid)
		return -1;

	if (!tcf_em_tree_match(skb, &head->ematches, NULL))
		return -1;

	res->classid = classid;
	res->class = 0;
	return tcf_exts_exec(skb, &head->exts, res);
}

static unsigned long cls_cgroup_get(struct tcf_proto *tp, u32 handle)
{
	return 0UL;
}

static void cls_cgroup_put(struct tcf_proto *tp, unsigned long f)
{
}

static int cls_cgroup_init(struct tcf_proto *tp)
{
	return 0;
}

static const struct tcf_ext_map cgroup_ext_map = {
	.action = TCA_CGROUP_ACT,
	.police = TCA_CGROUP_POLICE,
};

static const struct nla_policy cgroup_policy[TCA_CGROUP_MAX + 1] = {
	[TCA_CGROUP_EMATCHES]	= { .type = NLA_NESTED },
};
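
/*
 * Create or update the single filter instance.  The head is allocated on
 * first use; later changes must reference the same handle or fail with
 * -ENOENT.  Extensions and ematches are fully validated before being
 * swapped into the live head.
 */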
static int cls_cgroup_change(struct net *net, struct sk_buff *in_skb,
			     struct tcf_proto *tp, unsigned long base,
			     u32 handle, struct nlattr **tca,
			     unsigned long *arg)
{
	struct nlattr *tb[TCA_CGROUP_MAX + 1];
	struct cls_cgroup_head *head = tp->root;
	struct tcf_ematch_tree t;
	struct tcf_exts e;
	int err;

	if (!tca[TCA_OPTIONS])
		return -EINVAL;

	if (head == NULL) {
		if (!handle)
			return -EINVAL;

		head = kzalloc(sizeof(*head), GFP_KERNEL);
		if (head == NULL)
			return -ENOBUFS;

		head->handle = handle;

		tcf_tree_lock(tp);
		tp->root = head;
		tcf_tree_unlock(tp);
	}

	if (handle != head->handle)
		return -ENOENT;

	err = nla_parse_nested(tb, TCA_CGROUP_MAX, tca[TCA_OPTIONS],
			       cgroup_policy);
	if (err < 0)
		return err;

	err = tcf_exts_validate(net, tp, tb, tca[TCA_RATE], &e,
				&cgroup_ext_map);
	if (err < 0)
		return err;

	err = tcf_em_tree_validate(tp, tb[TCA_CGROUP_EMATCHES], &t);
	if (err < 0)
		return err;

	tcf_exts_change(tp, &head->exts, &e);
	tcf_em_tree_change(tp, &head->ematches, &t);

	return 0;
}
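
/*
 * There is at most one filter per classifier instance: deleting it is
 * not supported, and walk visits just the single head.
 */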
static void cls_cgroup_destroy(struct tcf_proto *tp)
{
	struct cls_cgroup_head *head = tp->root;

	if (head) {
		tcf_exts_destroy(tp, &head->exts);
		tcf_em_tree_destroy(tp, &head->ematches);
		kfree(head);
	}
}

static int cls_cgroup_delete(struct tcf_proto *tp, unsigned long arg)
{
	return -EOPNOTSUPP;
}

static void cls_cgroup_walk(struct tcf_proto *tp, struct tcf_walker *arg)
{
	struct cls_cgroup_head *head = tp->root;

	if (arg->count < arg->skip)
		goto skip;

	if (arg->fn(tp, (unsigned long) head, arg) < 0) {
		arg->stop = 1;
		return;
	}
skip:
	arg->count++;
}

static int cls_cgroup_dump(struct tcf_proto *tp, unsigned long fh,
			   struct sk_buff *skb, struct tcmsg *t)
{
	struct cls_cgroup_head *head = tp->root;
	unsigned char *b = skb_tail_pointer(skb);
	struct nlattr *nest;

	t->tcm_handle = head->handle;

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	if (tcf_exts_dump(skb, &head->exts, &cgroup_ext_map) < 0 ||
	    tcf_em_tree_dump(skb, &head->ematches, TCA_CGROUP_EMATCHES) < 0)
		goto nla_put_failure;

	nla_nest_end(skb, nest);

	if (tcf_exts_dump_stats(skb, &head->exts, &cgroup_ext_map) < 0)
		goto nla_put_failure;

	return skb->len;

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

static struct tcf_proto_ops cls_cgroup_ops __read_mostly = {
	.kind		=	"cgroup",
	.init		=	cls_cgroup_init,
	.change		=	cls_cgroup_change,
	.classify	=	cls_cgroup_classify,
	.destroy	=	cls_cgroup_destroy,
	.get		=	cls_cgroup_get,
	.put		=	cls_cgroup_put,
	.delete		=	cls_cgroup_delete,
	.walk		=	cls_cgroup_walk,
	.dump		=	cls_cgroup_dump,
	.owner		=	THIS_MODULE,
};
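
/*
 * Module init/exit: the net_cls cgroup subsystem must be loaded before
 * the classifier is registered, and is unloaded again if registration
 * fails.
 */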
static int __init init_cgroup_cls(void)
{
	int ret;

	ret = cgroup_load_subsys(&net_cls_subsys);
	if (ret)
		goto out;

	ret = register_tcf_proto_ops(&cls_cgroup_ops);
	if (ret)
		cgroup_unload_subsys(&net_cls_subsys);

out:
	return ret;
}

static void __exit exit_cgroup_cls(void)
{
	unregister_tcf_proto_ops(&cls_cgroup_ops);
	cgroup_unload_subsys(&net_cls_subsys);
}

module_init(init_cgroup_cls);
module_exit(exit_cgroup_cls);
MODULE_LICENSE("GPL");