/* net/sched/sch_dsmark.c - Differentiated Services field marker */

/* Written 1998-2000 by Werner Almesberger, EPFL ICA */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/bitops.h>
#include <net/pkt_sched.h>
#include <net/dsfield.h>
#include <net/inet_ecn.h>
#include <asm/byteorder.h>

/*
 * classid        class      marking
 * -------        -----      -------
 * n/a              0        n/a
 * x:0              1        use entry [0]
 * ...             ...       ...
 * x:y y>0         y+1       use entry [y]
 * ...             ...       ...
 * x:indices-1   indices     use entry [indices-1]
 * ...             ...       ...
 * x:y             y+1       use entry [y & (indices-1)]
 * ...             ...       ...
 * 0xffff        0x10000     use entry [indices-1]
 */
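
/*
 * Illustrative userspace configuration (a sketch only; the device name,
 * handle and mask/value numbers are arbitrary examples, and the option
 * names are assumed from iproute2's dsmark support, not defined here):
 *
 *   tc qdisc add dev eth0 handle 1:0 root dsmark indices 64 set_tc_index
 *   tc class change dev eth0 classid 1:1 dsmark mask 0x3 value 0xb8
 *
 * The qdisc holds "indices" mask/value entries; changing class x:y edits
 * entry [y] according to the table above.
 */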

#define NO_DEFAULT_INDEX (1 << 16)

struct mask_value {
        u8 mask;
        u8 value;
};
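
/*
 * On dequeue the DS field of IPv4/IPv6 packets is rewritten through
 * ipv4_change_dsfield()/ipv6_change_dsfield(), which behave roughly as
 *
 *   dsfield = (dsfield & mask) | value;
 *
 * so the reset state (mask = 0xff, value = 0) leaves packets untouched.
 */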

struct dsmark_qdisc_data {
        struct Qdisc            *q;
        struct tcf_proto __rcu  *filter_list;
        struct mask_value       *mv;
        u16                     indices;
        u8                      set_tc_index;
        u32                     default_index; /* index range is 0...0xffff */
#define DSMARK_EMBEDDED_SZ      16
        struct mask_value       embedded[DSMARK_EMBEDDED_SZ];
};

static inline int dsmark_valid_index(struct dsmark_qdisc_data *p, u16 index)
{
        return index <= p->indices && index > 0;
}

/* ------------------------- Class/flow operations ------------------------- */

static int dsmark_graft(struct Qdisc *sch, unsigned long arg,
                        struct Qdisc *new, struct Qdisc **old)
{
        struct dsmark_qdisc_data *p = qdisc_priv(sch);

        pr_debug("%s(sch %p,[qdisc %p],new %p,old %p)\n",
                 __func__, sch, p, new, old);

        if (new == NULL) {
                new = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
                                        sch->handle);
                if (new == NULL)
                        new = &noop_qdisc;
        }

        sch_tree_lock(sch);
        *old = p->q;
        p->q = new;
        qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
        qdisc_reset(*old);
        sch_tree_unlock(sch);

        return 0;
}

static struct Qdisc *dsmark_leaf(struct Qdisc *sch, unsigned long arg)
{
        struct dsmark_qdisc_data *p = qdisc_priv(sch);
        return p->q;
}

static unsigned long dsmark_get(struct Qdisc *sch, u32 classid)
{
        pr_debug("%s(sch %p,[qdisc %p],classid %x)\n",
                 __func__, sch, qdisc_priv(sch), classid);

        return TC_H_MIN(classid) + 1;
}

static unsigned long dsmark_bind_filter(struct Qdisc *sch,
                                        unsigned long parent, u32 classid)
{
        return dsmark_get(sch, classid);
}

static void dsmark_put(struct Qdisc *sch, unsigned long cl)
{
}

static const struct nla_policy dsmark_policy[TCA_DSMARK_MAX + 1] = {
        [TCA_DSMARK_INDICES]            = { .type = NLA_U16 },
        [TCA_DSMARK_DEFAULT_INDEX]      = { .type = NLA_U16 },
        [TCA_DSMARK_SET_TC_INDEX]       = { .type = NLA_FLAG },
        [TCA_DSMARK_MASK]               = { .type = NLA_U8 },
        [TCA_DSMARK_VALUE]              = { .type = NLA_U8 },
};

static int dsmark_change(struct Qdisc *sch, u32 classid, u32 parent,
                         struct nlattr **tca, unsigned long *arg)
{
        struct dsmark_qdisc_data *p = qdisc_priv(sch);
        struct nlattr *opt = tca[TCA_OPTIONS];
        struct nlattr *tb[TCA_DSMARK_MAX + 1];
        int err = -EINVAL;

        pr_debug("%s(sch %p,[qdisc %p],classid %x,parent %x), arg 0x%lx\n",
                 __func__, sch, p, classid, parent, *arg);

        if (!dsmark_valid_index(p, *arg)) {
                err = -ENOENT;
                goto errout;
        }

        if (!opt)
                goto errout;

        err = nla_parse_nested(tb, TCA_DSMARK_MAX, opt, dsmark_policy);
        if (err < 0)
                goto errout;

        if (tb[TCA_DSMARK_VALUE])
                p->mv[*arg - 1].value = nla_get_u8(tb[TCA_DSMARK_VALUE]);

        if (tb[TCA_DSMARK_MASK])
                p->mv[*arg - 1].mask = nla_get_u8(tb[TCA_DSMARK_MASK]);

        err = 0;

errout:
        return err;
}

static int dsmark_delete(struct Qdisc *sch, unsigned long arg)
{
        struct dsmark_qdisc_data *p = qdisc_priv(sch);

        if (!dsmark_valid_index(p, arg))
                return -EINVAL;

        p->mv[arg - 1].mask = 0xff;
        p->mv[arg - 1].value = 0;

        return 0;
}

static void dsmark_walk(struct Qdisc *sch, struct qdisc_walker *walker)
{
        struct dsmark_qdisc_data *p = qdisc_priv(sch);
        int i;

        pr_debug("%s(sch %p,[qdisc %p],walker %p)\n",
                 __func__, sch, p, walker);

        if (walker->stop)
                return;

        for (i = 0; i < p->indices; i++) {
                if (p->mv[i].mask == 0xff && !p->mv[i].value)
                        goto ignore;
                if (walker->count >= walker->skip) {
                        if (walker->fn(sch, i + 1, walker) < 0) {
                                walker->stop = 1;
                                break;
                        }
                }
ignore:
                walker->count++;
        }
}

static inline struct tcf_proto __rcu **dsmark_find_tcf(struct Qdisc *sch,
                                                       unsigned long cl)
{
        struct dsmark_qdisc_data *p = qdisc_priv(sch);
        return &p->filter_list;
}

/* --------------------------- Qdisc operations ---------------------------- */
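
/*
 * Enqueue and dequeue cooperate in two phases: enqueue records a table
 * index in skb->tc_index (taken from the packet's DS field when
 * set_tc_index was given, from skb->priority, from an attached
 * classifier, or from default_index), and dequeue later uses that index
 * to rewrite the DS field with the selected entry's mask/value pair.
 */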

static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
        struct dsmark_qdisc_data *p = qdisc_priv(sch);
        int err;

        pr_debug("%s(skb %p,sch %p,[qdisc %p])\n", __func__, skb, sch, p);

        if (p->set_tc_index) {
                switch (tc_skb_protocol(skb)) {
                case htons(ETH_P_IP):
                        if (skb_cow_head(skb, sizeof(struct iphdr)))
                                goto drop;

                        skb->tc_index = ipv4_get_dsfield(ip_hdr(skb))
                                & ~INET_ECN_MASK;
                        break;

                case htons(ETH_P_IPV6):
                        if (skb_cow_head(skb, sizeof(struct ipv6hdr)))
                                goto drop;

                        skb->tc_index = ipv6_get_dsfield(ipv6_hdr(skb))
                                & ~INET_ECN_MASK;
                        break;
                default:
                        skb->tc_index = 0;
                        break;
                }
        }

        if (TC_H_MAJ(skb->priority) == sch->handle)
                skb->tc_index = TC_H_MIN(skb->priority);
        else {
                struct tcf_result res;
                struct tcf_proto *fl = rcu_dereference_bh(p->filter_list);
                int result = tc_classify(skb, fl, &res, false);

                pr_debug("result %d class 0x%04x\n", result, res.classid);

                switch (result) {
#ifdef CONFIG_NET_CLS_ACT
                case TC_ACT_QUEUED:
                case TC_ACT_STOLEN:
                        kfree_skb(skb);
                        return NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;

                case TC_ACT_SHOT:
                        goto drop;
#endif
                case TC_ACT_OK:
                        skb->tc_index = TC_H_MIN(res.classid);
                        break;

                default:
                        if (p->default_index != NO_DEFAULT_INDEX)
                                skb->tc_index = p->default_index;
                        break;
                }
        }

        err = qdisc_enqueue(skb, p->q);
        if (err != NET_XMIT_SUCCESS) {
                if (net_xmit_drop_count(err))
                        qdisc_qstats_drop(sch);
                return err;
        }

        sch->q.qlen++;

        return NET_XMIT_SUCCESS;

drop:
        qdisc_drop(skb, sch);
        return NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
}

static struct sk_buff *dsmark_dequeue(struct Qdisc *sch)
{
        struct dsmark_qdisc_data *p = qdisc_priv(sch);
        struct sk_buff *skb;
        u32 index;

        pr_debug("%s(sch %p,[qdisc %p])\n", __func__, sch, p);

        skb = p->q->ops->dequeue(p->q);
        if (skb == NULL)
                return NULL;

        qdisc_bstats_update(sch, skb);
        sch->q.qlen--;

        index = skb->tc_index & (p->indices - 1);
        pr_debug("index %d->%d\n", skb->tc_index, index);

        switch (tc_skb_protocol(skb)) {
        case htons(ETH_P_IP):
                ipv4_change_dsfield(ip_hdr(skb), p->mv[index].mask,
                                    p->mv[index].value);
                break;
        case htons(ETH_P_IPV6):
                ipv6_change_dsfield(ipv6_hdr(skb), p->mv[index].mask,
                                    p->mv[index].value);
                break;
        default:
                /*
                 * Only complain if a change was actually attempted.
                 * This way, we can send non-IP traffic through dsmark
                 * and don't need yet another qdisc as a bypass.
                 */
                if (p->mv[index].mask != 0xff || p->mv[index].value)
                        pr_warn("%s: unsupported protocol %d\n",
                                __func__, ntohs(tc_skb_protocol(skb)));
                break;
        }

        return skb;
}

static struct sk_buff *dsmark_peek(struct Qdisc *sch)
{
        struct dsmark_qdisc_data *p = qdisc_priv(sch);

        pr_debug("%s(sch %p,[qdisc %p])\n", __func__, sch, p);

        return p->q->ops->peek(p->q);
}

static unsigned int dsmark_drop(struct Qdisc *sch)
{
        struct dsmark_qdisc_data *p = qdisc_priv(sch);
        unsigned int len;

        pr_debug("%s(sch %p,[qdisc %p])\n", __func__, sch, p);

        if (p->q->ops->drop == NULL)
                return 0;

        len = p->q->ops->drop(p->q);
        if (len)
                sch->q.qlen--;

        return len;
}

static int dsmark_init(struct Qdisc *sch, struct nlattr *opt)
{
        struct dsmark_qdisc_data *p = qdisc_priv(sch);
        struct nlattr *tb[TCA_DSMARK_MAX + 1];
        int err = -EINVAL;
        u32 default_index = NO_DEFAULT_INDEX;
        u16 indices;
        int i;

        pr_debug("%s(sch %p,[qdisc %p],opt %p)\n", __func__, sch, p, opt);

        if (!opt)
                goto errout;

        err = nla_parse_nested(tb, TCA_DSMARK_MAX, opt, dsmark_policy);
        if (err < 0)
                goto errout;

        err = -EINVAL;
        indices = nla_get_u16(tb[TCA_DSMARK_INDICES]);

        if (hweight32(indices) != 1)
                goto errout;

        if (tb[TCA_DSMARK_DEFAULT_INDEX])
                default_index = nla_get_u16(tb[TCA_DSMARK_DEFAULT_INDEX]);

        if (indices <= DSMARK_EMBEDDED_SZ)
                p->mv = p->embedded;
        else
                p->mv = kmalloc_array(indices, sizeof(*p->mv), GFP_KERNEL);
        if (!p->mv) {
                err = -ENOMEM;
                goto errout;
        }
        for (i = 0; i < indices; i++) {
                p->mv[i].mask = 0xff;
                p->mv[i].value = 0;
        }
        p->indices = indices;
        p->default_index = default_index;
        p->set_tc_index = nla_get_flag(tb[TCA_DSMARK_SET_TC_INDEX]);

        p->q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops, sch->handle);
        if (p->q == NULL)
                p->q = &noop_qdisc;

        pr_debug("%s: qdisc %p\n", __func__, p->q);

        err = 0;
errout:
        return err;
}

static void dsmark_reset(struct Qdisc *sch)
{
        struct dsmark_qdisc_data *p = qdisc_priv(sch);

        pr_debug("%s(sch %p,[qdisc %p])\n", __func__, sch, p);
        qdisc_reset(p->q);
        sch->q.qlen = 0;
}

static void dsmark_destroy(struct Qdisc *sch)
{
        struct dsmark_qdisc_data *p = qdisc_priv(sch);

        pr_debug("%s(sch %p,[qdisc %p])\n", __func__, sch, p);

        tcf_destroy_chain(&p->filter_list);
        qdisc_destroy(p->q);
        if (p->mv != p->embedded)
                kfree(p->mv);
}

static int dsmark_dump_class(struct Qdisc *sch, unsigned long cl,
                             struct sk_buff *skb, struct tcmsg *tcm)
{
        struct dsmark_qdisc_data *p = qdisc_priv(sch);
        struct nlattr *opts = NULL;

        pr_debug("%s(sch %p,[qdisc %p],class %ld\n", __func__, sch, p, cl);

        if (!dsmark_valid_index(p, cl))
                return -EINVAL;

        tcm->tcm_handle = TC_H_MAKE(TC_H_MAJ(sch->handle), cl - 1);
        tcm->tcm_info = p->q->handle;

        opts = nla_nest_start(skb, TCA_OPTIONS);
        if (opts == NULL)
                goto nla_put_failure;
        if (nla_put_u8(skb, TCA_DSMARK_MASK, p->mv[cl - 1].mask) ||
            nla_put_u8(skb, TCA_DSMARK_VALUE, p->mv[cl - 1].value))
                goto nla_put_failure;

        return nla_nest_end(skb, opts);

nla_put_failure:
        nla_nest_cancel(skb, opts);
        return -EMSGSIZE;
}

static int dsmark_dump(struct Qdisc *sch, struct sk_buff *skb)
{
        struct dsmark_qdisc_data *p = qdisc_priv(sch);
        struct nlattr *opts = NULL;

        opts = nla_nest_start(skb, TCA_OPTIONS);
        if (opts == NULL)
                goto nla_put_failure;
        if (nla_put_u16(skb, TCA_DSMARK_INDICES, p->indices))
                goto nla_put_failure;

        if (p->default_index != NO_DEFAULT_INDEX &&
            nla_put_u16(skb, TCA_DSMARK_DEFAULT_INDEX, p->default_index))
                goto nla_put_failure;

        if (p->set_tc_index &&
            nla_put_flag(skb, TCA_DSMARK_SET_TC_INDEX))
                goto nla_put_failure;

        return nla_nest_end(skb, opts);

nla_put_failure:
        nla_nest_cancel(skb, opts);
        return -EMSGSIZE;
}

static const struct Qdisc_class_ops dsmark_class_ops = {
        .graft          = dsmark_graft,
        .leaf           = dsmark_leaf,
        .get            = dsmark_get,
        .put            = dsmark_put,
        .change         = dsmark_change,
        .delete         = dsmark_delete,
        .walk           = dsmark_walk,
        .tcf_chain      = dsmark_find_tcf,
        .bind_tcf       = dsmark_bind_filter,
        .unbind_tcf     = dsmark_put,
        .dump           = dsmark_dump_class,
};

static struct Qdisc_ops dsmark_qdisc_ops __read_mostly = {
        .next           = NULL,
        .cl_ops         = &dsmark_class_ops,
        .id             = "dsmark",
        .priv_size      = sizeof(struct dsmark_qdisc_data),
        .enqueue        = dsmark_enqueue,
        .dequeue        = dsmark_dequeue,
        .peek           = dsmark_peek,
        .drop           = dsmark_drop,
        .init           = dsmark_init,
        .reset          = dsmark_reset,
        .destroy        = dsmark_destroy,
        .change         = NULL,
        .dump           = dsmark_dump,
        .owner          = THIS_MODULE,
};

static int __init dsmark_module_init(void)
{
        return register_qdisc(&dsmark_qdisc_ops);
}

static void __exit dsmark_module_exit(void)
{
        unregister_qdisc(&dsmark_qdisc_ops);
}

module_init(dsmark_module_init)
module_exit(dsmark_module_exit)

MODULE_LICENSE("GPL");