/*
 * net/sched/sch_drr.c	Deficit Round Robin scheduler
 *
 * Copyright (c) 2008 Patrick McHardy <kaber@trash.net>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/pkt_sched.h>
#include <net/sch_generic.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>

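/* Per-class state. quantum is the byte allowance added each time the
 * class reaches the head of the active list; deficit is the allowance
 * remaining in the current round.
 */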
struct drr_class {
	struct Qdisc_class_common	common;
	unsigned int			refcnt;
	unsigned int			filter_cnt;

	struct gnet_stats_basic_packed	bstats;
	struct gnet_stats_queue		qstats;
	struct net_rate_estimator __rcu	*rate_est;
	struct list_head		alist;
	struct Qdisc			*qdisc;

	u32				quantum;
	u32				deficit;
};

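/* Per-qdisc state: the class hash and the round-robin list of classes
 * that currently have packets queued.
 */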
struct drr_sched {
	struct list_head		active;
	struct tcf_proto __rcu		*filter_list;
	struct tcf_block		*block;
	struct Qdisc_class_hash		clhash;
};

static struct drr_class *drr_find_class(struct Qdisc *sch, u32 classid)
{
	struct drr_sched *q = qdisc_priv(sch);
	struct Qdisc_class_common *clc;

	clc = qdisc_class_find(&q->clhash, classid);
	if (clc == NULL)
		return NULL;
	return container_of(clc, struct drr_class, common);
}

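/* Empty a class's leaf qdisc and propagate the qlen/backlog decrease
 * to the ancestor qdiscs.
 */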
static void drr_purge_queue(struct drr_class *cl)
{
	unsigned int len = cl->qdisc->q.qlen;
	unsigned int backlog = cl->qdisc->qstats.backlog;

	qdisc_reset(cl->qdisc);
	qdisc_tree_reduce_backlog(cl->qdisc, len, backlog);
}

static const struct nla_policy drr_policy[TCA_DRR_MAX + 1] = {
	[TCA_DRR_QUANTUM]	= { .type = NLA_U32 },
};

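/* Create a new class or change an existing one. Without an explicit
 * TCA_DRR_QUANTUM, the quantum defaults to the device MTU including
 * framing overhead (psched_mtu()).
 */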
static int drr_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
			    struct nlattr **tca, unsigned long *arg)
{
	struct drr_sched *q = qdisc_priv(sch);
	struct drr_class *cl = (struct drr_class *)*arg;
	struct nlattr *opt = tca[TCA_OPTIONS];
	struct nlattr *tb[TCA_DRR_MAX + 1];
	u32 quantum;
	int err;

	if (!opt)
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_DRR_MAX, opt, drr_policy, NULL);
	if (err < 0)
		return err;

	if (tb[TCA_DRR_QUANTUM]) {
		quantum = nla_get_u32(tb[TCA_DRR_QUANTUM]);
		if (quantum == 0)
			return -EINVAL;
	} else
		quantum = psched_mtu(qdisc_dev(sch));

	if (cl != NULL) {
		if (tca[TCA_RATE]) {
			err = gen_replace_estimator(&cl->bstats, NULL,
						    &cl->rate_est,
						    NULL,
						    qdisc_root_sleeping_running(sch),
						    tca[TCA_RATE]);
			if (err)
				return err;
		}

		sch_tree_lock(sch);
		if (tb[TCA_DRR_QUANTUM])
			cl->quantum = quantum;
		sch_tree_unlock(sch);

		return 0;
	}

	cl = kzalloc(sizeof(struct drr_class), GFP_KERNEL);
	if (cl == NULL)
		return -ENOBUFS;

	cl->refcnt = 1;
	cl->common.classid = classid;
	cl->quantum = quantum;
	cl->qdisc = qdisc_create_dflt(sch->dev_queue,
				      &pfifo_qdisc_ops, classid);
	if (cl->qdisc == NULL)
		cl->qdisc = &noop_qdisc;
	else
		qdisc_hash_add(cl->qdisc, true);

	if (tca[TCA_RATE]) {
		err = gen_replace_estimator(&cl->bstats, NULL, &cl->rate_est,
					    NULL,
					    qdisc_root_sleeping_running(sch),
					    tca[TCA_RATE]);
		if (err) {
			qdisc_destroy(cl->qdisc);
			kfree(cl);
			return err;
		}
	}

	sch_tree_lock(sch);
	qdisc_class_hash_insert(&q->clhash, &cl->common);
	sch_tree_unlock(sch);

	qdisc_class_hash_grow(sch, &q->clhash);

	*arg = (unsigned long)cl;
	return 0;
}

static void drr_destroy_class(struct Qdisc *sch, struct drr_class *cl)
{
	gen_kill_estimator(&cl->rate_est);
	qdisc_destroy(cl->qdisc);
	kfree(cl);
}

static int drr_delete_class(struct Qdisc *sch, unsigned long arg)
{
	struct drr_sched *q = qdisc_priv(sch);
	struct drr_class *cl = (struct drr_class *)arg;

	if (cl->filter_cnt > 0)
		return -EBUSY;

	sch_tree_lock(sch);

	drr_purge_queue(cl);
	qdisc_class_hash_remove(&q->clhash, &cl->common);

	BUG_ON(--cl->refcnt == 0);
	/*
	 * This shouldn't happen: we "hold" one cops->get() when called
	 * from tc_ctl_tclass; the destroy method is done from cops->put().
	 */

	sch_tree_unlock(sch);
	return 0;
}

static unsigned long drr_get_class(struct Qdisc *sch, u32 classid)
{
	struct drr_class *cl = drr_find_class(sch, classid);

	if (cl != NULL)
		cl->refcnt++;

	return (unsigned long)cl;
}

static void drr_put_class(struct Qdisc *sch, unsigned long arg)
{
	struct drr_class *cl = (struct drr_class *)arg;

	if (--cl->refcnt == 0)
		drr_destroy_class(sch, cl);
}

static struct tcf_block *drr_tcf_block(struct Qdisc *sch, unsigned long cl)
{
	struct drr_sched *q = qdisc_priv(sch);

	if (cl)
		return NULL;

	return q->block;
}

static unsigned long drr_bind_tcf(struct Qdisc *sch, unsigned long parent,
				  u32 classid)
{
	struct drr_class *cl = drr_find_class(sch, classid);

	if (cl != NULL)
		cl->filter_cnt++;

	return (unsigned long)cl;
}

static void drr_unbind_tcf(struct Qdisc *sch, unsigned long arg)
{
	struct drr_class *cl = (struct drr_class *)arg;

	cl->filter_cnt--;
}

static int drr_graft_class(struct Qdisc *sch, unsigned long arg,
			   struct Qdisc *new, struct Qdisc **old)
{
	struct drr_class *cl = (struct drr_class *)arg;

	if (new == NULL) {
		new = qdisc_create_dflt(sch->dev_queue,
					&pfifo_qdisc_ops, cl->common.classid);
		if (new == NULL)
			new = &noop_qdisc;
	}

	*old = qdisc_replace(sch, new, &cl->qdisc);
	return 0;
}

static struct Qdisc *drr_class_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct drr_class *cl = (struct drr_class *)arg;

	return cl->qdisc;
}

static void drr_qlen_notify(struct Qdisc *sch, unsigned long arg)
{
	struct drr_class *cl = (struct drr_class *)arg;

	if (cl->qdisc->q.qlen == 0)
		list_del(&cl->alist);
}

static int drr_dump_class(struct Qdisc *sch, unsigned long arg,
			  struct sk_buff *skb, struct tcmsg *tcm)
{
	struct drr_class *cl = (struct drr_class *)arg;
	struct nlattr *nest;

	tcm->tcm_parent	= TC_H_ROOT;
	tcm->tcm_handle	= cl->common.classid;
	tcm->tcm_info	= cl->qdisc->handle;

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;
	if (nla_put_u32(skb, TCA_DRR_QUANTUM, cl->quantum))
		goto nla_put_failure;
	return nla_nest_end(skb, nest);

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -EMSGSIZE;
}

static int drr_dump_class_stats(struct Qdisc *sch, unsigned long arg,
				struct gnet_dump *d)
{
	struct drr_class *cl = (struct drr_class *)arg;
	__u32 qlen = cl->qdisc->q.qlen;
	struct tc_drr_stats xstats;

	memset(&xstats, 0, sizeof(xstats));
	if (qlen)
		xstats.deficit = cl->deficit;

	if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch),
				  d, NULL, &cl->bstats) < 0 ||
	    gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 ||
	    gnet_stats_copy_queue(d, NULL, &cl->qdisc->qstats, qlen) < 0)
		return -1;

	return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
}

static void drr_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct drr_sched *q = qdisc_priv(sch);
	struct drr_class *cl;
	unsigned int i;

	if (arg->stop)
		return;

	for (i = 0; i < q->clhash.hashsize; i++) {
		hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) {
			if (arg->count < arg->skip) {
				arg->count++;
				continue;
			}
			if (arg->fn(sch, (unsigned long)cl, arg) < 0) {
				arg->stop = 1;
				return;
			}
			arg->count++;
		}
	}
}

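/* Map a packet to a class: an skb->priority matching this qdisc's major
 * handle selects the class directly; otherwise the attached tc filters
 * decide. Returns NULL (and sets *qerr) if no class matches.
 */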
static struct drr_class *drr_classify(struct sk_buff *skb, struct Qdisc *sch,
				      int *qerr)
{
	struct drr_sched *q = qdisc_priv(sch);
	struct drr_class *cl;
	struct tcf_result res;
	struct tcf_proto *fl;
	int result;

	if (TC_H_MAJ(skb->priority ^ sch->handle) == 0) {
		cl = drr_find_class(sch, skb->priority);
		if (cl != NULL)
			return cl;
	}

	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	fl = rcu_dereference_bh(q->filter_list);
	result = tcf_classify(skb, fl, &res, false);
	if (result >= 0) {
#ifdef CONFIG_NET_CLS_ACT
		switch (result) {
		case TC_ACT_QUEUED:
		case TC_ACT_STOLEN:
		case TC_ACT_TRAP:
			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
			/* fall through */
		case TC_ACT_SHOT:
			return NULL;
		}
#endif
		cl = (struct drr_class *)res.class;
		if (cl == NULL)
			cl = drr_find_class(sch, res.classid);
		return cl;
	}
	return NULL;
}

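/* Enqueue to the chosen class's leaf qdisc. A class becoming backlogged
 * is appended to the active list with a fresh deficit of one quantum.
 */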
static int drr_enqueue(struct sk_buff *skb, struct Qdisc *sch,
		       struct sk_buff **to_free)
{
	struct drr_sched *q = qdisc_priv(sch);
	struct drr_class *cl;
	int err = 0;

	cl = drr_classify(skb, sch, &err);
	if (cl == NULL) {
		if (err & __NET_XMIT_BYPASS)
			qdisc_qstats_drop(sch);
		__qdisc_drop(skb, to_free);
		return err;
	}

	err = qdisc_enqueue(skb, cl->qdisc, to_free);
	if (unlikely(err != NET_XMIT_SUCCESS)) {
		if (net_xmit_drop_count(err)) {
			cl->qstats.drops++;
			qdisc_qstats_drop(sch);
		}
		return err;
	}

	if (cl->qdisc->q.qlen == 1) {
		list_add_tail(&cl->alist, &q->active);
		cl->deficit = cl->quantum;
	}

	qdisc_qstats_backlog_inc(sch, skb);
	sch->q.qlen++;
	return err;
}

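/* The DRR core: serve the class at the head of the active list. If the
 * head packet fits within the class's deficit, dequeue it and charge
 * its length; otherwise add one quantum to the deficit and rotate the
 * class to the tail of the list.
 */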
static struct sk_buff *drr_dequeue(struct Qdisc *sch)
{
	struct drr_sched *q = qdisc_priv(sch);
	struct drr_class *cl;
	struct sk_buff *skb;
	unsigned int len;

	if (list_empty(&q->active))
		goto out;
	while (1) {
		cl = list_first_entry(&q->active, struct drr_class, alist);
		skb = cl->qdisc->ops->peek(cl->qdisc);
		if (skb == NULL) {
			qdisc_warn_nonwc(__func__, cl->qdisc);
			goto out;
		}

		len = qdisc_pkt_len(skb);
		if (len <= cl->deficit) {
			cl->deficit -= len;
			skb = qdisc_dequeue_peeked(cl->qdisc);
			if (unlikely(skb == NULL))
				goto out;
			if (cl->qdisc->q.qlen == 0)
				list_del(&cl->alist);

			bstats_update(&cl->bstats, skb);
			qdisc_bstats_update(sch, skb);
			qdisc_qstats_backlog_dec(sch, skb);
			sch->q.qlen--;
			return skb;
		}

		cl->deficit += cl->quantum;
		list_move_tail(&cl->alist, &q->active);
	}
out:
	return NULL;
}

static int drr_init_qdisc(struct Qdisc *sch, struct nlattr *opt)
{
	struct drr_sched *q = qdisc_priv(sch);
	int err;

	err = tcf_block_get(&q->block, &q->filter_list);
	if (err)
		return err;
	err = qdisc_class_hash_init(&q->clhash);
	if (err < 0)
		return err;
	INIT_LIST_HEAD(&q->active);
	return 0;
}

static void drr_reset_qdisc(struct Qdisc *sch)
{
	struct drr_sched *q = qdisc_priv(sch);
	struct drr_class *cl;
	unsigned int i;

	for (i = 0; i < q->clhash.hashsize; i++) {
		hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) {
			if (cl->qdisc->q.qlen)
				list_del(&cl->alist);
			qdisc_reset(cl->qdisc);
		}
	}
	sch->qstats.backlog = 0;
	sch->q.qlen = 0;
}

static void drr_destroy_qdisc(struct Qdisc *sch)
{
	struct drr_sched *q = qdisc_priv(sch);
	struct drr_class *cl;
	struct hlist_node *next;
	unsigned int i;

	tcf_block_put(q->block);

	for (i = 0; i < q->clhash.hashsize; i++) {
		hlist_for_each_entry_safe(cl, next, &q->clhash.hash[i],
					  common.hnode)
			drr_destroy_class(sch, cl);
	}
	qdisc_class_hash_destroy(&q->clhash);
}

static const struct Qdisc_class_ops drr_class_ops = {
	.change		= drr_change_class,
	.delete		= drr_delete_class,
	.get		= drr_get_class,
	.put		= drr_put_class,
	.tcf_block	= drr_tcf_block,
	.bind_tcf	= drr_bind_tcf,
	.unbind_tcf	= drr_unbind_tcf,
	.graft		= drr_graft_class,
	.leaf		= drr_class_leaf,
	.qlen_notify	= drr_qlen_notify,
	.dump		= drr_dump_class,
	.dump_stats	= drr_dump_class_stats,
	.walk		= drr_walk,
};

static struct Qdisc_ops drr_qdisc_ops __read_mostly = {
	.cl_ops		= &drr_class_ops,
	.id		= "drr",
	.priv_size	= sizeof(struct drr_sched),
	.enqueue	= drr_enqueue,
	.dequeue	= drr_dequeue,
	.peek		= qdisc_peek_dequeued,
	.init		= drr_init_qdisc,
	.reset		= drr_reset_qdisc,
	.destroy	= drr_destroy_qdisc,
	.owner		= THIS_MODULE,
};

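/* Example configuration (illustrative; the device name and classids
 * are hypothetical):
 *
 *	tc qdisc add dev eth0 root handle 1: drr
 *	tc class add dev eth0 parent 1: classid 1:1 drr quantum 1500
 *	tc class add dev eth0 parent 1: classid 1:2 drr quantum 3000
 *	tc filter add dev eth0 parent 1: protocol ip prio 1 \
 *		u32 match ip dport 80 0xffff flowid 1:1
 *
 * Packets that match no filter and carry no matching skb->priority are
 * dropped, since drr has no default class.
 */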
static int __init drr_init(void)
{
	return register_qdisc(&drr_qdisc_ops);
}

static void __exit drr_exit(void)
{
	unregister_qdisc(&drr_qdisc_ops);
}

module_init(drr_init);
module_exit(drr_exit);
MODULE_LICENSE("GPL");