/*
 * net/sched/sch_mqprio.c
 *
 * Copyright (c) 2010 John Fastabend <john.r.fastabend@intel.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 */
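/*
 * mqprio maps a configurable number of traffic classes onto ranges of
 * hardware transmit queues.  Illustrative configuration from userspace
 * (values are examples only; see tc-mqprio(8) for the full syntax):
 *
 *   tc qdisc add dev eth0 root handle 1: mqprio num_tc 3 \
 *      map 2 2 1 0 2 2 2 2 2 2 2 2 2 2 2 2 \
 *      queues 1@0 1@1 2@2 hw 0
 */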
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/module.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/sch_generic.h>
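/*
 * Per-qdisc private data: the array of pre-allocated per-TX-queue child
 * qdiscs (freed once they have been attached) and a flag recording whether
 * the traffic-class-to-queue mapping is owned by the hardware driver.
 */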
struct mqprio_sched {
        struct Qdisc **qdiscs;
        int hw_owned;
};
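/*
 * Tear down the qdisc: destroy any per-queue children that were allocated,
 * then either let the driver drop its hardware TC configuration (when the
 * mapping was hardware owned) or clear the software num_tc on the device.
 */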
static void mqprio_destroy(struct Qdisc *sch)
{
        struct net_device *dev = qdisc_dev(sch);
        struct mqprio_sched *priv = qdisc_priv(sch);
        struct tc_to_netdev tc = {.type = TC_SETUP_MQPRIO};
        unsigned int ntx;

        if (priv->qdiscs) {
                for (ntx = 0;
                     ntx < dev->num_tx_queues && priv->qdiscs[ntx];
                     ntx++)
                        qdisc_destroy(priv->qdiscs[ntx]);
                kfree(priv->qdiscs);
        }

        if (priv->hw_owned && dev->netdev_ops->ndo_setup_tc)
                dev->netdev_ops->ndo_setup_tc(dev, sch->handle, 0, &tc);
        else
                netdev_set_num_tc(dev, 0);
}
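/*
 * Validate the user-supplied tc_mqprio_qopt: num_tc must be within range,
 * every priority must map to an existing traffic class, and hardware
 * offload may only be requested if the driver implements ndo_setup_tc.
 * For software mappings, each class's queue range must be non-empty, fit
 * within real_num_tx_queues and not overlap a later class's range.
 */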
static int mqprio_parse_opt(struct net_device *dev, struct tc_mqprio_qopt *qopt)
{
        int i, j;

        /* Verify num_tc is not out of max range */
        if (qopt->num_tc > TC_MAX_QUEUE)
                return -EINVAL;

        /* Verify priority mapping uses valid tcs */
        for (i = 0; i < TC_BITMASK + 1; i++) {
                if (qopt->prio_tc_map[i] >= qopt->num_tc)
                        return -EINVAL;
        }

        /* net_device does not support requested operation */
        if (qopt->hw && !dev->netdev_ops->ndo_setup_tc)
                return -EINVAL;

        /* If hw owned, qcount and qoffset are taken from the LLD, so
         * there is no reason to verify them here.
         */
        if (qopt->hw)
                return 0;

        for (i = 0; i < qopt->num_tc; i++) {
                unsigned int last = qopt->offset[i] + qopt->count[i];

                /* Verify the queue count is in the TX range; being equal
                 * to real_num_tx_queues indicates the last queue is in use.
                 */
                if (qopt->offset[i] >= dev->real_num_tx_queues ||
                    !qopt->count[i] ||
                    last > dev->real_num_tx_queues)
                        return -EINVAL;

                /* Verify that the offset and counts do not overlap */
                for (j = i + 1; j < qopt->num_tc; j++) {
                        if (last > qopt->offset[j])
                                return -EINVAL;
                }
        }
        return 0;
}
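/*
 * Set up mqprio as a root qdisc on a multiqueue device: validate the
 * options, pre-allocate one default child qdisc per TX queue so that
 * attach cannot fail, then program the TC-to-queue mapping either through
 * the driver (hardware owned) or through the software netdev tables.
 */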
static int mqprio_init(struct Qdisc *sch, struct nlattr *opt)
{
        struct net_device *dev = qdisc_dev(sch);
        struct mqprio_sched *priv = qdisc_priv(sch);
        struct netdev_queue *dev_queue;
        struct Qdisc *qdisc;
        int i, err = -EOPNOTSUPP;
        struct tc_mqprio_qopt *qopt = NULL;

        BUILD_BUG_ON(TC_MAX_QUEUE != TC_QOPT_MAX_QUEUE);
        BUILD_BUG_ON(TC_BITMASK != TC_QOPT_BITMASK);

        if (sch->parent != TC_H_ROOT)
                return -EOPNOTSUPP;

        if (!netif_is_multiqueue(dev))
                return -EOPNOTSUPP;

        if (!opt || nla_len(opt) < sizeof(*qopt))
                return -EINVAL;

        qopt = nla_data(opt);
        if (mqprio_parse_opt(dev, qopt))
                return -EINVAL;

        /* pre-allocate qdiscs, attachment can't fail */
        priv->qdiscs = kcalloc(dev->num_tx_queues, sizeof(priv->qdiscs[0]),
                               GFP_KERNEL);
        if (priv->qdiscs == NULL) {
                err = -ENOMEM;
                goto err;
        }

        for (i = 0; i < dev->num_tx_queues; i++) {
                dev_queue = netdev_get_tx_queue(dev, i);
                qdisc = qdisc_create_dflt(dev_queue,
                                          get_default_qdisc_ops(dev, i),
                                          TC_H_MAKE(TC_H_MAJ(sch->handle),
                                                    TC_H_MIN(i + 1)));
                if (qdisc == NULL) {
                        err = -ENOMEM;
                        goto err;
                }
                priv->qdiscs[i] = qdisc;
                qdisc->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
        }

        /* If the mqprio options indicate that hardware should own
         * the queue mapping then run ndo_setup_tc, otherwise use the
         * supplied and verified mapping.
         */
        if (qopt->hw) {
                struct tc_to_netdev tc = {.type = TC_SETUP_MQPRIO,
                                          { .tc = qopt->num_tc }};

                priv->hw_owned = 1;
                err = dev->netdev_ops->ndo_setup_tc(dev, sch->handle, 0, &tc);
                if (err)
                        goto err;
        } else {
                netdev_set_num_tc(dev, qopt->num_tc);
                for (i = 0; i < qopt->num_tc; i++)
                        netdev_set_tc_queue(dev, i,
                                            qopt->count[i], qopt->offset[i]);
        }

        /* Always use supplied priority mappings */
        for (i = 0; i < TC_BITMASK + 1; i++)
                netdev_set_prio_tc_map(dev, i, qopt->prio_tc_map[i]);

        sch->flags |= TCQ_F_MQROOT;

        return 0;

err:
        mqprio_destroy(sch);
        return err;
}
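/*
 * Graft the pre-allocated child qdiscs onto their TX queues, destroying
 * whatever was attached before, and hash the ones backing active queues.
 * The temporary array is freed here; the children now live on the queues.
 */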
static void mqprio_attach(struct Qdisc *sch)
{
        struct net_device *dev = qdisc_dev(sch);
        struct mqprio_sched *priv = qdisc_priv(sch);
        struct Qdisc *qdisc, *old;
        unsigned int ntx;

        /* Attach underlying qdisc */
        for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {
                qdisc = priv->qdiscs[ntx];
                old = dev_graft_qdisc(qdisc->dev_queue, qdisc);
                if (old)
                        qdisc_destroy(old);
                if (ntx < dev->real_num_tx_queues)
                        qdisc_hash_add(qdisc);
        }
        kfree(priv->qdiscs);
        priv->qdiscs = NULL;
}
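/*
 * Class ids use minors 1..num_tc for the traffic classes, followed by one
 * class per TX queue.  Translate a per-queue class id back to its
 * netdev_queue, or return NULL for ids that are out of range (including
 * the traffic-class ids, which have no single backing queue).
 */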
static struct netdev_queue *mqprio_queue_get(struct Qdisc *sch,
                                             unsigned long cl)
{
        struct net_device *dev = qdisc_dev(sch);
        unsigned long ntx = cl - 1 - netdev_get_num_tc(dev);

        if (ntx >= dev->num_tx_queues)
                return NULL;
        return netdev_get_tx_queue(dev, ntx);
}
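/*
 * Replace the qdisc attached to a per-queue class.  The device is briefly
 * deactivated around the swap if it is up; traffic-class ids cannot be
 * grafted onto and are rejected via the NULL queue lookup.
 */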
static int mqprio_graft(struct Qdisc *sch, unsigned long cl, struct Qdisc *new,
                        struct Qdisc **old)
{
        struct net_device *dev = qdisc_dev(sch);
        struct netdev_queue *dev_queue = mqprio_queue_get(sch, cl);

        if (!dev_queue)
                return -EINVAL;

        if (dev->flags & IFF_UP)
                dev_deactivate(dev);

        *old = dev_graft_qdisc(dev_queue, new);
        if (new)
                new->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;

        if (dev->flags & IFF_UP)
                dev_activate(dev);

        return 0;
}
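/*
 * Dump the root qdisc: fold the queue lengths and the basic/queue stats of
 * every child into the root's counters, then emit the current mapping
 * (num_tc, priority map, hw flag, per-class queue count/offset) as
 * TCA_OPTIONS.
 */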
static int mqprio_dump(struct Qdisc *sch, struct sk_buff *skb)
{
        struct net_device *dev = qdisc_dev(sch);
        struct mqprio_sched *priv = qdisc_priv(sch);
        unsigned char *b = skb_tail_pointer(skb);
        struct tc_mqprio_qopt opt = { 0 };
        struct Qdisc *qdisc;
        unsigned int i;

        sch->q.qlen = 0;
        memset(&sch->bstats, 0, sizeof(sch->bstats));
        memset(&sch->qstats, 0, sizeof(sch->qstats));

        for (i = 0; i < dev->num_tx_queues; i++) {
                qdisc = rtnl_dereference(netdev_get_tx_queue(dev, i)->qdisc);
                spin_lock_bh(qdisc_lock(qdisc));
                sch->q.qlen += qdisc->q.qlen;
                sch->bstats.bytes += qdisc->bstats.bytes;
                sch->bstats.packets += qdisc->bstats.packets;
                sch->qstats.backlog += qdisc->qstats.backlog;
                sch->qstats.drops += qdisc->qstats.drops;
                sch->qstats.requeues += qdisc->qstats.requeues;
                sch->qstats.overlimits += qdisc->qstats.overlimits;
                spin_unlock_bh(qdisc_lock(qdisc));
        }

        opt.num_tc = netdev_get_num_tc(dev);
        memcpy(opt.prio_tc_map, dev->prio_tc_map, sizeof(opt.prio_tc_map));
        opt.hw = priv->hw_owned;

        for (i = 0; i < netdev_get_num_tc(dev); i++) {
                opt.count[i] = dev->tc_to_txq[i].count;
                opt.offset[i] = dev->tc_to_txq[i].offset;
        }

        if (nla_put(skb, TCA_OPTIONS, sizeof(opt), &opt))
                goto nla_put_failure;

        return skb->len;

nla_put_failure:
        nlmsg_trim(skb, b);
        return -1;
}
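/*
 * Return the qdisc attached to a per-queue class; traffic-class ids have
 * no leaf and yield NULL.
 */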
static struct Qdisc *mqprio_leaf(struct Qdisc *sch, unsigned long cl)
{
        struct netdev_queue *dev_queue = mqprio_queue_get(sch, cl);

        if (!dev_queue)
                return NULL;

        return dev_queue->qdisc_sleeping;
}
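/*
 * Classes are identified directly by the minor number of the classid;
 * anything beyond num_tx_queues + num_tc does not exist.
 */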
static unsigned long mqprio_get(struct Qdisc *sch, u32 classid)
{
        struct net_device *dev = qdisc_dev(sch);
        unsigned int ntx = TC_H_MIN(classid);

        if (ntx > dev->num_tx_queues + netdev_get_num_tc(dev))
                return 0;
        return ntx;
}

static void mqprio_put(struct Qdisc *sch, unsigned long cl)
{
}
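/*
 * Report class topology: traffic-class classes are parented at the root,
 * while per-queue classes report the traffic class that contains their
 * queue as parent and the attached qdisc's handle as tcm_info.
 */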
static int mqprio_dump_class(struct Qdisc *sch, unsigned long cl,
                             struct sk_buff *skb, struct tcmsg *tcm)
{
        struct net_device *dev = qdisc_dev(sch);

        if (cl <= netdev_get_num_tc(dev)) {
                tcm->tcm_parent = TC_H_ROOT;
                tcm->tcm_info = 0;
        } else {
                int i;
                struct netdev_queue *dev_queue;

                dev_queue = mqprio_queue_get(sch, cl);
                tcm->tcm_parent = 0;
                for (i = 0; i < netdev_get_num_tc(dev); i++) {
                        struct netdev_tc_txq tc = dev->tc_to_txq[i];
                        int q_idx = cl - netdev_get_num_tc(dev);

                        if (q_idx > tc.offset &&
                            q_idx <= tc.offset + tc.count) {
                                tcm->tcm_parent =
                                        TC_H_MAKE(TC_H_MAJ(sch->handle),
                                                  TC_H_MIN(i + 1));
                                break;
                        }
                }
                tcm->tcm_info = dev_queue->qdisc_sleeping->handle;
        }
        tcm->tcm_handle |= TC_H_MIN(cl);
        return 0;
}
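/*
 * Class statistics: for a traffic-class class, sum the stats of every
 * qdisc on the queues belonging to that class, dropping the dump lock
 * while the per-queue qdisc locks are taken; for a per-queue class, copy
 * the attached qdisc's stats directly.
 */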
static int mqprio_dump_class_stats(struct Qdisc *sch, unsigned long cl,
                                   struct gnet_dump *d)
        __releases(d->lock)
        __acquires(d->lock)
{
        struct net_device *dev = qdisc_dev(sch);

        if (cl <= netdev_get_num_tc(dev)) {
                int i;
                __u32 qlen = 0;
                struct Qdisc *qdisc;
                struct gnet_stats_queue qstats = {0};
                struct gnet_stats_basic_packed bstats = {0};
                struct netdev_tc_txq tc = dev->tc_to_txq[cl - 1];

                /* Drop the lock here; it will be reclaimed before touching
                 * statistics.  This is required because the d->lock we hold
                 * here is the lock on dev_queue->qdisc_sleeping, which is
                 * also acquired below.
                 */
                if (d->lock)
                        spin_unlock_bh(d->lock);

                for (i = tc.offset; i < tc.offset + tc.count; i++) {
                        struct netdev_queue *q = netdev_get_tx_queue(dev, i);

                        qdisc = rtnl_dereference(q->qdisc);
                        spin_lock_bh(qdisc_lock(qdisc));
                        qlen += qdisc->q.qlen;
                        bstats.bytes += qdisc->bstats.bytes;
                        bstats.packets += qdisc->bstats.packets;
                        qstats.backlog += qdisc->qstats.backlog;
                        qstats.drops += qdisc->qstats.drops;
                        qstats.requeues += qdisc->qstats.requeues;
                        qstats.overlimits += qdisc->qstats.overlimits;
                        spin_unlock_bh(qdisc_lock(qdisc));
                }
                /* Reclaim root sleeping lock before completing stats */
                if (d->lock)
                        spin_lock_bh(d->lock);
                if (gnet_stats_copy_basic(NULL, d, NULL, &bstats) < 0 ||
                    gnet_stats_copy_queue(d, NULL, &qstats, qlen) < 0)
                        return -1;
        } else {
                struct netdev_queue *dev_queue = mqprio_queue_get(sch, cl);

                sch = dev_queue->qdisc_sleeping;
                if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch),
                                          d, NULL, &sch->bstats) < 0 ||
                    gnet_stats_copy_queue(d, NULL,
                                          &sch->qstats, sch->q.qlen) < 0)
                        return -1;
        }
        return 0;
}
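/*
 * Walk every class: one virtual class per traffic class followed by one
 * class per TX queue, honouring the walker's skip/stop protocol.
 */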
static void mqprio_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
        struct net_device *dev = qdisc_dev(sch);
        unsigned long ntx;

        if (arg->stop)
                return;

        /* Walk hierarchy with a virtual class per tc */
        arg->count = arg->skip;
        for (ntx = arg->skip;
             ntx < dev->num_tx_queues + netdev_get_num_tc(dev);
             ntx++) {
                if (arg->fn(sch, ntx + 1, arg) < 0) {
                        arg->stop = 1;
                        break;
                }
                arg->count++;
        }
}
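/*
 * Note that the qdisc ops below provide no enqueue/dequeue of their own:
 * as an MQROOT qdisc, mqprio only manages the children attached directly
 * to the device's TX queues.
 */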
static const struct Qdisc_class_ops mqprio_class_ops = {
        .graft          = mqprio_graft,
        .leaf           = mqprio_leaf,
        .get            = mqprio_get,
        .put            = mqprio_put,
        .walk           = mqprio_walk,
        .dump           = mqprio_dump_class,
        .dump_stats     = mqprio_dump_class_stats,
};

static struct Qdisc_ops mqprio_qdisc_ops __read_mostly = {
        .cl_ops         = &mqprio_class_ops,
        .id             = "mqprio",
        .priv_size      = sizeof(struct mqprio_sched),
        .init           = mqprio_init,
        .destroy        = mqprio_destroy,
        .attach         = mqprio_attach,
        .dump           = mqprio_dump,
        .owner          = THIS_MODULE,
};

static int __init mqprio_module_init(void)
{
        return register_qdisc(&mqprio_qdisc_ops);
}

static void __exit mqprio_module_exit(void)
{
        unregister_qdisc(&mqprio_qdisc_ops);
}

module_init(mqprio_module_init);
module_exit(mqprio_module_exit);

MODULE_LICENSE("GPL");