/*
 * net/sched/sch_generic.c	Generic packet scheduler routines.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *		Jamal Hadi Salim, <hadi@cyberus.ca> 990601
 *		- Ingress support
 */

#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/init.h>
#include <linux/rcupdate.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/if_vlan.h>
#include <net/sch_generic.h>
#include <net/pkt_sched.h>
#include <net/dst.h>

/* Qdisc to use by default */
const struct Qdisc_ops *default_qdisc_ops = &pfifo_fast_ops;
EXPORT_SYMBOL(default_qdisc_ops);

/* Main transmission queue. */

/* Modifications to data participating in scheduling must be protected with
 * qdisc_lock(qdisc) spinlock.
 *
 * The idea is the following:
 * - enqueue, dequeue are serialized via qdisc root lock
 * - ingress filtering is also serialized via qdisc root lock
 * - updates to tree and tree walking are only done under the rtnl mutex.
 */

static inline int dev_requeue_skb(struct sk_buff *skb, struct Qdisc *q)
{
	q->gso_skb = skb;
	q->qstats.requeues++;
	q->q.qlen++;	/* it's still part of the queue */
	__netif_schedule(q);

	return 0;
}

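/* Bulk dequeue: after one skb has been pulled off the qdisc, keep dequeuing
 * and chaining further skbs via skb->next while the byte budget returned by
 * qdisc_avail_bulklimit() allows it (roughly the remaining BQL headroom, as
 * far as I can tell), so several frames can be handed to the driver under a
 * single HARD_TX_LOCK acquisition.
 */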
static void try_bulk_dequeue_skb(struct Qdisc *q,
				 struct sk_buff *skb,
				 const struct netdev_queue *txq,
				 int *packets)
{
	int bytelimit = qdisc_avail_bulklimit(txq) - skb->len;

	while (bytelimit > 0) {
		struct sk_buff *nskb = q->dequeue(q);

		if (!nskb)
			break;

		bytelimit -= nskb->len; /* covers GSO len */
		skb->next = nskb;
		skb = nskb;
		(*packets)++; /* GSO counts as one pkt */
	}
	skb->next = NULL;
}

/* Note that dequeue_skb can possibly return a SKB list (via skb->next).
 * A requeued skb (via q->gso_skb) can also be a SKB list.
 */
static struct sk_buff *dequeue_skb(struct Qdisc *q, bool *validate,
				   int *packets)
{
	struct sk_buff *skb = q->gso_skb;
	const struct netdev_queue *txq = q->dev_queue;

	*packets = 1;
	*validate = true;
	if (unlikely(skb)) {
		/* check the reason for requeuing without taking the tx lock first */
		txq = skb_get_tx_queue(txq->dev, skb);
		if (!netif_xmit_frozen_or_stopped(txq)) {
			q->gso_skb = NULL;
			q->q.qlen--;
		} else
			skb = NULL;
		/* the skb in gso_skb has already been validated */
		*validate = false;
	} else {
		if (!(q->flags & TCQ_F_ONETXQUEUE) ||
		    !netif_xmit_frozen_or_stopped(txq)) {
			skb = q->dequeue(q);
			if (skb && qdisc_may_bulk(q))
				try_bulk_dequeue_skb(q, skb, txq, packets);
		}
	}
	return skb;
}
static inline int handle_dev_cpu_collision(struct sk_buff *skb,
					   struct netdev_queue *dev_queue,
					   struct Qdisc *q)
{
	int ret;

	if (unlikely(dev_queue->xmit_lock_owner == smp_processor_id())) {
		/*
		 * Same CPU holding the lock. It may be a transient
		 * configuration error, when hard_start_xmit() recurses. We
		 * detect it by checking xmit owner and drop the packet when
		 * deadloop is detected. Return OK to try the next skb.
		 */
		kfree_skb_list(skb);
		net_warn_ratelimited("Dead loop on netdevice %s, fix it urgently!\n",
				     dev_queue->dev->name);
		ret = qdisc_qlen(q);
	} else {
		/*
		 * Another cpu is holding lock, requeue & delay xmits for
		 * some time.
		 */
		__this_cpu_inc(softnet_data.cpu_collision);
		ret = dev_requeue_skb(skb, q);
	}

	return ret;
}

/*
 * Transmit possibly several skbs, and handle the return status as
 * required. Holding the __QDISC___STATE_RUNNING bit guarantees that
 * only one CPU can execute this function.
 *
 * Returns to the caller:
 *	0  - queue is empty or throttled.
 *	>0 - queue is not empty.
 */
int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
		    struct net_device *dev, struct netdev_queue *txq,
		    spinlock_t *root_lock, bool validate)
{
	int ret = NETDEV_TX_BUSY;

	/* And release qdisc */
	spin_unlock(root_lock);

	/* Note that we validate skb (GSO, checksum, ...) outside of locks */
	if (validate)
		skb = validate_xmit_skb_list(skb, dev);

	if (skb) {
		HARD_TX_LOCK(dev, txq, smp_processor_id());
		if (!netif_xmit_frozen_or_stopped(txq))
			skb = dev_hard_start_xmit(skb, dev, txq, &ret);

		HARD_TX_UNLOCK(dev, txq);
	}
	spin_lock(root_lock);

	if (dev_xmit_complete(ret)) {
		/* Driver sent out skb successfully or skb was consumed */
		ret = qdisc_qlen(q);
	} else if (ret == NETDEV_TX_LOCKED) {
		/* Driver try lock failed */
		ret = handle_dev_cpu_collision(skb, txq, q);
	} else {
		/* Driver returned NETDEV_TX_BUSY - requeue skb */
		if (unlikely(ret != NETDEV_TX_BUSY))
			net_warn_ratelimited("BUG %s code %d qlen %d\n",
					     dev->name, ret, q->q.qlen);

		ret = dev_requeue_skb(skb, q);
	}

	if (ret && netif_xmit_frozen_or_stopped(txq))
		ret = 0;

	return ret;
}
/*
 * NOTE: Called under qdisc_lock(q) with locally disabled BH.
 *
 * __QDISC___STATE_RUNNING guarantees only one CPU can process
 * this qdisc at a time. qdisc_lock(q) serializes queue accesses for
 * this queue.
 *
 * netif_tx_lock serializes accesses to device driver.
 *
 * qdisc_lock(q) and netif_tx_lock are mutually exclusive,
 * if one is grabbed, another must be free.
 *
 * Note that this procedure can be called by a watchdog timer.
 *
 * Returns to the caller:
 *	0  - queue is empty or throttled.
 *	>0 - queue is not empty.
 *
 */
static inline int qdisc_restart(struct Qdisc *q, int *packets)
{
	struct netdev_queue *txq;
	struct net_device *dev;
	spinlock_t *root_lock;
	struct sk_buff *skb;
	bool validate;

	/* Dequeue packet */
	skb = dequeue_skb(q, &validate, packets);
	if (unlikely(!skb))
		return 0;

	root_lock = qdisc_lock(q);
	dev = qdisc_dev(q);
	txq = skb_get_tx_queue(dev, skb);

	return sch_direct_xmit(skb, q, dev, txq, root_lock, validate);
}

void __qdisc_run(struct Qdisc *q)
{
	int quota = weight_p;
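	/* weight_p is shared with the net.core.dev_weight sysctl (64 by
	 * default, if memory serves), so roughly that many skbs at most are
	 * dequeued per __qdisc_run() pass before the qdisc is rescheduled.
	 */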
	int packets;

	while (qdisc_restart(q, &packets)) {
		/*
		 * Ordered by possible occurrence: Postpone processing if
		 * 1. we've exceeded packet quota
		 * 2. another process needs the CPU;
		 */
		quota -= packets;
		if (quota <= 0 || need_resched()) {
			__netif_schedule(q);
			break;
		}
	}

	qdisc_run_end(q);
}

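/* Report the most recent transmit start time across all of the device's TX
 * queues (resolving VLAN devices to their real device first) and cache it in
 * dev->trans_start.
 */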
unsigned long dev_trans_start(struct net_device *dev)
{
	unsigned long val, res;
	unsigned int i;

	if (is_vlan_dev(dev))
		dev = vlan_dev_real_dev(dev);
	res = dev->trans_start;
	for (i = 0; i < dev->num_tx_queues; i++) {
		val = netdev_get_tx_queue(dev, i)->trans_start;
		if (val && time_after(val, res))
			res = val;
	}
	dev->trans_start = res;

	return res;
}
EXPORT_SYMBOL(dev_trans_start);

static void dev_watchdog(unsigned long arg)
{
	struct net_device *dev = (struct net_device *)arg;

	netif_tx_lock(dev);
	if (!qdisc_tx_is_noop(dev)) {
		if (netif_device_present(dev) &&
		    netif_running(dev) &&
		    netif_carrier_ok(dev)) {
			int some_queue_timedout = 0;
			unsigned int i;
			unsigned long trans_start;

			for (i = 0; i < dev->num_tx_queues; i++) {
				struct netdev_queue *txq;

				txq = netdev_get_tx_queue(dev, i);
				/*
				 * old device drivers set dev->trans_start
				 */
				trans_start = txq->trans_start ? : dev->trans_start;
				if (netif_xmit_stopped(txq) &&
				    time_after(jiffies, (trans_start +
							 dev->watchdog_timeo))) {
					some_queue_timedout = 1;
					txq->trans_timeout++;
					break;
				}
			}

			if (some_queue_timedout) {
				WARN_ONCE(1, KERN_INFO "NETDEV WATCHDOG: %s (%s): transmit queue %u timed out\n",
					  dev->name, netdev_drivername(dev), i);
				dev->netdev_ops->ndo_tx_timeout(dev);
			}
			if (!mod_timer(&dev->watchdog_timer,
				       round_jiffies(jiffies +
						     dev->watchdog_timeo)))
				dev_hold(dev);
		}
	}
	netif_tx_unlock(dev);

	dev_put(dev);
}

void __netdev_watchdog_up(struct net_device *dev)
{
	if (dev->netdev_ops->ndo_tx_timeout) {
		if (dev->watchdog_timeo <= 0)
			dev->watchdog_timeo = 5*HZ;
		if (!mod_timer(&dev->watchdog_timer,
			       round_jiffies(jiffies + dev->watchdog_timeo)))
			dev_hold(dev);
	}
}

static void dev_watchdog_up(struct net_device *dev)
{
	__netdev_watchdog_up(dev);
}

static void dev_watchdog_down(struct net_device *dev)
{
	netif_tx_lock_bh(dev);
	if (del_timer(&dev->watchdog_timer))
		dev_put(dev);
	netif_tx_unlock_bh(dev);
}
/**
 *	netif_carrier_on - set carrier
 *	@dev: network device
 *
 *	Device has detected acquisition of carrier.
 */
void netif_carrier_on(struct net_device *dev)
{
	if (test_and_clear_bit(__LINK_STATE_NOCARRIER, &dev->state)) {
		if (dev->reg_state == NETREG_UNINITIALIZED)
			return;
		atomic_inc(&dev->carrier_changes);
		linkwatch_fire_event(dev);
		if (netif_running(dev))
			__netdev_watchdog_up(dev);
	}
}
EXPORT_SYMBOL(netif_carrier_on);

/**
 *	netif_carrier_off - clear carrier
 *	@dev: network device
 *
 *	Device has detected loss of carrier.
 */
void netif_carrier_off(struct net_device *dev)
{
	if (!test_and_set_bit(__LINK_STATE_NOCARRIER, &dev->state)) {
		if (dev->reg_state == NETREG_UNINITIALIZED)
			return;
		atomic_inc(&dev->carrier_changes);
		linkwatch_fire_event(dev);
	}
}
EXPORT_SYMBOL(netif_carrier_off);
/* "NOOP" scheduler: the best scheduler, recommended for all interfaces
   under all circumstances. It is difficult to invent anything faster or
   cheaper.
 */

static int noop_enqueue(struct sk_buff *skb, struct Qdisc *qdisc)
{
	kfree_skb(skb);
	return NET_XMIT_CN;
}

static struct sk_buff *noop_dequeue(struct Qdisc *qdisc)
{
	return NULL;
}

struct Qdisc_ops noop_qdisc_ops __read_mostly = {
	.id		=	"noop",
	.priv_size	=	0,
	.enqueue	=	noop_enqueue,
	.dequeue	=	noop_dequeue,
	.peek		=	noop_dequeue,
	.owner		=	THIS_MODULE,
};

static struct netdev_queue noop_netdev_queue = {
	.qdisc		=	&noop_qdisc,
	.qdisc_sleeping	=	&noop_qdisc,
};

struct Qdisc noop_qdisc = {
	.enqueue	=	noop_enqueue,
	.dequeue	=	noop_dequeue,
	.flags		=	TCQ_F_BUILTIN,
	.ops		=	&noop_qdisc_ops,
	.list		=	LIST_HEAD_INIT(noop_qdisc.list),
	.q.lock		=	__SPIN_LOCK_UNLOCKED(noop_qdisc.q.lock),
	.dev_queue	=	&noop_netdev_queue,
	.busylock	=	__SPIN_LOCK_UNLOCKED(noop_qdisc.busylock),
};
EXPORT_SYMBOL(noop_qdisc);

static int noqueue_init(struct Qdisc *qdisc, struct nlattr *opt)
{
	/* register_qdisc() assigns a default of noop_enqueue if unset,
	 * but __dev_queue_xmit() treats noqueue only as such
	 * if this is NULL - so clear it here. */
	qdisc->enqueue = NULL;
	return 0;
}

struct Qdisc_ops noqueue_qdisc_ops __read_mostly = {
	.id		=	"noqueue",
	.priv_size	=	0,
	.init		=	noqueue_init,
	.enqueue	=	noop_enqueue,
	.dequeue	=	noop_dequeue,
	.peek		=	noop_dequeue,
	.owner		=	THIS_MODULE,
};

static const u8 prio2band[TC_PRIO_MAX + 1] = {
	1, 2, 2, 2, 1, 2, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1
};

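/* Reading prio2band with the TC_PRIO_* values from include/uapi/linux/pkt_sched.h
 * (quoted from memory): band 0 is serviced first and takes TC_PRIO_INTERACTIVE (6)
 * and TC_PRIO_CONTROL (7); band 1 takes best-effort traffic such as
 * TC_PRIO_BESTEFFORT (0); band 2 takes background traffic such as TC_PRIO_BULK (2).
 */
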
/* 3-band FIFO queue: old style, but should be a bit faster than
   generic prio+fifo combination.
 */

#define PFIFO_FAST_BANDS 3

/*
 * Private data for a pfifo_fast scheduler containing:
 *	- queues for the three bands
 *	- bitmap indicating which of the bands contain skbs
 */
struct pfifo_fast_priv {
	u32 bitmap;
	struct sk_buff_head q[PFIFO_FAST_BANDS];
};

/*
 * Convert a bitmap to the first band number where an skb is queued, where:
 *	bitmap=0 means there are no skbs on any band.
 *	bitmap=1 means there is an skb on band 0.
 *	bitmap=7 means there are skbs on all 3 bands, etc.
 */
static const int bitmap2band[] = {-1, 0, 1, 0, 2, 0, 1, 0};

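/* Worked example: if only bands 1 and 2 hold packets, bitmap == 6 (0b110) and
 * bitmap2band[6] == 1, so band 1 (the higher-priority non-empty band) is
 * dequeued first; once it drains, bitmap becomes 4 and band 2 is served.
 */
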
static inline struct sk_buff_head *band2list(struct pfifo_fast_priv *priv,
					     int band)
{
	return priv->q + band;
}

static int pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc *qdisc)
{
	if (skb_queue_len(&qdisc->q) < qdisc_dev(qdisc)->tx_queue_len) {
		int band = prio2band[skb->priority & TC_PRIO_MAX];
		struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
		struct sk_buff_head *list = band2list(priv, band);

		priv->bitmap |= (1 << band);
		qdisc->q.qlen++;
		return __qdisc_enqueue_tail(skb, qdisc, list);
	}

	return qdisc_drop(skb, qdisc);
}

static struct sk_buff *pfifo_fast_dequeue(struct Qdisc *qdisc)
{
	struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
	int band = bitmap2band[priv->bitmap];

	if (likely(band >= 0)) {
		struct sk_buff_head *list = band2list(priv, band);
		struct sk_buff *skb = __qdisc_dequeue_head(qdisc, list);

		qdisc->q.qlen--;
		if (skb_queue_empty(list))
			priv->bitmap &= ~(1 << band);

		return skb;
	}

	return NULL;
}

static struct sk_buff *pfifo_fast_peek(struct Qdisc *qdisc)
{
	struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
	int band = bitmap2band[priv->bitmap];

	if (band >= 0) {
		struct sk_buff_head *list = band2list(priv, band);

		return skb_peek(list);
	}

	return NULL;
}

static void pfifo_fast_reset(struct Qdisc *qdisc)
{
	int prio;
	struct pfifo_fast_priv *priv = qdisc_priv(qdisc);

	for (prio = 0; prio < PFIFO_FAST_BANDS; prio++)
		__qdisc_reset_queue(qdisc, band2list(priv, prio));

	priv->bitmap = 0;
	qdisc->qstats.backlog = 0;
	qdisc->q.qlen = 0;
}

static int pfifo_fast_dump(struct Qdisc *qdisc, struct sk_buff *skb)
{
	struct tc_prio_qopt opt = { .bands = PFIFO_FAST_BANDS };

	memcpy(&opt.priomap, prio2band, TC_PRIO_MAX + 1);
	if (nla_put(skb, TCA_OPTIONS, sizeof(opt), &opt))
		goto nla_put_failure;
	return skb->len;

nla_put_failure:
	return -1;
}

static int pfifo_fast_init(struct Qdisc *qdisc, struct nlattr *opt)
{
	int prio;
	struct pfifo_fast_priv *priv = qdisc_priv(qdisc);

	for (prio = 0; prio < PFIFO_FAST_BANDS; prio++)
		__skb_queue_head_init(band2list(priv, prio));

	/* Can by-pass the queue discipline */
	qdisc->flags |= TCQ_F_CAN_BYPASS;
	return 0;
}

struct Qdisc_ops pfifo_fast_ops __read_mostly = {
	.id		=	"pfifo_fast",
	.priv_size	=	sizeof(struct pfifo_fast_priv),
	.enqueue	=	pfifo_fast_enqueue,
	.dequeue	=	pfifo_fast_dequeue,
	.peek		=	pfifo_fast_peek,
	.init		=	pfifo_fast_init,
	.reset		=	pfifo_fast_reset,
	.dump		=	pfifo_fast_dump,
	.owner		=	THIS_MODULE,
};
static struct lock_class_key qdisc_tx_busylock;

struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
			  const struct Qdisc_ops *ops)
{
	void *p;
	struct Qdisc *sch;
	unsigned int size = QDISC_ALIGN(sizeof(*sch)) + ops->priv_size;
	int err = -ENOBUFS;
	struct net_device *dev = dev_queue->dev;

	p = kzalloc_node(size, GFP_KERNEL,
			 netdev_queue_numa_node_read(dev_queue));

	if (!p)
		goto errout;
	sch = (struct Qdisc *) QDISC_ALIGN((unsigned long) p);
	/* if we got non aligned memory, ask for more and do the alignment ourselves */
	if (sch != p) {
		kfree(p);
		p = kzalloc_node(size + QDISC_ALIGNTO - 1, GFP_KERNEL,
				 netdev_queue_numa_node_read(dev_queue));
		if (!p)
			goto errout;
		sch = (struct Qdisc *) QDISC_ALIGN((unsigned long) p);
		sch->padded = (char *) sch - (char *) p;
	}
	INIT_LIST_HEAD(&sch->list);
	skb_queue_head_init(&sch->q);

	spin_lock_init(&sch->busylock);
	lockdep_set_class(&sch->busylock,
			  dev->qdisc_tx_busylock ?: &qdisc_tx_busylock);

	sch->ops = ops;
	sch->enqueue = ops->enqueue;
	sch->dequeue = ops->dequeue;
	sch->dev_queue = dev_queue;
	dev_hold(dev);
	atomic_set(&sch->refcnt, 1);

	return sch;
errout:
	return ERR_PTR(err);
}

struct Qdisc *qdisc_create_dflt(struct netdev_queue *dev_queue,
				const struct Qdisc_ops *ops,
				unsigned int parentid)
{
	struct Qdisc *sch;

	if (!try_module_get(ops->owner))
		goto errout;

	sch = qdisc_alloc(dev_queue, ops);
	if (IS_ERR(sch))
		goto errout;
	sch->parent = parentid;

	if (!ops->init || ops->init(sch, NULL) == 0)
		return sch;

	qdisc_destroy(sch);
errout:
	return NULL;
}
EXPORT_SYMBOL(qdisc_create_dflt);
/* Under qdisc_lock(qdisc) and BH! */

void qdisc_reset(struct Qdisc *qdisc)
{
	const struct Qdisc_ops *ops = qdisc->ops;

	if (ops->reset)
		ops->reset(qdisc);

	if (qdisc->gso_skb) {
		kfree_skb_list(qdisc->gso_skb);
		qdisc->gso_skb = NULL;
		qdisc->q.qlen = 0;
	}
}
EXPORT_SYMBOL(qdisc_reset);

static void qdisc_rcu_free(struct rcu_head *head)
{
	struct Qdisc *qdisc = container_of(head, struct Qdisc, rcu_head);

	if (qdisc_is_percpu_stats(qdisc))
		free_percpu(qdisc->cpu_bstats);

	kfree((char *) qdisc - qdisc->padded);
}

void qdisc_destroy(struct Qdisc *qdisc)
{
	const struct Qdisc_ops *ops = qdisc->ops;

	if (qdisc->flags & TCQ_F_BUILTIN ||
	    !atomic_dec_and_test(&qdisc->refcnt))
		return;

#ifdef CONFIG_NET_SCHED
	qdisc_list_del(qdisc);

	qdisc_put_stab(rtnl_dereference(qdisc->stab));
#endif
	gen_kill_estimator(&qdisc->bstats, &qdisc->rate_est);
	if (ops->reset)
		ops->reset(qdisc);
	if (ops->destroy)
		ops->destroy(qdisc);

	module_put(ops->owner);
	dev_put(qdisc_dev(qdisc));

	kfree_skb_list(qdisc->gso_skb);
	/*
	 * gen_estimator est_timer() might access qdisc->q.lock,
	 * wait a RCU grace period before freeing qdisc.
	 */
	call_rcu(&qdisc->rcu_head, qdisc_rcu_free);
}
EXPORT_SYMBOL(qdisc_destroy);

/* Attach toplevel qdisc to device queue. */
struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue,
			      struct Qdisc *qdisc)
{
	struct Qdisc *oqdisc = dev_queue->qdisc_sleeping;
	spinlock_t *root_lock;

	root_lock = qdisc_lock(oqdisc);
	spin_lock_bh(root_lock);

	/* Prune old scheduler */
	if (oqdisc && atomic_read(&oqdisc->refcnt) <= 1)
		qdisc_reset(oqdisc);

	/* ... and graft new one */
	if (qdisc == NULL)
		qdisc = &noop_qdisc;
	dev_queue->qdisc_sleeping = qdisc;
	rcu_assign_pointer(dev_queue->qdisc, &noop_qdisc);

	spin_unlock_bh(root_lock);

	return oqdisc;
}
EXPORT_SYMBOL(dev_graft_qdisc);
static void attach_one_default_qdisc(struct net_device *dev,
				     struct netdev_queue *dev_queue,
				     void *_unused)
{
	struct Qdisc *qdisc;
	const struct Qdisc_ops *ops = default_qdisc_ops;

	if (dev->priv_flags & IFF_NO_QUEUE)
		ops = &noqueue_qdisc_ops;

	qdisc = qdisc_create_dflt(dev_queue, ops, TC_H_ROOT);
	if (!qdisc) {
		netdev_info(dev, "activation failed\n");
		return;
	}
	if (!netif_is_multiqueue(dev))
		qdisc->flags |= TCQ_F_ONETXQUEUE;
	dev_queue->qdisc_sleeping = qdisc;
}

static void attach_default_qdiscs(struct net_device *dev)
{
	struct netdev_queue *txq;
	struct Qdisc *qdisc;

	txq = netdev_get_tx_queue(dev, 0);

	if (!netif_is_multiqueue(dev) ||
	    dev->priv_flags & IFF_NO_QUEUE) {
		netdev_for_each_tx_queue(dev, attach_one_default_qdisc, NULL);
		dev->qdisc = txq->qdisc_sleeping;
		atomic_inc(&dev->qdisc->refcnt);
	} else {
		qdisc = qdisc_create_dflt(txq, &mq_qdisc_ops, TC_H_ROOT);
		if (qdisc) {
			dev->qdisc = qdisc;
			qdisc->ops->attach(qdisc);
		}
	}
}

static void transition_one_qdisc(struct net_device *dev,
				 struct netdev_queue *dev_queue,
				 void *_need_watchdog)
{
	struct Qdisc *new_qdisc = dev_queue->qdisc_sleeping;
	int *need_watchdog_p = _need_watchdog;

	if (!(new_qdisc->flags & TCQ_F_BUILTIN))
		clear_bit(__QDISC_STATE_DEACTIVATED, &new_qdisc->state);

	rcu_assign_pointer(dev_queue->qdisc, new_qdisc);
	if (need_watchdog_p) {
		dev_queue->trans_start = 0;
		*need_watchdog_p = 1;
	}
}

void dev_activate(struct net_device *dev)
{
	int need_watchdog;

	/* If no queueing discipline is attached to the device yet, create a
	 * default one for devices that need queueing, and the noqueue qdisc
	 * for virtual interfaces.
	 */
	if (dev->qdisc == &noop_qdisc)
		attach_default_qdiscs(dev);

	if (!netif_carrier_ok(dev))
		/* Delay activation until next carrier-on event */
		return;

	need_watchdog = 0;
	netdev_for_each_tx_queue(dev, transition_one_qdisc, &need_watchdog);
	if (dev_ingress_queue(dev))
		transition_one_qdisc(dev, dev_ingress_queue(dev), NULL);

	if (need_watchdog) {
		dev->trans_start = jiffies;
		dev_watchdog_up(dev);
	}
}
EXPORT_SYMBOL(dev_activate);
static void dev_deactivate_queue(struct net_device *dev,
				 struct netdev_queue *dev_queue,
				 void *_qdisc_default)
{
	struct Qdisc *qdisc_default = _qdisc_default;
	struct Qdisc *qdisc;

	qdisc = rtnl_dereference(dev_queue->qdisc);
	if (qdisc) {
		spin_lock_bh(qdisc_lock(qdisc));

		if (!(qdisc->flags & TCQ_F_BUILTIN))
			set_bit(__QDISC_STATE_DEACTIVATED, &qdisc->state);

		rcu_assign_pointer(dev_queue->qdisc, qdisc_default);
		qdisc_reset(qdisc);

		spin_unlock_bh(qdisc_lock(qdisc));
	}
}

static bool some_qdisc_is_busy(struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *dev_queue;
		spinlock_t *root_lock;
		struct Qdisc *q;
		int val;

		dev_queue = netdev_get_tx_queue(dev, i);
		q = dev_queue->qdisc_sleeping;
		root_lock = qdisc_lock(q);

		spin_lock_bh(root_lock);

		val = (qdisc_is_running(q) ||
		       test_bit(__QDISC_STATE_SCHED, &q->state));

		spin_unlock_bh(root_lock);

		if (val)
			return true;
	}
	return false;
}

/**
 *	dev_deactivate_many - deactivate transmissions on several devices
 *	@head: list of devices to deactivate
 *
 *	This function returns only when all outstanding transmissions
 *	have completed, unless all devices are in dismantle phase.
 */
void dev_deactivate_many(struct list_head *head)
{
	struct net_device *dev;
	bool sync_needed = false;

	list_for_each_entry(dev, head, close_list) {
		netdev_for_each_tx_queue(dev, dev_deactivate_queue,
					 &noop_qdisc);
		if (dev_ingress_queue(dev))
			dev_deactivate_queue(dev, dev_ingress_queue(dev),
					     &noop_qdisc);

		dev_watchdog_down(dev);
		sync_needed |= !dev->dismantle;
	}

	/* Wait for outstanding qdisc-less dev_queue_xmit calls.
	 * This is avoided if all devices are in dismantle phase :
	 * Caller will call synchronize_net() for us
	 */
	if (sync_needed)
		synchronize_net();

	/* Wait for outstanding qdisc_run calls. */
	list_for_each_entry(dev, head, close_list)
		while (some_qdisc_is_busy(dev))
			yield();
}

void dev_deactivate(struct net_device *dev)
{
	LIST_HEAD(single);

	list_add(&dev->close_list, &single);
	dev_deactivate_many(&single);
	list_del(&single);
}
EXPORT_SYMBOL(dev_deactivate);
static void dev_init_scheduler_queue(struct net_device *dev,
				     struct netdev_queue *dev_queue,
				     void *_qdisc)
{
	struct Qdisc *qdisc = _qdisc;

	rcu_assign_pointer(dev_queue->qdisc, qdisc);
	dev_queue->qdisc_sleeping = qdisc;
}

void dev_init_scheduler(struct net_device *dev)
{
	dev->qdisc = &noop_qdisc;
	netdev_for_each_tx_queue(dev, dev_init_scheduler_queue, &noop_qdisc);
	if (dev_ingress_queue(dev))
		dev_init_scheduler_queue(dev, dev_ingress_queue(dev), &noop_qdisc);

	setup_timer(&dev->watchdog_timer, dev_watchdog, (unsigned long)dev);
}

static void shutdown_scheduler_queue(struct net_device *dev,
				     struct netdev_queue *dev_queue,
				     void *_qdisc_default)
{
	struct Qdisc *qdisc = dev_queue->qdisc_sleeping;
	struct Qdisc *qdisc_default = _qdisc_default;

	if (qdisc) {
		rcu_assign_pointer(dev_queue->qdisc, qdisc_default);
		dev_queue->qdisc_sleeping = qdisc_default;

		qdisc_destroy(qdisc);
	}
}

void dev_shutdown(struct net_device *dev)
{
	netdev_for_each_tx_queue(dev, shutdown_scheduler_queue, &noop_qdisc);
	if (dev_ingress_queue(dev))
		shutdown_scheduler_queue(dev, dev_ingress_queue(dev), &noop_qdisc);
	qdisc_destroy(dev->qdisc);
	dev->qdisc = &noop_qdisc;

	WARN_ON(timer_pending(&dev->watchdog_timer));
}
void psched_ratecfg_precompute(struct psched_ratecfg *r,
			       const struct tc_ratespec *conf,
			       u64 rate64)
{
	memset(r, 0, sizeof(*r));
	r->overhead = conf->overhead;
	r->rate_bytes_ps = max_t(u64, conf->rate, rate64);
	r->linklayer = (conf->linklayer & TC_LINKLAYER_MASK);
	r->mult = 1;
	/*
	 * The deal here is to replace a divide by a reciprocal one
	 * in fast path (a reciprocal divide is a multiply and a shift)
	 *
	 * Normal formula would be :
	 *  time_in_ns = (NSEC_PER_SEC * len) / rate_bps
	 *
	 * We compute mult/shift to use instead :
	 *  time_in_ns = (len * mult) >> shift;
	 *
	 * We try to get the highest possible mult value for accuracy,
	 * but have to make sure no overflows will ever happen.
	 */
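	/*
	 * Rough worked example: at 1 Mbit/s, rate_bytes_ps = 125000, so the
	 * first iteration gives mult = 10^9 / 125000 = 8000 with shift = 0;
	 * the loop below then doubles factor and bumps shift until mult
	 * approaches 2^31.  Either way, a 1500 byte packet costs
	 * (1500 * mult) >> shift, about 12,000,000 ns, i.e. 12 ms at 1 Mbit/s.
	 */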
	if (r->rate_bytes_ps > 0) {
		u64 factor = NSEC_PER_SEC;

		for (;;) {
			r->mult = div64_u64(factor, r->rate_bytes_ps);
			if (r->mult & (1U << 31) || factor & (1ULL << 63))
				break;
			factor <<= 1;
			r->shift++;
		}
	}
}
EXPORT_SYMBOL(psched_ratecfg_precompute);