sch_generic.h 21 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650651652653654655656657658659660661662663664665666667668669670671672673674675676677678679680681682683684685686687688689690691692693694695696697698699700701702703704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864
  1. #ifndef __NET_SCHED_GENERIC_H
  2. #define __NET_SCHED_GENERIC_H
  3. #include <linux/netdevice.h>
  4. #include <linux/types.h>
  5. #include <linux/rcupdate.h>
  6. #include <linux/pkt_sched.h>
  7. #include <linux/pkt_cls.h>
  8. #include <linux/percpu.h>
  9. #include <linux/dynamic_queue_limits.h>
  10. #include <net/gen_stats.h>
  11. #include <net/rtnetlink.h>
  12. struct Qdisc_ops;
  13. struct qdisc_walker;
  14. struct tcf_walker;
  15. struct module;
  16. struct qdisc_rate_table {
  17. struct tc_ratespec rate;
  18. u32 data[256];
  19. struct qdisc_rate_table *next;
  20. int refcnt;
  21. };
  22. enum qdisc_state_t {
  23. __QDISC_STATE_SCHED,
  24. __QDISC_STATE_DEACTIVATED,
  25. };
  26. struct qdisc_size_table {
  27. struct rcu_head rcu;
  28. struct list_head list;
  29. struct tc_sizespec szopts;
  30. int refcnt;
  31. u16 data[];
  32. };
  33. /* similar to sk_buff_head, but skb->prev pointer is undefined. */
  34. struct qdisc_skb_head {
  35. struct sk_buff *head;
  36. struct sk_buff *tail;
  37. __u32 qlen;
  38. spinlock_t lock;
  39. };
  40. struct Qdisc {
  41. int (*enqueue)(struct sk_buff *skb,
  42. struct Qdisc *sch,
  43. struct sk_buff **to_free);
  44. struct sk_buff * (*dequeue)(struct Qdisc *sch);
  45. unsigned int flags;
  46. #define TCQ_F_BUILTIN 1
  47. #define TCQ_F_INGRESS 2
  48. #define TCQ_F_CAN_BYPASS 4
  49. #define TCQ_F_MQROOT 8
  50. #define TCQ_F_ONETXQUEUE 0x10 /* dequeue_skb() can assume all skbs are for
  51. * q->dev_queue : It can test
  52. * netif_xmit_frozen_or_stopped() before
  53. * dequeueing next packet.
  54. * Its true for MQ/MQPRIO slaves, or non
  55. * multiqueue device.
  56. */
  57. #define TCQ_F_WARN_NONWC (1 << 16)
  58. #define TCQ_F_CPUSTATS 0x20 /* run using percpu statistics */
  59. #define TCQ_F_NOPARENT 0x40 /* root of its hierarchy :
  60. * qdisc_tree_decrease_qlen() should stop.
  61. */
  62. #define TCQ_F_INVISIBLE 0x80 /* invisible by default in dump */
  63. u32 limit;
  64. const struct Qdisc_ops *ops;
  65. struct qdisc_size_table __rcu *stab;
  66. struct hlist_node hash;
  67. u32 handle;
  68. u32 parent;
  69. void *u32_node;
  70. struct netdev_queue *dev_queue;
  71. struct net_rate_estimator __rcu *rate_est;
  72. struct gnet_stats_basic_cpu __percpu *cpu_bstats;
  73. struct gnet_stats_queue __percpu *cpu_qstats;
  74. /*
  75. * For performance sake on SMP, we put highly modified fields at the end
  76. */
  77. struct sk_buff *gso_skb ____cacheline_aligned_in_smp;
  78. struct qdisc_skb_head q;
  79. struct gnet_stats_basic_packed bstats;
  80. seqcount_t running;
  81. struct gnet_stats_queue qstats;
  82. unsigned long state;
  83. struct Qdisc *next_sched;
  84. struct sk_buff *skb_bad_txq;
  85. struct rcu_head rcu_head;
  86. int padded;
  87. atomic_t refcnt;
  88. spinlock_t busylock ____cacheline_aligned_in_smp;
  89. };
  90. static inline bool qdisc_is_running(const struct Qdisc *qdisc)
  91. {
  92. return (raw_read_seqcount(&qdisc->running) & 1) ? true : false;
  93. }
  94. static inline bool qdisc_run_begin(struct Qdisc *qdisc)
  95. {
  96. if (qdisc_is_running(qdisc))
  97. return false;
  98. /* Variant of write_seqcount_begin() telling lockdep a trylock
  99. * was attempted.
  100. */
  101. raw_write_seqcount_begin(&qdisc->running);
  102. seqcount_acquire(&qdisc->running.dep_map, 0, 1, _RET_IP_);
  103. return true;
  104. }
  105. static inline void qdisc_run_end(struct Qdisc *qdisc)
  106. {
  107. write_seqcount_end(&qdisc->running);
  108. }
  109. static inline bool qdisc_may_bulk(const struct Qdisc *qdisc)
  110. {
  111. return qdisc->flags & TCQ_F_ONETXQUEUE;
  112. }
  113. static inline int qdisc_avail_bulklimit(const struct netdev_queue *txq)
  114. {
  115. #ifdef CONFIG_BQL
  116. /* Non-BQL migrated drivers will return 0, too. */
  117. return dql_avail(&txq->dql);
  118. #else
  119. return 0;
  120. #endif
  121. }
  122. struct Qdisc_class_ops {
  123. /* Child qdisc manipulation */
  124. struct netdev_queue * (*select_queue)(struct Qdisc *, struct tcmsg *);
  125. int (*graft)(struct Qdisc *, unsigned long cl,
  126. struct Qdisc *, struct Qdisc **);
  127. struct Qdisc * (*leaf)(struct Qdisc *, unsigned long cl);
  128. void (*qlen_notify)(struct Qdisc *, unsigned long);
  129. /* Class manipulation routines */
  130. unsigned long (*get)(struct Qdisc *, u32 classid);
  131. void (*put)(struct Qdisc *, unsigned long);
  132. int (*change)(struct Qdisc *, u32, u32,
  133. struct nlattr **, unsigned long *);
  134. int (*delete)(struct Qdisc *, unsigned long);
  135. void (*walk)(struct Qdisc *, struct qdisc_walker * arg);
  136. /* Filter manipulation */
  137. struct tcf_proto __rcu ** (*tcf_chain)(struct Qdisc *, unsigned long);
  138. bool (*tcf_cl_offload)(u32 classid);
  139. unsigned long (*bind_tcf)(struct Qdisc *, unsigned long,
  140. u32 classid);
  141. void (*unbind_tcf)(struct Qdisc *, unsigned long);
  142. /* rtnetlink specific */
  143. int (*dump)(struct Qdisc *, unsigned long,
  144. struct sk_buff *skb, struct tcmsg*);
  145. int (*dump_stats)(struct Qdisc *, unsigned long,
  146. struct gnet_dump *);
  147. };
  148. struct Qdisc_ops {
  149. struct Qdisc_ops *next;
  150. const struct Qdisc_class_ops *cl_ops;
  151. char id[IFNAMSIZ];
  152. int priv_size;
  153. int (*enqueue)(struct sk_buff *skb,
  154. struct Qdisc *sch,
  155. struct sk_buff **to_free);
  156. struct sk_buff * (*dequeue)(struct Qdisc *);
  157. struct sk_buff * (*peek)(struct Qdisc *);
  158. int (*init)(struct Qdisc *, struct nlattr *arg);
  159. void (*reset)(struct Qdisc *);
  160. void (*destroy)(struct Qdisc *);
  161. int (*change)(struct Qdisc *, struct nlattr *arg);
  162. void (*attach)(struct Qdisc *);
  163. int (*dump)(struct Qdisc *, struct sk_buff *);
  164. int (*dump_stats)(struct Qdisc *, struct gnet_dump *);
  165. struct module *owner;
  166. };
  167. struct tcf_result {
  168. unsigned long class;
  169. u32 classid;
  170. };
  171. struct tcf_proto_ops {
  172. struct list_head head;
  173. char kind[IFNAMSIZ];
  174. int (*classify)(struct sk_buff *,
  175. const struct tcf_proto *,
  176. struct tcf_result *);
  177. int (*init)(struct tcf_proto*);
  178. void (*destroy)(struct tcf_proto*);
  179. unsigned long (*get)(struct tcf_proto*, u32 handle);
  180. int (*change)(struct net *net, struct sk_buff *,
  181. struct tcf_proto*, unsigned long,
  182. u32 handle, struct nlattr **,
  183. unsigned long *, bool);
  184. int (*delete)(struct tcf_proto*, unsigned long, bool*);
  185. void (*walk)(struct tcf_proto*, struct tcf_walker *arg);
  186. /* rtnetlink specific */
  187. int (*dump)(struct net*, struct tcf_proto*, unsigned long,
  188. struct sk_buff *skb, struct tcmsg*);
  189. struct module *owner;
  190. };
  191. struct tcf_proto {
  192. /* Fast access part */
  193. struct tcf_proto __rcu *next;
  194. void __rcu *root;
  195. int (*classify)(struct sk_buff *,
  196. const struct tcf_proto *,
  197. struct tcf_result *);
  198. __be16 protocol;
  199. /* All the rest */
  200. u32 prio;
  201. u32 classid;
  202. struct Qdisc *q;
  203. void *data;
  204. const struct tcf_proto_ops *ops;
  205. struct rcu_head rcu;
  206. };
  207. struct qdisc_skb_cb {
  208. unsigned int pkt_len;
  209. u16 slave_dev_queue_mapping;
  210. u16 tc_classid;
  211. #define QDISC_CB_PRIV_LEN 20
  212. unsigned char data[QDISC_CB_PRIV_LEN];
  213. };
  214. static inline void qdisc_cb_private_validate(const struct sk_buff *skb, int sz)
  215. {
  216. struct qdisc_skb_cb *qcb;
  217. BUILD_BUG_ON(sizeof(skb->cb) < offsetof(struct qdisc_skb_cb, data) + sz);
  218. BUILD_BUG_ON(sizeof(qcb->data) < sz);
  219. }
  220. static inline int qdisc_qlen(const struct Qdisc *q)
  221. {
  222. return q->q.qlen;
  223. }
  224. static inline struct qdisc_skb_cb *qdisc_skb_cb(const struct sk_buff *skb)
  225. {
  226. return (struct qdisc_skb_cb *)skb->cb;
  227. }
  228. static inline spinlock_t *qdisc_lock(struct Qdisc *qdisc)
  229. {
  230. return &qdisc->q.lock;
  231. }
  232. static inline struct Qdisc *qdisc_root(const struct Qdisc *qdisc)
  233. {
  234. struct Qdisc *q = rcu_dereference_rtnl(qdisc->dev_queue->qdisc);
  235. return q;
  236. }
  237. static inline struct Qdisc *qdisc_root_sleeping(const struct Qdisc *qdisc)
  238. {
  239. return qdisc->dev_queue->qdisc_sleeping;
  240. }
  241. /* The qdisc root lock is a mechanism by which to top level
  242. * of a qdisc tree can be locked from any qdisc node in the
  243. * forest. This allows changing the configuration of some
  244. * aspect of the qdisc tree while blocking out asynchronous
  245. * qdisc access in the packet processing paths.
  246. *
  247. * It is only legal to do this when the root will not change
  248. * on us. Otherwise we'll potentially lock the wrong qdisc
  249. * root. This is enforced by holding the RTNL semaphore, which
  250. * all users of this lock accessor must do.
  251. */
  252. static inline spinlock_t *qdisc_root_lock(const struct Qdisc *qdisc)
  253. {
  254. struct Qdisc *root = qdisc_root(qdisc);
  255. ASSERT_RTNL();
  256. return qdisc_lock(root);
  257. }
  258. static inline spinlock_t *qdisc_root_sleeping_lock(const struct Qdisc *qdisc)
  259. {
  260. struct Qdisc *root = qdisc_root_sleeping(qdisc);
  261. ASSERT_RTNL();
  262. return qdisc_lock(root);
  263. }
  264. static inline seqcount_t *qdisc_root_sleeping_running(const struct Qdisc *qdisc)
  265. {
  266. struct Qdisc *root = qdisc_root_sleeping(qdisc);
  267. ASSERT_RTNL();
  268. return &root->running;
  269. }
  270. static inline struct net_device *qdisc_dev(const struct Qdisc *qdisc)
  271. {
  272. return qdisc->dev_queue->dev;
  273. }
  274. static inline void sch_tree_lock(const struct Qdisc *q)
  275. {
  276. spin_lock_bh(qdisc_root_sleeping_lock(q));
  277. }
  278. static inline void sch_tree_unlock(const struct Qdisc *q)
  279. {
  280. spin_unlock_bh(qdisc_root_sleeping_lock(q));
  281. }
  282. #define tcf_tree_lock(tp) sch_tree_lock((tp)->q)
  283. #define tcf_tree_unlock(tp) sch_tree_unlock((tp)->q)
  284. extern struct Qdisc noop_qdisc;
  285. extern struct Qdisc_ops noop_qdisc_ops;
  286. extern struct Qdisc_ops pfifo_fast_ops;
  287. extern struct Qdisc_ops mq_qdisc_ops;
  288. extern struct Qdisc_ops noqueue_qdisc_ops;
  289. extern const struct Qdisc_ops *default_qdisc_ops;
  290. static inline const struct Qdisc_ops *
  291. get_default_qdisc_ops(const struct net_device *dev, int ntx)
  292. {
  293. return ntx < dev->real_num_tx_queues ?
  294. default_qdisc_ops : &pfifo_fast_ops;
  295. }
  296. struct Qdisc_class_common {
  297. u32 classid;
  298. struct hlist_node hnode;
  299. };
  300. struct Qdisc_class_hash {
  301. struct hlist_head *hash;
  302. unsigned int hashsize;
  303. unsigned int hashmask;
  304. unsigned int hashelems;
  305. };
  306. static inline unsigned int qdisc_class_hash(u32 id, u32 mask)
  307. {
  308. id ^= id >> 8;
  309. id ^= id >> 4;
  310. return id & mask;
  311. }
  312. static inline struct Qdisc_class_common *
  313. qdisc_class_find(const struct Qdisc_class_hash *hash, u32 id)
  314. {
  315. struct Qdisc_class_common *cl;
  316. unsigned int h;
  317. h = qdisc_class_hash(id, hash->hashmask);
  318. hlist_for_each_entry(cl, &hash->hash[h], hnode) {
  319. if (cl->classid == id)
  320. return cl;
  321. }
  322. return NULL;
  323. }
  324. int qdisc_class_hash_init(struct Qdisc_class_hash *);
  325. void qdisc_class_hash_insert(struct Qdisc_class_hash *,
  326. struct Qdisc_class_common *);
  327. void qdisc_class_hash_remove(struct Qdisc_class_hash *,
  328. struct Qdisc_class_common *);
  329. void qdisc_class_hash_grow(struct Qdisc *, struct Qdisc_class_hash *);
  330. void qdisc_class_hash_destroy(struct Qdisc_class_hash *);
  331. void dev_init_scheduler(struct net_device *dev);
  332. void dev_shutdown(struct net_device *dev);
  333. void dev_activate(struct net_device *dev);
  334. void dev_deactivate(struct net_device *dev);
  335. void dev_deactivate_many(struct list_head *head);
  336. struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue,
  337. struct Qdisc *qdisc);
  338. void qdisc_reset(struct Qdisc *qdisc);
  339. void qdisc_destroy(struct Qdisc *qdisc);
  340. void qdisc_tree_reduce_backlog(struct Qdisc *qdisc, unsigned int n,
  341. unsigned int len);
  342. struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
  343. const struct Qdisc_ops *ops);
  344. struct Qdisc *qdisc_create_dflt(struct netdev_queue *dev_queue,
  345. const struct Qdisc_ops *ops, u32 parentid);
  346. void __qdisc_calculate_pkt_len(struct sk_buff *skb,
  347. const struct qdisc_size_table *stab);
  348. int skb_do_redirect(struct sk_buff *);
  349. static inline void skb_reset_tc(struct sk_buff *skb)
  350. {
  351. #ifdef CONFIG_NET_CLS_ACT
  352. skb->tc_redirected = 0;
  353. #endif
  354. }
  355. static inline bool skb_at_tc_ingress(const struct sk_buff *skb)
  356. {
  357. #ifdef CONFIG_NET_CLS_ACT
  358. return skb->tc_at_ingress;
  359. #else
  360. return false;
  361. #endif
  362. }
  363. static inline bool skb_skip_tc_classify(struct sk_buff *skb)
  364. {
  365. #ifdef CONFIG_NET_CLS_ACT
  366. if (skb->tc_skip_classify) {
  367. skb->tc_skip_classify = 0;
  368. return true;
  369. }
  370. #endif
  371. return false;
  372. }
  373. /* Reset all TX qdiscs greater then index of a device. */
  374. static inline void qdisc_reset_all_tx_gt(struct net_device *dev, unsigned int i)
  375. {
  376. struct Qdisc *qdisc;
  377. for (; i < dev->num_tx_queues; i++) {
  378. qdisc = rtnl_dereference(netdev_get_tx_queue(dev, i)->qdisc);
  379. if (qdisc) {
  380. spin_lock_bh(qdisc_lock(qdisc));
  381. qdisc_reset(qdisc);
  382. spin_unlock_bh(qdisc_lock(qdisc));
  383. }
  384. }
  385. }
  386. static inline void qdisc_reset_all_tx(struct net_device *dev)
  387. {
  388. qdisc_reset_all_tx_gt(dev, 0);
  389. }
  390. /* Are all TX queues of the device empty? */
  391. static inline bool qdisc_all_tx_empty(const struct net_device *dev)
  392. {
  393. unsigned int i;
  394. rcu_read_lock();
  395. for (i = 0; i < dev->num_tx_queues; i++) {
  396. struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
  397. const struct Qdisc *q = rcu_dereference(txq->qdisc);
  398. if (q->q.qlen) {
  399. rcu_read_unlock();
  400. return false;
  401. }
  402. }
  403. rcu_read_unlock();
  404. return true;
  405. }
  406. /* Are any of the TX qdiscs changing? */
  407. static inline bool qdisc_tx_changing(const struct net_device *dev)
  408. {
  409. unsigned int i;
  410. for (i = 0; i < dev->num_tx_queues; i++) {
  411. struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
  412. if (rcu_access_pointer(txq->qdisc) != txq->qdisc_sleeping)
  413. return true;
  414. }
  415. return false;
  416. }
  417. /* Is the device using the noop qdisc on all queues? */
  418. static inline bool qdisc_tx_is_noop(const struct net_device *dev)
  419. {
  420. unsigned int i;
  421. for (i = 0; i < dev->num_tx_queues; i++) {
  422. struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
  423. if (rcu_access_pointer(txq->qdisc) != &noop_qdisc)
  424. return false;
  425. }
  426. return true;
  427. }
  428. static inline unsigned int qdisc_pkt_len(const struct sk_buff *skb)
  429. {
  430. return qdisc_skb_cb(skb)->pkt_len;
  431. }
  432. /* additional qdisc xmit flags (NET_XMIT_MASK in linux/netdevice.h) */
  433. enum net_xmit_qdisc_t {
  434. __NET_XMIT_STOLEN = 0x00010000,
  435. __NET_XMIT_BYPASS = 0x00020000,
  436. };
  437. #ifdef CONFIG_NET_CLS_ACT
  438. #define net_xmit_drop_count(e) ((e) & __NET_XMIT_STOLEN ? 0 : 1)
  439. #else
  440. #define net_xmit_drop_count(e) (1)
  441. #endif
  442. static inline void qdisc_calculate_pkt_len(struct sk_buff *skb,
  443. const struct Qdisc *sch)
  444. {
  445. #ifdef CONFIG_NET_SCHED
  446. struct qdisc_size_table *stab = rcu_dereference_bh(sch->stab);
  447. if (stab)
  448. __qdisc_calculate_pkt_len(skb, stab);
  449. #endif
  450. }
  451. static inline int qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
  452. struct sk_buff **to_free)
  453. {
  454. qdisc_calculate_pkt_len(skb, sch);
  455. return sch->enqueue(skb, sch, to_free);
  456. }
  457. static inline bool qdisc_is_percpu_stats(const struct Qdisc *q)
  458. {
  459. return q->flags & TCQ_F_CPUSTATS;
  460. }
  461. static inline void _bstats_update(struct gnet_stats_basic_packed *bstats,
  462. __u64 bytes, __u32 packets)
  463. {
  464. bstats->bytes += bytes;
  465. bstats->packets += packets;
  466. }
  467. static inline void bstats_update(struct gnet_stats_basic_packed *bstats,
  468. const struct sk_buff *skb)
  469. {
  470. _bstats_update(bstats,
  471. qdisc_pkt_len(skb),
  472. skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1);
  473. }
  474. static inline void _bstats_cpu_update(struct gnet_stats_basic_cpu *bstats,
  475. __u64 bytes, __u32 packets)
  476. {
  477. u64_stats_update_begin(&bstats->syncp);
  478. _bstats_update(&bstats->bstats, bytes, packets);
  479. u64_stats_update_end(&bstats->syncp);
  480. }
  481. static inline void bstats_cpu_update(struct gnet_stats_basic_cpu *bstats,
  482. const struct sk_buff *skb)
  483. {
  484. u64_stats_update_begin(&bstats->syncp);
  485. bstats_update(&bstats->bstats, skb);
  486. u64_stats_update_end(&bstats->syncp);
  487. }
  488. static inline void qdisc_bstats_cpu_update(struct Qdisc *sch,
  489. const struct sk_buff *skb)
  490. {
  491. bstats_cpu_update(this_cpu_ptr(sch->cpu_bstats), skb);
  492. }
  493. static inline void qdisc_bstats_update(struct Qdisc *sch,
  494. const struct sk_buff *skb)
  495. {
  496. bstats_update(&sch->bstats, skb);
  497. }
  498. static inline void qdisc_qstats_backlog_dec(struct Qdisc *sch,
  499. const struct sk_buff *skb)
  500. {
  501. sch->qstats.backlog -= qdisc_pkt_len(skb);
  502. }
  503. static inline void qdisc_qstats_backlog_inc(struct Qdisc *sch,
  504. const struct sk_buff *skb)
  505. {
  506. sch->qstats.backlog += qdisc_pkt_len(skb);
  507. }
  508. static inline void __qdisc_qstats_drop(struct Qdisc *sch, int count)
  509. {
  510. sch->qstats.drops += count;
  511. }
  512. static inline void qstats_drop_inc(struct gnet_stats_queue *qstats)
  513. {
  514. qstats->drops++;
  515. }
  516. static inline void qstats_overlimit_inc(struct gnet_stats_queue *qstats)
  517. {
  518. qstats->overlimits++;
  519. }
  520. static inline void qdisc_qstats_drop(struct Qdisc *sch)
  521. {
  522. qstats_drop_inc(&sch->qstats);
  523. }
  524. static inline void qdisc_qstats_cpu_drop(struct Qdisc *sch)
  525. {
  526. this_cpu_inc(sch->cpu_qstats->drops);
  527. }
  528. static inline void qdisc_qstats_overlimit(struct Qdisc *sch)
  529. {
  530. sch->qstats.overlimits++;
  531. }
  532. static inline void qdisc_skb_head_init(struct qdisc_skb_head *qh)
  533. {
  534. qh->head = NULL;
  535. qh->tail = NULL;
  536. qh->qlen = 0;
  537. }
  538. static inline int __qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch,
  539. struct qdisc_skb_head *qh)
  540. {
  541. struct sk_buff *last = qh->tail;
  542. if (last) {
  543. skb->next = NULL;
  544. last->next = skb;
  545. qh->tail = skb;
  546. } else {
  547. qh->tail = skb;
  548. qh->head = skb;
  549. }
  550. qh->qlen++;
  551. qdisc_qstats_backlog_inc(sch, skb);
  552. return NET_XMIT_SUCCESS;
  553. }
  554. static inline int qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch)
  555. {
  556. return __qdisc_enqueue_tail(skb, sch, &sch->q);
  557. }
  558. static inline struct sk_buff *__qdisc_dequeue_head(struct qdisc_skb_head *qh)
  559. {
  560. struct sk_buff *skb = qh->head;
  561. if (likely(skb != NULL)) {
  562. qh->head = skb->next;
  563. qh->qlen--;
  564. if (qh->head == NULL)
  565. qh->tail = NULL;
  566. skb->next = NULL;
  567. }
  568. return skb;
  569. }
  570. static inline struct sk_buff *qdisc_dequeue_head(struct Qdisc *sch)
  571. {
  572. struct sk_buff *skb = __qdisc_dequeue_head(&sch->q);
  573. if (likely(skb != NULL)) {
  574. qdisc_qstats_backlog_dec(sch, skb);
  575. qdisc_bstats_update(sch, skb);
  576. }
  577. return skb;
  578. }
  579. /* Instead of calling kfree_skb() while root qdisc lock is held,
  580. * queue the skb for future freeing at end of __dev_xmit_skb()
  581. */
  582. static inline void __qdisc_drop(struct sk_buff *skb, struct sk_buff **to_free)
  583. {
  584. skb->next = *to_free;
  585. *to_free = skb;
  586. }
  587. static inline unsigned int __qdisc_queue_drop_head(struct Qdisc *sch,
  588. struct qdisc_skb_head *qh,
  589. struct sk_buff **to_free)
  590. {
  591. struct sk_buff *skb = __qdisc_dequeue_head(qh);
  592. if (likely(skb != NULL)) {
  593. unsigned int len = qdisc_pkt_len(skb);
  594. qdisc_qstats_backlog_dec(sch, skb);
  595. __qdisc_drop(skb, to_free);
  596. return len;
  597. }
  598. return 0;
  599. }
  600. static inline unsigned int qdisc_queue_drop_head(struct Qdisc *sch,
  601. struct sk_buff **to_free)
  602. {
  603. return __qdisc_queue_drop_head(sch, &sch->q, to_free);
  604. }
  605. static inline struct sk_buff *qdisc_peek_head(struct Qdisc *sch)
  606. {
  607. const struct qdisc_skb_head *qh = &sch->q;
  608. return qh->head;
  609. }
  610. /* generic pseudo peek method for non-work-conserving qdisc */
  611. static inline struct sk_buff *qdisc_peek_dequeued(struct Qdisc *sch)
  612. {
  613. /* we can reuse ->gso_skb because peek isn't called for root qdiscs */
  614. if (!sch->gso_skb) {
  615. sch->gso_skb = sch->dequeue(sch);
  616. if (sch->gso_skb) {
  617. /* it's still part of the queue */
  618. qdisc_qstats_backlog_inc(sch, sch->gso_skb);
  619. sch->q.qlen++;
  620. }
  621. }
  622. return sch->gso_skb;
  623. }
  624. /* use instead of qdisc->dequeue() for all qdiscs queried with ->peek() */
  625. static inline struct sk_buff *qdisc_dequeue_peeked(struct Qdisc *sch)
  626. {
  627. struct sk_buff *skb = sch->gso_skb;
  628. if (skb) {
  629. sch->gso_skb = NULL;
  630. qdisc_qstats_backlog_dec(sch, skb);
  631. sch->q.qlen--;
  632. } else {
  633. skb = sch->dequeue(sch);
  634. }
  635. return skb;
  636. }
  637. static inline void __qdisc_reset_queue(struct qdisc_skb_head *qh)
  638. {
  639. /*
  640. * We do not know the backlog in bytes of this list, it
  641. * is up to the caller to correct it
  642. */
  643. ASSERT_RTNL();
  644. if (qh->qlen) {
  645. rtnl_kfree_skbs(qh->head, qh->tail);
  646. qh->head = NULL;
  647. qh->tail = NULL;
  648. qh->qlen = 0;
  649. }
  650. }
  651. static inline void qdisc_reset_queue(struct Qdisc *sch)
  652. {
  653. __qdisc_reset_queue(&sch->q);
  654. sch->qstats.backlog = 0;
  655. }
  656. static inline struct Qdisc *qdisc_replace(struct Qdisc *sch, struct Qdisc *new,
  657. struct Qdisc **pold)
  658. {
  659. struct Qdisc *old;
  660. sch_tree_lock(sch);
  661. old = *pold;
  662. *pold = new;
  663. if (old != NULL) {
  664. qdisc_tree_reduce_backlog(old, old->q.qlen, old->qstats.backlog);
  665. qdisc_reset(old);
  666. }
  667. sch_tree_unlock(sch);
  668. return old;
  669. }
  670. static inline void rtnl_qdisc_drop(struct sk_buff *skb, struct Qdisc *sch)
  671. {
  672. rtnl_kfree_skbs(skb, skb);
  673. qdisc_qstats_drop(sch);
  674. }
  675. static inline int qdisc_drop(struct sk_buff *skb, struct Qdisc *sch,
  676. struct sk_buff **to_free)
  677. {
  678. __qdisc_drop(skb, to_free);
  679. qdisc_qstats_drop(sch);
  680. return NET_XMIT_DROP;
  681. }
  682. /* Length to Time (L2T) lookup in a qdisc_rate_table, to determine how
  683. long it will take to send a packet given its size.
  684. */
  685. static inline u32 qdisc_l2t(struct qdisc_rate_table* rtab, unsigned int pktlen)
  686. {
  687. int slot = pktlen + rtab->rate.cell_align + rtab->rate.overhead;
  688. if (slot < 0)
  689. slot = 0;
  690. slot >>= rtab->rate.cell_log;
  691. if (slot > 255)
  692. return rtab->data[255]*(slot >> 8) + rtab->data[slot & 0xFF];
  693. return rtab->data[slot];
  694. }
  695. struct psched_ratecfg {
  696. u64 rate_bytes_ps; /* bytes per second */
  697. u32 mult;
  698. u16 overhead;
  699. u8 linklayer;
  700. u8 shift;
  701. };
  702. static inline u64 psched_l2t_ns(const struct psched_ratecfg *r,
  703. unsigned int len)
  704. {
  705. len += r->overhead;
  706. if (unlikely(r->linklayer == TC_LINKLAYER_ATM))
  707. return ((u64)(DIV_ROUND_UP(len,48)*53) * r->mult) >> r->shift;
  708. return ((u64)len * r->mult) >> r->shift;
  709. }
  710. void psched_ratecfg_precompute(struct psched_ratecfg *r,
  711. const struct tc_ratespec *conf,
  712. u64 rate64);
  713. static inline void psched_ratecfg_getrate(struct tc_ratespec *res,
  714. const struct psched_ratecfg *r)
  715. {
  716. memset(res, 0, sizeof(*res));
  717. /* legacy struct tc_ratespec has a 32bit @rate field
  718. * Qdisc using 64bit rate should add new attributes
  719. * in order to maintain compatibility.
  720. */
  721. res->rate = min_t(u64, r->rate_bytes_ps, ~0U);
  722. res->overhead = r->overhead;
  723. res->linklayer = (r->linklayer & TC_LINKLAYER_MASK);
  724. }
  725. #endif