/* sch_generic.h */
#ifndef __NET_SCHED_GENERIC_H
#define __NET_SCHED_GENERIC_H

#include <linux/netdevice.h>
#include <linux/types.h>
#include <linux/rcupdate.h>
#include <linux/pkt_sched.h>
#include <linux/pkt_cls.h>
#include <linux/percpu.h>
#include <linux/dynamic_queue_limits.h>
#include <linux/list.h>
#include <net/gen_stats.h>
#include <net/rtnetlink.h>

struct Qdisc_ops;
struct qdisc_walker;
struct tcf_walker;
struct module;

struct qdisc_rate_table {
	struct tc_ratespec	rate;
	u32			data[256];
	struct qdisc_rate_table	*next;
	int			refcnt;
};

enum qdisc_state_t {
	__QDISC_STATE_SCHED,
	__QDISC_STATE_DEACTIVATED,
};

struct qdisc_size_table {
	struct rcu_head		rcu;
	struct list_head	list;
	struct tc_sizespec	szopts;
	int			refcnt;
	u16			data[];
};

/* similar to sk_buff_head, but skb->prev pointer is undefined. */
struct qdisc_skb_head {
	struct sk_buff	*head;
	struct sk_buff	*tail;
	__u32		qlen;
	spinlock_t	lock;
};

struct Qdisc {
	int 			(*enqueue)(struct sk_buff *skb,
					   struct Qdisc *sch,
					   struct sk_buff **to_free);
	struct sk_buff *	(*dequeue)(struct Qdisc *sch);
	unsigned int		flags;
#define TCQ_F_BUILTIN		1
#define TCQ_F_INGRESS		2
#define TCQ_F_CAN_BYPASS	4
#define TCQ_F_MQROOT		8
#define TCQ_F_ONETXQUEUE	0x10 /* dequeue_skb() can assume all skbs are for
				      * q->dev_queue : It can test
				      * netif_xmit_frozen_or_stopped() before
				      * dequeueing the next packet.
				      * It's true for MQ/MQPRIO slaves, or for
				      * a non-multiqueue device.
				      */
#define TCQ_F_WARN_NONWC	(1 << 16)
#define TCQ_F_CPUSTATS		0x20 /* run using percpu statistics */
#define TCQ_F_NOPARENT		0x40 /* root of its hierarchy :
				      * qdisc_tree_decrease_qlen() should stop.
				      */
#define TCQ_F_INVISIBLE		0x80 /* invisible by default in dump */
	u32			limit;
	const struct Qdisc_ops	*ops;
	struct qdisc_size_table	__rcu *stab;
	struct hlist_node	hash;
	u32			handle;
	u32			parent;
	void			*u32_node;

	struct netdev_queue	*dev_queue;

	struct net_rate_estimator __rcu *rate_est;
	struct gnet_stats_basic_cpu __percpu *cpu_bstats;
	struct gnet_stats_queue	__percpu *cpu_qstats;

	/*
	 * For the sake of performance on SMP, we put the highly modified
	 * fields at the end.
	 */
	struct sk_buff		*gso_skb ____cacheline_aligned_in_smp;
	struct qdisc_skb_head	q;
	struct gnet_stats_basic_packed bstats;
	seqcount_t		running;
	struct gnet_stats_queue	qstats;
	unsigned long		state;
	struct Qdisc		*next_sched;
	struct sk_buff		*skb_bad_txq;
	struct rcu_head		rcu_head;
	int			padded;
	atomic_t		refcnt;

	spinlock_t		busylock ____cacheline_aligned_in_smp;
};

static inline bool qdisc_is_running(const struct Qdisc *qdisc)
{
	return (raw_read_seqcount(&qdisc->running) & 1) ? true : false;
}

static inline bool qdisc_run_begin(struct Qdisc *qdisc)
{
	if (qdisc_is_running(qdisc))
		return false;
	/* Variant of write_seqcount_begin() telling lockdep a trylock
	 * was attempted.
	 */
	raw_write_seqcount_begin(&qdisc->running);
	seqcount_acquire(&qdisc->running.dep_map, 0, 1, _RET_IP_);
	return true;
}

static inline void qdisc_run_end(struct Qdisc *qdisc)
{
	write_seqcount_end(&qdisc->running);
}
static inline bool qdisc_may_bulk(const struct Qdisc *qdisc)
{
	return qdisc->flags & TCQ_F_ONETXQUEUE;
}

static inline int qdisc_avail_bulklimit(const struct netdev_queue *txq)
{
#ifdef CONFIG_BQL
	/* Non-BQL migrated drivers will return 0, too. */
	return dql_avail(&txq->dql);
#else
	return 0;
#endif
}
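
/* Illustrative sketch (assumed caller, modeled on dequeue_skb() in
 * net/sched/sch_generic.c): bulk dequeue is only attempted when every skb
 * is known to target one TX queue, and is bounded by the BQL byte budget:
 *
 *	if (qdisc_may_bulk(q)) {
 *		int bytelimit = qdisc_avail_bulklimit(txq) - skb->len;
 *		// keep chaining dequeued skbs while bytelimit > 0 ...
 *	}
 */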
struct Qdisc_class_ops {
	/* Child qdisc manipulation */
	struct netdev_queue *	(*select_queue)(struct Qdisc *, struct tcmsg *);
	int			(*graft)(struct Qdisc *, unsigned long cl,
					 struct Qdisc *, struct Qdisc **);
	struct Qdisc *		(*leaf)(struct Qdisc *, unsigned long cl);
	void			(*qlen_notify)(struct Qdisc *, unsigned long);

	/* Class manipulation routines */
	unsigned long		(*get)(struct Qdisc *, u32 classid);
	void			(*put)(struct Qdisc *, unsigned long);
	int			(*change)(struct Qdisc *, u32, u32,
					  struct nlattr **, unsigned long *);
	int			(*delete)(struct Qdisc *, unsigned long);
	void			(*walk)(struct Qdisc *, struct qdisc_walker *arg);

	/* Filter manipulation */
	struct tcf_block *	(*tcf_block)(struct Qdisc *, unsigned long);
	bool			(*tcf_cl_offload)(u32 classid);
	unsigned long		(*bind_tcf)(struct Qdisc *, unsigned long,
					    u32 classid);
	void			(*unbind_tcf)(struct Qdisc *, unsigned long);

	/* rtnetlink specific */
	int			(*dump)(struct Qdisc *, unsigned long,
					struct sk_buff *skb, struct tcmsg *);
	int			(*dump_stats)(struct Qdisc *, unsigned long,
					      struct gnet_dump *);
};

struct Qdisc_ops {
	struct Qdisc_ops	*next;
	const struct Qdisc_class_ops *cl_ops;
	char			id[IFNAMSIZ];
	int			priv_size;

	int 			(*enqueue)(struct sk_buff *skb,
					   struct Qdisc *sch,
					   struct sk_buff **to_free);
	struct sk_buff *	(*dequeue)(struct Qdisc *);
	struct sk_buff *	(*peek)(struct Qdisc *);

	int			(*init)(struct Qdisc *, struct nlattr *arg);
	void			(*reset)(struct Qdisc *);
	void			(*destroy)(struct Qdisc *);
	int			(*change)(struct Qdisc *, struct nlattr *arg);
	void			(*attach)(struct Qdisc *);

	int			(*dump)(struct Qdisc *, struct sk_buff *);
	int			(*dump_stats)(struct Qdisc *, struct gnet_dump *);

	struct module		*owner;
};

struct tcf_result {
	union {
		struct {
			unsigned long	class;
			u32		classid;
		};
		const struct tcf_proto *goto_tp;
	};
};

struct tcf_proto_ops {
	struct list_head	head;
	char			kind[IFNAMSIZ];

	int			(*classify)(struct sk_buff *,
					    const struct tcf_proto *,
					    struct tcf_result *);
	int			(*init)(struct tcf_proto *);
	void			(*destroy)(struct tcf_proto *);

	unsigned long		(*get)(struct tcf_proto *, u32 handle);
	int			(*change)(struct net *net, struct sk_buff *,
					  struct tcf_proto *, unsigned long,
					  u32 handle, struct nlattr **,
					  unsigned long *, bool);
	int			(*delete)(struct tcf_proto *, unsigned long, bool *);
	void			(*walk)(struct tcf_proto *, struct tcf_walker *arg);

	/* rtnetlink specific */
	int			(*dump)(struct net *, struct tcf_proto *, unsigned long,
					struct sk_buff *skb, struct tcmsg *);

	struct module		*owner;
};

struct tcf_proto {
	/* Fast access part */
	struct tcf_proto __rcu	*next;
	void __rcu		*root;
	int			(*classify)(struct sk_buff *,
					    const struct tcf_proto *,
					    struct tcf_result *);
	__be16			protocol;

	/* All the rest */
	u32			prio;
	u32			classid;
	struct Qdisc		*q;
	void			*data;
	const struct tcf_proto_ops	*ops;
	struct tcf_chain	*chain;
	struct rcu_head		rcu;
};

struct qdisc_skb_cb {
	unsigned int		pkt_len;
	u16			slave_dev_queue_mapping;
	u16			tc_classid;
#define QDISC_CB_PRIV_LEN 20
	unsigned char		data[QDISC_CB_PRIV_LEN];
};

struct tcf_chain {
	struct tcf_proto __rcu *filter_chain;
	struct tcf_proto __rcu **p_filter_chain;
	struct list_head list;
	struct tcf_block *block;
	u32 index; /* chain index */
	unsigned int refcnt;
};

struct tcf_block {
	struct list_head chain_list;
};

static inline void qdisc_cb_private_validate(const struct sk_buff *skb, int sz)
{
	struct qdisc_skb_cb *qcb;

	BUILD_BUG_ON(sizeof(skb->cb) < offsetof(struct qdisc_skb_cb, data) + sz);
	BUILD_BUG_ON(sizeof(qcb->data) < sz);
}
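
/* Example (illustrative, following the pattern used by qdiscs such as
 * netem): a qdisc overlays its own per-packet state on the private area
 * of qdisc_skb_cb and validates the size at compile time:
 *
 *	struct my_skb_cb {		// hypothetical private state
 *		u64 time_to_send;
 *	};
 *
 *	static inline struct my_skb_cb *my_skb_cb(struct sk_buff *skb)
 *	{
 *		qdisc_cb_private_validate(skb, sizeof(struct my_skb_cb));
 *		return (struct my_skb_cb *)qdisc_skb_cb(skb)->data;
 *	}
 */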
static inline int qdisc_qlen(const struct Qdisc *q)
{
	return q->q.qlen;
}

static inline struct qdisc_skb_cb *qdisc_skb_cb(const struct sk_buff *skb)
{
	return (struct qdisc_skb_cb *)skb->cb;
}

static inline spinlock_t *qdisc_lock(struct Qdisc *qdisc)
{
	return &qdisc->q.lock;
}

static inline struct Qdisc *qdisc_root(const struct Qdisc *qdisc)
{
	struct Qdisc *q = rcu_dereference_rtnl(qdisc->dev_queue->qdisc);

	return q;
}

static inline struct Qdisc *qdisc_root_sleeping(const struct Qdisc *qdisc)
{
	return qdisc->dev_queue->qdisc_sleeping;
}

/* The qdisc root lock is a mechanism by which the top level of a qdisc
 * tree can be locked from any qdisc node in the forest.  This allows
 * changing the configuration of some aspect of the qdisc tree while
 * blocking out asynchronous qdisc access in the packet processing paths.
 *
 * It is only legal to do this when the root will not change on us.
 * Otherwise we'll potentially lock the wrong qdisc root.  This is
 * enforced by holding the RTNL semaphore, which all users of this lock
 * accessor must do.
 */
static inline spinlock_t *qdisc_root_lock(const struct Qdisc *qdisc)
{
	struct Qdisc *root = qdisc_root(qdisc);

	ASSERT_RTNL();
	return qdisc_lock(root);
}

static inline spinlock_t *qdisc_root_sleeping_lock(const struct Qdisc *qdisc)
{
	struct Qdisc *root = qdisc_root_sleeping(qdisc);

	ASSERT_RTNL();
	return qdisc_lock(root);
}

static inline seqcount_t *qdisc_root_sleeping_running(const struct Qdisc *qdisc)
{
	struct Qdisc *root = qdisc_root_sleeping(qdisc);

	ASSERT_RTNL();
	return &root->running;
}

static inline struct net_device *qdisc_dev(const struct Qdisc *qdisc)
{
	return qdisc->dev_queue->dev;
}

static inline void sch_tree_lock(const struct Qdisc *q)
{
	spin_lock_bh(qdisc_root_sleeping_lock(q));
}

static inline void sch_tree_unlock(const struct Qdisc *q)
{
	spin_unlock_bh(qdisc_root_sleeping_lock(q));
}
#define tcf_tree_lock(tp)	sch_tree_lock((tp)->q)
#define tcf_tree_unlock(tp)	sch_tree_unlock((tp)->q)
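
/* Usage sketch (illustrative): a qdisc's ->change() typically takes the
 * tree lock around updates that would otherwise race with the datapath:
 *
 *	sch_tree_lock(sch);
 *	sch->limit = new_limit;		// hypothetical parameter update
 *	while (sch->q.qlen > sch->limit)
 *		... drop excess packets ...
 *	sch_tree_unlock(sch);
 */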
extern struct Qdisc noop_qdisc;
extern struct Qdisc_ops noop_qdisc_ops;
extern struct Qdisc_ops pfifo_fast_ops;
extern struct Qdisc_ops mq_qdisc_ops;
extern struct Qdisc_ops noqueue_qdisc_ops;
extern const struct Qdisc_ops *default_qdisc_ops;
static inline const struct Qdisc_ops *
get_default_qdisc_ops(const struct net_device *dev, int ntx)
{
	return ntx < dev->real_num_tx_queues ?
			default_qdisc_ops : &pfifo_fast_ops;
}

struct Qdisc_class_common {
	u32			classid;
	struct hlist_node	hnode;
};

struct Qdisc_class_hash {
	struct hlist_head	*hash;
	unsigned int		hashsize;
	unsigned int		hashmask;
	unsigned int		hashelems;
};

static inline unsigned int qdisc_class_hash(u32 id, u32 mask)
{
	id ^= id >> 8;
	id ^= id >> 4;
	return id & mask;
}

static inline struct Qdisc_class_common *
qdisc_class_find(const struct Qdisc_class_hash *hash, u32 id)
{
	struct Qdisc_class_common *cl;
	unsigned int h;

	h = qdisc_class_hash(id, hash->hashmask);
	hlist_for_each_entry(cl, &hash->hash[h], hnode) {
		if (cl->classid == id)
			return cl;
	}
	return NULL;
}
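
/* Example (illustrative, following the pattern of classful qdiscs such as
 * HTB): a class embeds Qdisc_class_common, is found by classid, and the
 * enclosing class is recovered with container_of():
 *
 *	struct my_class {			// hypothetical
 *		struct Qdisc_class_common common;
 *		...
 *	};
 *
 *	struct Qdisc_class_common *clc;
 *
 *	clc = qdisc_class_find(&q->clhash, classid);
 *	cl = clc ? container_of(clc, struct my_class, common) : NULL;
 */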
int qdisc_class_hash_init(struct Qdisc_class_hash *);
void qdisc_class_hash_insert(struct Qdisc_class_hash *,
			     struct Qdisc_class_common *);
void qdisc_class_hash_remove(struct Qdisc_class_hash *,
			     struct Qdisc_class_common *);
void qdisc_class_hash_grow(struct Qdisc *, struct Qdisc_class_hash *);
void qdisc_class_hash_destroy(struct Qdisc_class_hash *);

void dev_init_scheduler(struct net_device *dev);
void dev_shutdown(struct net_device *dev);
void dev_activate(struct net_device *dev);
void dev_deactivate(struct net_device *dev);
void dev_deactivate_many(struct list_head *head);
struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue,
			      struct Qdisc *qdisc);
void qdisc_reset(struct Qdisc *qdisc);
void qdisc_destroy(struct Qdisc *qdisc);
void qdisc_tree_reduce_backlog(struct Qdisc *qdisc, unsigned int n,
			       unsigned int len);
struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
			  const struct Qdisc_ops *ops);
struct Qdisc *qdisc_create_dflt(struct netdev_queue *dev_queue,
				const struct Qdisc_ops *ops, u32 parentid);
void __qdisc_calculate_pkt_len(struct sk_buff *skb,
			       const struct qdisc_size_table *stab);
int skb_do_redirect(struct sk_buff *);

static inline void skb_reset_tc(struct sk_buff *skb)
{
#ifdef CONFIG_NET_CLS_ACT
	skb->tc_redirected = 0;
#endif
}

static inline bool skb_at_tc_ingress(const struct sk_buff *skb)
{
#ifdef CONFIG_NET_CLS_ACT
	return skb->tc_at_ingress;
#else
	return false;
#endif
}

static inline bool skb_skip_tc_classify(struct sk_buff *skb)
{
#ifdef CONFIG_NET_CLS_ACT
	if (skb->tc_skip_classify) {
		skb->tc_skip_classify = 0;
		return true;
	}
#endif
	return false;
}
/* Reset all TX qdiscs of a device at or above a given queue index. */
static inline void qdisc_reset_all_tx_gt(struct net_device *dev, unsigned int i)
{
	struct Qdisc *qdisc;

	for (; i < dev->num_tx_queues; i++) {
		qdisc = rtnl_dereference(netdev_get_tx_queue(dev, i)->qdisc);
		if (qdisc) {
			spin_lock_bh(qdisc_lock(qdisc));
			qdisc_reset(qdisc);
			spin_unlock_bh(qdisc_lock(qdisc));
		}
	}
}

static inline void qdisc_reset_all_tx(struct net_device *dev)
{
	qdisc_reset_all_tx_gt(dev, 0);
}

/* Are all TX queues of the device empty? */
static inline bool qdisc_all_tx_empty(const struct net_device *dev)
{
	unsigned int i;

	rcu_read_lock();
	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
		const struct Qdisc *q = rcu_dereference(txq->qdisc);

		if (q->q.qlen) {
			rcu_read_unlock();
			return false;
		}
	}
	rcu_read_unlock();
	return true;
}

/* Are any of the TX qdiscs changing? */
static inline bool qdisc_tx_changing(const struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

		if (rcu_access_pointer(txq->qdisc) != txq->qdisc_sleeping)
			return true;
	}
	return false;
}

/* Is the device using the noop qdisc on all queues? */
static inline bool qdisc_tx_is_noop(const struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

		if (rcu_access_pointer(txq->qdisc) != &noop_qdisc)
			return false;
	}
	return true;
}
static inline unsigned int qdisc_pkt_len(const struct sk_buff *skb)
{
	return qdisc_skb_cb(skb)->pkt_len;
}

/* additional qdisc xmit flags (NET_XMIT_MASK in linux/netdevice.h) */
enum net_xmit_qdisc_t {
	__NET_XMIT_STOLEN = 0x00010000,
	__NET_XMIT_BYPASS = 0x00020000,
};

#ifdef CONFIG_NET_CLS_ACT
#define net_xmit_drop_count(e)	((e) & __NET_XMIT_STOLEN ? 0 : 1)
#else
#define net_xmit_drop_count(e)	(1)
#endif
static inline void qdisc_calculate_pkt_len(struct sk_buff *skb,
					   const struct Qdisc *sch)
{
#ifdef CONFIG_NET_SCHED
	struct qdisc_size_table *stab = rcu_dereference_bh(sch->stab);

	if (stab)
		__qdisc_calculate_pkt_len(skb, stab);
#endif
}

static inline int qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
				struct sk_buff **to_free)
{
	qdisc_calculate_pkt_len(skb, sch);
	return sch->enqueue(skb, sch, to_free);
}
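
/* Usage sketch (illustrative, following the pattern of classful qdiscs
 * such as TBF when enqueueing to a child): __NET_XMIT_STOLEN means the
 * child consumed the skb without queueing it, so the parent must not
 * count a drop of its own:
 *
 *	ret = qdisc_enqueue(skb, q->qdisc, to_free);
 *	if (ret != NET_XMIT_SUCCESS) {
 *		if (net_xmit_drop_count(ret))
 *			qdisc_qstats_drop(sch);
 *		return ret;
 *	}
 */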
static inline bool qdisc_is_percpu_stats(const struct Qdisc *q)
{
	return q->flags & TCQ_F_CPUSTATS;
}

static inline void _bstats_update(struct gnet_stats_basic_packed *bstats,
				  __u64 bytes, __u32 packets)
{
	bstats->bytes += bytes;
	bstats->packets += packets;
}

static inline void bstats_update(struct gnet_stats_basic_packed *bstats,
				 const struct sk_buff *skb)
{
	_bstats_update(bstats,
		       qdisc_pkt_len(skb),
		       skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1);
}

static inline void _bstats_cpu_update(struct gnet_stats_basic_cpu *bstats,
				      __u64 bytes, __u32 packets)
{
	u64_stats_update_begin(&bstats->syncp);
	_bstats_update(&bstats->bstats, bytes, packets);
	u64_stats_update_end(&bstats->syncp);
}

static inline void bstats_cpu_update(struct gnet_stats_basic_cpu *bstats,
				     const struct sk_buff *skb)
{
	u64_stats_update_begin(&bstats->syncp);
	bstats_update(&bstats->bstats, skb);
	u64_stats_update_end(&bstats->syncp);
}

static inline void qdisc_bstats_cpu_update(struct Qdisc *sch,
					   const struct sk_buff *skb)
{
	bstats_cpu_update(this_cpu_ptr(sch->cpu_bstats), skb);
}

static inline void qdisc_bstats_update(struct Qdisc *sch,
				       const struct sk_buff *skb)
{
	bstats_update(&sch->bstats, skb);
}

static inline void qdisc_qstats_backlog_dec(struct Qdisc *sch,
					    const struct sk_buff *skb)
{
	sch->qstats.backlog -= qdisc_pkt_len(skb);
}

static inline void qdisc_qstats_backlog_inc(struct Qdisc *sch,
					    const struct sk_buff *skb)
{
	sch->qstats.backlog += qdisc_pkt_len(skb);
}

static inline void __qdisc_qstats_drop(struct Qdisc *sch, int count)
{
	sch->qstats.drops += count;
}

static inline void qstats_drop_inc(struct gnet_stats_queue *qstats)
{
	qstats->drops++;
}

static inline void qstats_overlimit_inc(struct gnet_stats_queue *qstats)
{
	qstats->overlimits++;
}

static inline void qdisc_qstats_drop(struct Qdisc *sch)
{
	qstats_drop_inc(&sch->qstats);
}

static inline void qdisc_qstats_cpu_drop(struct Qdisc *sch)
{
	this_cpu_inc(sch->cpu_qstats->drops);
}

static inline void qdisc_qstats_overlimit(struct Qdisc *sch)
{
	sch->qstats.overlimits++;
}

static inline void qdisc_skb_head_init(struct qdisc_skb_head *qh)
{
	qh->head = NULL;
	qh->tail = NULL;
	qh->qlen = 0;
}
static inline int __qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch,
				       struct qdisc_skb_head *qh)
{
	struct sk_buff *last = qh->tail;

	if (last) {
		skb->next = NULL;
		last->next = skb;
		qh->tail = skb;
	} else {
		qh->tail = skb;
		qh->head = skb;
	}
	qh->qlen++;
	qdisc_qstats_backlog_inc(sch, skb);

	return NET_XMIT_SUCCESS;
}

static inline int qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch)
{
	return __qdisc_enqueue_tail(skb, sch, &sch->q);
}

static inline struct sk_buff *__qdisc_dequeue_head(struct qdisc_skb_head *qh)
{
	struct sk_buff *skb = qh->head;

	if (likely(skb != NULL)) {
		qh->head = skb->next;
		qh->qlen--;
		if (qh->head == NULL)
			qh->tail = NULL;
		skb->next = NULL;
	}

	return skb;
}

static inline struct sk_buff *qdisc_dequeue_head(struct Qdisc *sch)
{
	struct sk_buff *skb = __qdisc_dequeue_head(&sch->q);

	if (likely(skb != NULL)) {
		qdisc_qstats_backlog_dec(sch, skb);
		qdisc_bstats_update(sch, skb);
	}

	return skb;
}
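
/* Example (illustrative, modeled on the pfifo qdisc in net/sched/sch_fifo.c):
 * a complete packet-count-limited FIFO can be built from these helpers:
 *
 *	static int my_fifo_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 *				   struct sk_buff **to_free)
 *	{
 *		if (likely(sch->q.qlen < sch->limit))
 *			return qdisc_enqueue_tail(skb, sch);
 *		return qdisc_drop(skb, sch, to_free);	// defined below
 *	}
 *
 *	// paired with .dequeue = qdisc_dequeue_head in its Qdisc_ops
 */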
/* Instead of calling kfree_skb() while the root qdisc lock is held,
 * queue the skb for future freeing at the end of __dev_xmit_skb().
 */
static inline void __qdisc_drop(struct sk_buff *skb, struct sk_buff **to_free)
{
	skb->next = *to_free;
	*to_free = skb;
}
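
/* Illustrative sketch of the caller side (modeled on __dev_xmit_skb() in
 * net/core/dev.c): the to_free list is flushed only after the qdisc lock
 * is released, so the actual freeing happens outside the critical section:
 *
 *	struct sk_buff *to_free = NULL;
 *
 *	spin_lock(root_lock);
 *	rc = qdisc_enqueue(skb, q, &to_free);
 *	spin_unlock(root_lock);
 *	if (unlikely(to_free))
 *		kfree_skb_list(to_free);
 */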
static inline unsigned int __qdisc_queue_drop_head(struct Qdisc *sch,
						   struct qdisc_skb_head *qh,
						   struct sk_buff **to_free)
{
	struct sk_buff *skb = __qdisc_dequeue_head(qh);

	if (likely(skb != NULL)) {
		unsigned int len = qdisc_pkt_len(skb);

		qdisc_qstats_backlog_dec(sch, skb);
		__qdisc_drop(skb, to_free);
		return len;
	}

	return 0;
}
static inline unsigned int qdisc_queue_drop_head(struct Qdisc *sch,
						 struct sk_buff **to_free)
{
	return __qdisc_queue_drop_head(sch, &sch->q, to_free);
}

static inline struct sk_buff *qdisc_peek_head(struct Qdisc *sch)
{
	const struct qdisc_skb_head *qh = &sch->q;

	return qh->head;
}

/* generic pseudo peek method for non-work-conserving qdisc */
static inline struct sk_buff *qdisc_peek_dequeued(struct Qdisc *sch)
{
	/* we can reuse ->gso_skb because peek isn't called for root qdiscs */
	if (!sch->gso_skb) {
		sch->gso_skb = sch->dequeue(sch);
		if (sch->gso_skb) {
			/* it's still part of the queue */
			qdisc_qstats_backlog_inc(sch, sch->gso_skb);
			sch->q.qlen++;
		}
	}

	return sch->gso_skb;
}

/* use instead of qdisc->dequeue() for all qdiscs queried with ->peek() */
static inline struct sk_buff *qdisc_dequeue_peeked(struct Qdisc *sch)
{
	struct sk_buff *skb = sch->gso_skb;

	if (skb) {
		sch->gso_skb = NULL;
		qdisc_qstats_backlog_dec(sch, skb);
		sch->q.qlen--;
	} else {
		skb = sch->dequeue(sch);
	}

	return skb;
}
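
/* Usage sketch (illustrative, following shapers such as TBF): a
 * non-work-conserving qdisc peeks at its child to decide whether the
 * packet may be sent yet, and only then commits the dequeue:
 *
 *	skb = q->qdisc->ops->peek(q->qdisc);	// i.e. qdisc_peek_dequeued
 *	if (skb && enough_tokens(skb)) {	// hypothetical rate check
 *		skb = qdisc_dequeue_peeked(q->qdisc);
 *		...
 *	}
 */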
static inline void __qdisc_reset_queue(struct qdisc_skb_head *qh)
{
	/*
	 * We do not know the backlog in bytes of this list;
	 * it is up to the caller to correct it.
	 */
	ASSERT_RTNL();
	if (qh->qlen) {
		rtnl_kfree_skbs(qh->head, qh->tail);

		qh->head = NULL;
		qh->tail = NULL;
		qh->qlen = 0;
	}
}

static inline void qdisc_reset_queue(struct Qdisc *sch)
{
	__qdisc_reset_queue(&sch->q);
	sch->qstats.backlog = 0;
}

static inline struct Qdisc *qdisc_replace(struct Qdisc *sch, struct Qdisc *new,
					  struct Qdisc **pold)
{
	struct Qdisc *old;

	sch_tree_lock(sch);
	old = *pold;
	*pold = new;
	if (old != NULL) {
		qdisc_tree_reduce_backlog(old, old->q.qlen, old->qstats.backlog);
		qdisc_reset(old);
	}
	sch_tree_unlock(sch);

	return old;
}
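
/* Example (illustrative): a classful qdisc's ->graft() typically boils
 * down to swapping the child pointer under the tree lock:
 *
 *	static int my_graft(struct Qdisc *sch, unsigned long arg,
 *			    struct Qdisc *new, struct Qdisc **old)
 *	{
 *		struct my_sched_data *q = qdisc_priv(sch);	// hypothetical
 *
 *		if (new == NULL)
 *			new = &noop_qdisc;
 *		*old = qdisc_replace(sch, new, &q->qdisc);
 *		return 0;
 *	}
 */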
static inline void rtnl_qdisc_drop(struct sk_buff *skb, struct Qdisc *sch)
{
	rtnl_kfree_skbs(skb, skb);
	qdisc_qstats_drop(sch);
}

static inline int qdisc_drop(struct sk_buff *skb, struct Qdisc *sch,
			     struct sk_buff **to_free)
{
	__qdisc_drop(skb, to_free);
	qdisc_qstats_drop(sch);

	return NET_XMIT_DROP;
}

/* Length to Time (L2T) lookup in a qdisc_rate_table, to determine how
 * long it will take to send a packet given its size.
 */
static inline u32 qdisc_l2t(struct qdisc_rate_table *rtab, unsigned int pktlen)
{
	int slot = pktlen + rtab->rate.cell_align + rtab->rate.overhead;

	if (slot < 0)
		slot = 0;
	slot >>= rtab->rate.cell_log;
	if (slot > 255)
		return rtab->data[255] * (slot >> 8) + rtab->data[slot & 0xFF];
	return rtab->data[slot];
}
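
/* Worked example (illustrative): with cell_log = 3 and zero cell_align
 * and overhead, a 1500-byte packet maps to slot 1500 >> 3 = 187, and the
 * lookup returns rtab->data[187], the precomputed transmission time for
 * that size class.  Slots above 255 are approximated from multiples of
 * the top entry plus the entry selected by the low 8 bits.
 */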
struct psched_ratecfg {
	u64	rate_bytes_ps; /* bytes per second */
	u32	mult;
	u16	overhead;
	u8	linklayer;
	u8	shift;
};

static inline u64 psched_l2t_ns(const struct psched_ratecfg *r,
				unsigned int len)
{
	len += r->overhead;

	if (unlikely(r->linklayer == TC_LINKLAYER_ATM))
		return ((u64)(DIV_ROUND_UP(len, 48) * 53) * r->mult) >> r->shift;

	return ((u64)len * r->mult) >> r->shift;
}
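
/* Worked example (illustrative): psched_ratecfg_precompute() chooses mult
 * and shift so that (len * mult) >> shift approximates
 * len * NSEC_PER_SEC / rate_bytes_ps.  At 125,000,000 bytes/s (1 Gbit/s),
 * a 1500-byte packet therefore takes about 1500 * 1e9 / 125e6 = 12,000 ns.
 * On ATM link layers the length is first rounded up to whole 53-byte
 * cells carrying 48 bytes of payload each: DIV_ROUND_UP(1500, 48) = 32
 * cells, i.e. 1696 bytes on the wire.
 */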
void psched_ratecfg_precompute(struct psched_ratecfg *r,
			       const struct tc_ratespec *conf,
			       u64 rate64);

static inline void psched_ratecfg_getrate(struct tc_ratespec *res,
					  const struct psched_ratecfg *r)
{
	memset(res, 0, sizeof(*res));

	/* legacy struct tc_ratespec has a 32bit @rate field;
	 * qdiscs using 64bit rates should add new attributes
	 * in order to maintain compatibility.
	 */
	res->rate = min_t(u64, r->rate_bytes_ps, ~0U);

	res->overhead = r->overhead;
	res->linklayer = (r->linklayer & TC_LINKLAYER_MASK);
}

#endif