/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NET_SCHED_GENERIC_H
#define __NET_SCHED_GENERIC_H

#include <linux/netdevice.h>
#include <linux/types.h>
#include <linux/rcupdate.h>
#include <linux/pkt_sched.h>
#include <linux/pkt_cls.h>
#include <linux/percpu.h>
#include <linux/dynamic_queue_limits.h>
#include <linux/list.h>
#include <linux/refcount.h>
#include <linux/workqueue.h>
#include <net/gen_stats.h>
#include <net/rtnetlink.h>

struct Qdisc_ops;
struct qdisc_walker;
struct tcf_walker;
struct module;

struct qdisc_rate_table {
	struct tc_ratespec rate;
	u32		data[256];
	struct qdisc_rate_table *next;
	int		refcnt;
};

enum qdisc_state_t {
	__QDISC_STATE_SCHED,
	__QDISC_STATE_DEACTIVATED,
};

struct qdisc_size_table {
	struct rcu_head		rcu;
	struct list_head	list;
	struct tc_sizespec	szopts;
	int			refcnt;
	u16			data[];
};

/* similar to sk_buff_head, but skb->prev pointer is undefined. */
struct qdisc_skb_head {
	struct sk_buff	*head;
	struct sk_buff	*tail;
	__u32		qlen;
	spinlock_t	lock;
};

struct Qdisc {
	int 			(*enqueue)(struct sk_buff *skb,
					   struct Qdisc *sch,
					   struct sk_buff **to_free);
	struct sk_buff *	(*dequeue)(struct Qdisc *sch);
	unsigned int		flags;
#define TCQ_F_BUILTIN		1
#define TCQ_F_INGRESS		2
#define TCQ_F_CAN_BYPASS	4
#define TCQ_F_MQROOT		8
#define TCQ_F_ONETXQUEUE	0x10 /* dequeue_skb() can assume all skbs are for
				      * q->dev_queue: it can test
				      * netif_xmit_frozen_or_stopped() before
				      * dequeueing the next packet.
				      * It's true for MQ/MQPRIO slaves, or a
				      * non-multiqueue device.
				      */
#define TCQ_F_WARN_NONWC	(1 << 16)
#define TCQ_F_CPUSTATS		0x20 /* run using percpu statistics */
#define TCQ_F_NOPARENT		0x40 /* root of its hierarchy:
				      * qdisc_tree_decrease_qlen() should stop.
				      */
#define TCQ_F_INVISIBLE		0x80 /* invisible by default in dump */
	u32			limit;
	const struct Qdisc_ops	*ops;
	struct qdisc_size_table	__rcu *stab;
	struct hlist_node	hash;
	u32			handle;
	u32			parent;

	struct netdev_queue	*dev_queue;

	struct net_rate_estimator __rcu *rate_est;
	struct gnet_stats_basic_cpu __percpu *cpu_bstats;
	struct gnet_stats_queue	__percpu *cpu_qstats;

	/*
	 * For performance's sake on SMP, we put highly modified fields at the end
	 */
	struct sk_buff		*gso_skb ____cacheline_aligned_in_smp;
	struct qdisc_skb_head	q;
	struct gnet_stats_basic_packed bstats;
	seqcount_t		running;
	struct gnet_stats_queue	qstats;
	unsigned long		state;
	struct Qdisc		*next_sched;
	struct sk_buff		*skb_bad_txq;
	int			padded;
	refcount_t		refcnt;

	spinlock_t		busylock ____cacheline_aligned_in_smp;
};

static inline void qdisc_refcount_inc(struct Qdisc *qdisc)
{
	if (qdisc->flags & TCQ_F_BUILTIN)
		return;
	refcount_inc(&qdisc->refcnt);
}

static inline bool qdisc_is_running(const struct Qdisc *qdisc)
{
	return (raw_read_seqcount(&qdisc->running) & 1) ? true : false;
}

static inline bool qdisc_run_begin(struct Qdisc *qdisc)
{
	if (qdisc_is_running(qdisc))
		return false;
	/* Variant of write_seqcount_begin() telling lockdep a trylock
	 * was attempted.
	 */
	raw_write_seqcount_begin(&qdisc->running);
	seqcount_acquire(&qdisc->running.dep_map, 0, 1, _RET_IP_);
	return true;
}

static inline void qdisc_run_end(struct Qdisc *qdisc)
{
	write_seqcount_end(&qdisc->running);
}
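
/* Typical trylock-style usage of the pair above (illustrative sketch; in
 * the real tree, qdisc_run() in net/pkt_sched.h wraps this pattern around
 * __qdisc_run()):
 *
 *	if (qdisc_run_begin(q)) {
 *		... dequeue and transmit packets ...
 *		qdisc_run_end(q);
 *	}
 *
 * Only one CPU wins the seqcount "trylock"; the others back off and rely
 * on the winner to drain the queue.
 */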
static inline bool qdisc_may_bulk(const struct Qdisc *qdisc)
{
	return qdisc->flags & TCQ_F_ONETXQUEUE;
}

static inline int qdisc_avail_bulklimit(const struct netdev_queue *txq)
{
#ifdef CONFIG_BQL
	/* Non-BQL migrated drivers will return 0, too. */
	return dql_avail(&txq->dql);
#else
	return 0;
#endif
}

struct Qdisc_class_ops {
	/* Child qdisc manipulation */
	struct netdev_queue *	(*select_queue)(struct Qdisc *, struct tcmsg *);
	int			(*graft)(struct Qdisc *, unsigned long cl,
					 struct Qdisc *, struct Qdisc **);
	struct Qdisc *		(*leaf)(struct Qdisc *, unsigned long cl);
	void			(*qlen_notify)(struct Qdisc *, unsigned long);

	/* Class manipulation routines */
	unsigned long		(*find)(struct Qdisc *, u32 classid);
	int			(*change)(struct Qdisc *, u32, u32,
					  struct nlattr **, unsigned long *);
	int			(*delete)(struct Qdisc *, unsigned long);
	void			(*walk)(struct Qdisc *, struct qdisc_walker *arg);

	/* Filter manipulation */
	struct tcf_block *	(*tcf_block)(struct Qdisc *, unsigned long);
	unsigned long		(*bind_tcf)(struct Qdisc *, unsigned long,
					    u32 classid);
	void			(*unbind_tcf)(struct Qdisc *, unsigned long);

	/* rtnetlink specific */
	int			(*dump)(struct Qdisc *, unsigned long,
					struct sk_buff *skb, struct tcmsg *);
	int			(*dump_stats)(struct Qdisc *, unsigned long,
					      struct gnet_dump *);
};

struct Qdisc_ops {
	struct Qdisc_ops	*next;
	const struct Qdisc_class_ops	*cl_ops;
	char			id[IFNAMSIZ];
	int			priv_size;

	int 			(*enqueue)(struct sk_buff *skb,
					   struct Qdisc *sch,
					   struct sk_buff **to_free);
	struct sk_buff *	(*dequeue)(struct Qdisc *);
	struct sk_buff *	(*peek)(struct Qdisc *);

	int			(*init)(struct Qdisc *, struct nlattr *arg);
	void			(*reset)(struct Qdisc *);
	void			(*destroy)(struct Qdisc *);
	int			(*change)(struct Qdisc *, struct nlattr *arg);
	void			(*attach)(struct Qdisc *);

	int			(*dump)(struct Qdisc *, struct sk_buff *);
	int			(*dump_stats)(struct Qdisc *, struct gnet_dump *);

	struct module		*owner;
};

struct tcf_result {
	union {
		struct {
			unsigned long	class;
			u32		classid;
		};
		const struct tcf_proto *goto_tp;
	};
};

struct tcf_proto_ops {
	struct list_head	head;
	char			kind[IFNAMSIZ];

	int			(*classify)(struct sk_buff *,
					    const struct tcf_proto *,
					    struct tcf_result *);
	int			(*init)(struct tcf_proto *);
	void			(*destroy)(struct tcf_proto *);

	void *			(*get)(struct tcf_proto *, u32 handle);
	int			(*change)(struct net *net, struct sk_buff *,
					  struct tcf_proto *, unsigned long,
					  u32 handle, struct nlattr **,
					  void **, bool);
	int			(*delete)(struct tcf_proto *, void *, bool *);
	void			(*walk)(struct tcf_proto *, struct tcf_walker *arg);
	void			(*bind_class)(void *, u32, unsigned long);

	/* rtnetlink specific */
	int			(*dump)(struct net *, struct tcf_proto *, void *,
					struct sk_buff *skb, struct tcmsg *);

	struct module		*owner;
};

struct tcf_proto {
	/* Fast access part */
	struct tcf_proto __rcu	*next;
	void __rcu		*root;
	int			(*classify)(struct sk_buff *,
					    const struct tcf_proto *,
					    struct tcf_result *);
	__be16			protocol;

	/* All the rest */
	u32			prio;
	u32			classid;
	struct Qdisc		*q;
	void			*data;
	const struct tcf_proto_ops	*ops;
	struct tcf_chain	*chain;
	struct rcu_head		rcu;
};

struct qdisc_skb_cb {
	unsigned int		pkt_len;
	u16			slave_dev_queue_mapping;
	u16			tc_classid;
#define QDISC_CB_PRIV_LEN 20
	unsigned char		data[QDISC_CB_PRIV_LEN];
};

typedef void tcf_chain_head_change_t(struct tcf_proto *tp_head, void *priv);

struct tcf_chain {
	struct tcf_proto __rcu *filter_chain;
	tcf_chain_head_change_t *chain_head_change;
	void *chain_head_change_priv;
	struct list_head list;
	struct tcf_block *block;
	u32 index; /* chain index */
	unsigned int refcnt;
};

struct tcf_block {
	struct list_head chain_list;
	struct net *net;
	struct Qdisc *q;
	struct list_head cb_list;
	struct work_struct work;
};

static inline void qdisc_cb_private_validate(const struct sk_buff *skb, int sz)
{
	struct qdisc_skb_cb *qcb;

	BUILD_BUG_ON(sizeof(skb->cb) < offsetof(struct qdisc_skb_cb, data) + sz);
	BUILD_BUG_ON(sizeof(qcb->data) < sz);
}
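
/* A qdisc that keeps per-packet state in the private cb[] area typically
 * wraps the check above in its own accessor. Illustrative sketch modeled
 * on the pattern used by qdiscs such as netem ("my_skb_cb" is a made-up
 * name):
 *
 *	struct my_skb_cb {
 *		u64 time_to_send;
 *	};
 *
 *	static inline struct my_skb_cb *my_skb_cb(struct sk_buff *skb)
 *	{
 *		qdisc_cb_private_validate(skb, sizeof(struct my_skb_cb));
 *		return (struct my_skb_cb *)qdisc_skb_cb(skb)->data;
 *	}
 */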
static inline int qdisc_qlen(const struct Qdisc *q)
{
	return q->q.qlen;
}

static inline struct qdisc_skb_cb *qdisc_skb_cb(const struct sk_buff *skb)
{
	return (struct qdisc_skb_cb *)skb->cb;
}

static inline spinlock_t *qdisc_lock(struct Qdisc *qdisc)
{
	return &qdisc->q.lock;
}

static inline struct Qdisc *qdisc_root(const struct Qdisc *qdisc)
{
	struct Qdisc *q = rcu_dereference_rtnl(qdisc->dev_queue->qdisc);

	return q;
}

static inline struct Qdisc *qdisc_root_sleeping(const struct Qdisc *qdisc)
{
	return qdisc->dev_queue->qdisc_sleeping;
}

/* The qdisc root lock is a mechanism by which the top level
 * of a qdisc tree can be locked from any qdisc node in the
 * forest. This allows changing the configuration of some
 * aspect of the qdisc tree while blocking out asynchronous
 * qdisc access in the packet processing paths.
 *
 * It is only legal to do this when the root will not change
 * on us. Otherwise we'll potentially lock the wrong qdisc
 * root. This is enforced by holding the RTNL semaphore, which
 * all users of this lock accessor must do.
 */
static inline spinlock_t *qdisc_root_lock(const struct Qdisc *qdisc)
{
	struct Qdisc *root = qdisc_root(qdisc);

	ASSERT_RTNL();
	return qdisc_lock(root);
}

static inline spinlock_t *qdisc_root_sleeping_lock(const struct Qdisc *qdisc)
{
	struct Qdisc *root = qdisc_root_sleeping(qdisc);

	ASSERT_RTNL();
	return qdisc_lock(root);
}

static inline seqcount_t *qdisc_root_sleeping_running(const struct Qdisc *qdisc)
{
	struct Qdisc *root = qdisc_root_sleeping(qdisc);

	ASSERT_RTNL();
	return &root->running;
}

static inline struct net_device *qdisc_dev(const struct Qdisc *qdisc)
{
	return qdisc->dev_queue->dev;
}

static inline void sch_tree_lock(const struct Qdisc *q)
{
	spin_lock_bh(qdisc_root_sleeping_lock(q));
}

static inline void sch_tree_unlock(const struct Qdisc *q)
{
	spin_unlock_bh(qdisc_root_sleeping_lock(q));
}
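
/* sch_tree_lock()/sch_tree_unlock() bracket configuration changes so the
 * datapath cannot observe a half-updated tree. Illustrative sketch of a
 * ->change() handler body (sch->limit stands in for any parameter being
 * updated):
 *
 *	sch_tree_lock(sch);
 *	sch->limit = new_limit;		// mutate shared state safely
 *	sch_tree_unlock(sch);
 *
 * RTNL must already be held, as qdisc_root_sleeping_lock() asserts.
 */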
extern struct Qdisc noop_qdisc;
extern struct Qdisc_ops noop_qdisc_ops;
extern struct Qdisc_ops pfifo_fast_ops;
extern struct Qdisc_ops mq_qdisc_ops;
extern struct Qdisc_ops noqueue_qdisc_ops;
extern const struct Qdisc_ops *default_qdisc_ops;
static inline const struct Qdisc_ops *
get_default_qdisc_ops(const struct net_device *dev, int ntx)
{
	return ntx < dev->real_num_tx_queues ?
			default_qdisc_ops : &pfifo_fast_ops;
}

struct Qdisc_class_common {
	u32			classid;
	struct hlist_node	hnode;
};

struct Qdisc_class_hash {
	struct hlist_head	*hash;
	unsigned int		hashsize;
	unsigned int		hashmask;
	unsigned int		hashelems;
};

static inline unsigned int qdisc_class_hash(u32 id, u32 mask)
{
	id ^= id >> 8;
	id ^= id >> 4;
	return id & mask;
}

static inline struct Qdisc_class_common *
qdisc_class_find(const struct Qdisc_class_hash *hash, u32 id)
{
	struct Qdisc_class_common *cl;
	unsigned int h;

	if (!id)
		return NULL;

	h = qdisc_class_hash(id, hash->hashmask);
	hlist_for_each_entry(cl, &hash->hash[h], hnode) {
		if (cl->classid == id)
			return cl;
	}
	return NULL;
}
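
/* Classful qdiscs embed struct Qdisc_class_common in their per-class
 * structure and recover the outer object with container_of(). Sketch of a
 * typical ->find() helper, following the pattern of qdiscs such as htb
 * ("my_class"/"my_sched", the "clhash" field, and the qdisc_priv()
 * accessor for the private area are assumptions here):
 *
 *	static struct my_class *my_find(u32 handle, struct Qdisc *sch)
 *	{
 *		struct my_sched *q = qdisc_priv(sch);
 *		struct Qdisc_class_common *clc;
 *
 *		clc = qdisc_class_find(&q->clhash, handle);
 *		return clc ? container_of(clc, struct my_class, common) : NULL;
 *	}
 */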
static inline int tc_classid_to_hwtc(struct net_device *dev, u32 classid)
{
	u32 hwtc = TC_H_MIN(classid) - TC_H_MIN_PRIORITY;

	return (hwtc < netdev_get_num_tc(dev)) ? hwtc : -EINVAL;
}

int qdisc_class_hash_init(struct Qdisc_class_hash *);
void qdisc_class_hash_insert(struct Qdisc_class_hash *,
			     struct Qdisc_class_common *);
void qdisc_class_hash_remove(struct Qdisc_class_hash *,
			     struct Qdisc_class_common *);
void qdisc_class_hash_grow(struct Qdisc *, struct Qdisc_class_hash *);
void qdisc_class_hash_destroy(struct Qdisc_class_hash *);

void dev_init_scheduler(struct net_device *dev);
void dev_shutdown(struct net_device *dev);
void dev_activate(struct net_device *dev);
void dev_deactivate(struct net_device *dev);
void dev_deactivate_many(struct list_head *head);
struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue,
			      struct Qdisc *qdisc);
void qdisc_reset(struct Qdisc *qdisc);
void qdisc_destroy(struct Qdisc *qdisc);
void qdisc_tree_reduce_backlog(struct Qdisc *qdisc, unsigned int n,
			       unsigned int len);
struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
			  const struct Qdisc_ops *ops);
struct Qdisc *qdisc_create_dflt(struct netdev_queue *dev_queue,
				const struct Qdisc_ops *ops, u32 parentid);
void __qdisc_calculate_pkt_len(struct sk_buff *skb,
			       const struct qdisc_size_table *stab);
int skb_do_redirect(struct sk_buff *);

static inline void skb_reset_tc(struct sk_buff *skb)
{
#ifdef CONFIG_NET_CLS_ACT
	skb->tc_redirected = 0;
#endif
}

static inline bool skb_at_tc_ingress(const struct sk_buff *skb)
{
#ifdef CONFIG_NET_CLS_ACT
	return skb->tc_at_ingress;
#else
	return false;
#endif
}

static inline bool skb_skip_tc_classify(struct sk_buff *skb)
{
#ifdef CONFIG_NET_CLS_ACT
	if (skb->tc_skip_classify) {
		skb->tc_skip_classify = 0;
		return true;
	}
#endif
	return false;
}

/* Reset all TX qdiscs of a device greater than index. */
static inline void qdisc_reset_all_tx_gt(struct net_device *dev, unsigned int i)
{
	struct Qdisc *qdisc;

	for (; i < dev->num_tx_queues; i++) {
		qdisc = rtnl_dereference(netdev_get_tx_queue(dev, i)->qdisc);
		if (qdisc) {
			spin_lock_bh(qdisc_lock(qdisc));
			qdisc_reset(qdisc);
			spin_unlock_bh(qdisc_lock(qdisc));
		}
	}
}

static inline void qdisc_reset_all_tx(struct net_device *dev)
{
	qdisc_reset_all_tx_gt(dev, 0);
}

/* Are all TX queues of the device empty? */
static inline bool qdisc_all_tx_empty(const struct net_device *dev)
{
	unsigned int i;

	rcu_read_lock();
	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
		const struct Qdisc *q = rcu_dereference(txq->qdisc);

		if (q->q.qlen) {
			rcu_read_unlock();
			return false;
		}
	}
	rcu_read_unlock();
	return true;
}

/* Are any of the TX qdiscs changing? */
static inline bool qdisc_tx_changing(const struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

		if (rcu_access_pointer(txq->qdisc) != txq->qdisc_sleeping)
			return true;
	}
	return false;
}

/* Is the device using the noop qdisc on all queues? */
static inline bool qdisc_tx_is_noop(const struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

		if (rcu_access_pointer(txq->qdisc) != &noop_qdisc)
			return false;
	}
	return true;
}

static inline unsigned int qdisc_pkt_len(const struct sk_buff *skb)
{
	return qdisc_skb_cb(skb)->pkt_len;
}

/* additional qdisc xmit flags (NET_XMIT_MASK in linux/netdevice.h) */
enum net_xmit_qdisc_t {
	__NET_XMIT_STOLEN = 0x00010000,
	__NET_XMIT_BYPASS = 0x00020000,
};

#ifdef CONFIG_NET_CLS_ACT
#define net_xmit_drop_count(e)	((e) & __NET_XMIT_STOLEN ? 0 : 1)
#else
#define net_xmit_drop_count(e)	(1)
#endif

static inline void qdisc_calculate_pkt_len(struct sk_buff *skb,
					   const struct Qdisc *sch)
{
#ifdef CONFIG_NET_SCHED
	struct qdisc_size_table *stab = rcu_dereference_bh(sch->stab);

	if (stab)
		__qdisc_calculate_pkt_len(skb, stab);
#endif
}

static inline int qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
				struct sk_buff **to_free)
{
	qdisc_calculate_pkt_len(skb, sch);
	return sch->enqueue(skb, sch, to_free);
}
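
/* A classful parent enqueuing into a child uses net_xmit_drop_count() to
 * decide whether a failure should be accounted as a drop: __NET_XMIT_STOLEN
 * means an action consumed the packet, so no drop is counted. Illustrative
 * sketch of the common caller pattern:
 *
 *	ret = qdisc_enqueue(skb, child, to_free);
 *	if (ret != NET_XMIT_SUCCESS) {
 *		if (net_xmit_drop_count(ret))
 *			qdisc_qstats_drop(sch);
 *		return ret;
 *	}
 *	sch->q.qlen++;
 *	return NET_XMIT_SUCCESS;
 */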
static inline bool qdisc_is_percpu_stats(const struct Qdisc *q)
{
	return q->flags & TCQ_F_CPUSTATS;
}

static inline void _bstats_update(struct gnet_stats_basic_packed *bstats,
				  __u64 bytes, __u32 packets)
{
	bstats->bytes += bytes;
	bstats->packets += packets;
}

static inline void bstats_update(struct gnet_stats_basic_packed *bstats,
				 const struct sk_buff *skb)
{
	_bstats_update(bstats,
		       qdisc_pkt_len(skb),
		       skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1);
}

static inline void _bstats_cpu_update(struct gnet_stats_basic_cpu *bstats,
				      __u64 bytes, __u32 packets)
{
	u64_stats_update_begin(&bstats->syncp);
	_bstats_update(&bstats->bstats, bytes, packets);
	u64_stats_update_end(&bstats->syncp);
}

static inline void bstats_cpu_update(struct gnet_stats_basic_cpu *bstats,
				     const struct sk_buff *skb)
{
	u64_stats_update_begin(&bstats->syncp);
	bstats_update(&bstats->bstats, skb);
	u64_stats_update_end(&bstats->syncp);
}

static inline void qdisc_bstats_cpu_update(struct Qdisc *sch,
					   const struct sk_buff *skb)
{
	bstats_cpu_update(this_cpu_ptr(sch->cpu_bstats), skb);
}

static inline void qdisc_bstats_update(struct Qdisc *sch,
				       const struct sk_buff *skb)
{
	bstats_update(&sch->bstats, skb);
}

static inline void qdisc_qstats_backlog_dec(struct Qdisc *sch,
					    const struct sk_buff *skb)
{
	sch->qstats.backlog -= qdisc_pkt_len(skb);
}

static inline void qdisc_qstats_backlog_inc(struct Qdisc *sch,
					    const struct sk_buff *skb)
{
	sch->qstats.backlog += qdisc_pkt_len(skb);
}

static inline void __qdisc_qstats_drop(struct Qdisc *sch, int count)
{
	sch->qstats.drops += count;
}

static inline void qstats_drop_inc(struct gnet_stats_queue *qstats)
{
	qstats->drops++;
}

static inline void qstats_overlimit_inc(struct gnet_stats_queue *qstats)
{
	qstats->overlimits++;
}

static inline void qdisc_qstats_drop(struct Qdisc *sch)
{
	qstats_drop_inc(&sch->qstats);
}

static inline void qdisc_qstats_cpu_drop(struct Qdisc *sch)
{
	this_cpu_inc(sch->cpu_qstats->drops);
}

static inline void qdisc_qstats_overlimit(struct Qdisc *sch)
{
	sch->qstats.overlimits++;
}

static inline void qdisc_skb_head_init(struct qdisc_skb_head *qh)
{
	qh->head = NULL;
	qh->tail = NULL;
	qh->qlen = 0;
}

static inline int __qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch,
				       struct qdisc_skb_head *qh)
{
	struct sk_buff *last = qh->tail;

	if (last) {
		skb->next = NULL;
		last->next = skb;
		qh->tail = skb;
	} else {
		qh->tail = skb;
		qh->head = skb;
	}
	qh->qlen++;
	qdisc_qstats_backlog_inc(sch, skb);

	return NET_XMIT_SUCCESS;
}

static inline int qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch)
{
	return __qdisc_enqueue_tail(skb, sch, &sch->q);
}

static inline struct sk_buff *__qdisc_dequeue_head(struct qdisc_skb_head *qh)
{
	struct sk_buff *skb = qh->head;

	if (likely(skb != NULL)) {
		qh->head = skb->next;
		qh->qlen--;
		if (qh->head == NULL)
			qh->tail = NULL;
		skb->next = NULL;
	}

	return skb;
}

static inline struct sk_buff *qdisc_dequeue_head(struct Qdisc *sch)
{
	struct sk_buff *skb = __qdisc_dequeue_head(&sch->q);

	if (likely(skb != NULL)) {
		qdisc_qstats_backlog_dec(sch, skb);
		qdisc_bstats_update(sch, skb);
	}

	return skb;
}
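
/* These helpers are enough to build a trivial FIFO. Illustrative sketch
 * along the lines of sch_fifo ("my_fifo_enqueue" is a made-up name; a real
 * qdisc would also wire up ->dequeue = qdisc_dequeue_head):
 *
 *	static int my_fifo_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 *				   struct sk_buff **to_free)
 *	{
 *		if (likely(sch->q.qlen < sch->limit))
 *			return qdisc_enqueue_tail(skb, sch);
 *		return qdisc_drop(skb, sch, to_free);
 *	}
 */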
/* Instead of calling kfree_skb() while root qdisc lock is held,
 * queue the skb for future freeing at end of __dev_xmit_skb()
 */
static inline void __qdisc_drop(struct sk_buff *skb, struct sk_buff **to_free)
{
	skb->next = *to_free;
	*to_free = skb;
}

static inline unsigned int __qdisc_queue_drop_head(struct Qdisc *sch,
						   struct qdisc_skb_head *qh,
						   struct sk_buff **to_free)
{
	struct sk_buff *skb = __qdisc_dequeue_head(qh);

	if (likely(skb != NULL)) {
		unsigned int len = qdisc_pkt_len(skb);

		qdisc_qstats_backlog_dec(sch, skb);
		__qdisc_drop(skb, to_free);
		return len;
	}

	return 0;
}

static inline unsigned int qdisc_queue_drop_head(struct Qdisc *sch,
						 struct sk_buff **to_free)
{
	return __qdisc_queue_drop_head(sch, &sch->q, to_free);
}

static inline struct sk_buff *qdisc_peek_head(struct Qdisc *sch)
{
	const struct qdisc_skb_head *qh = &sch->q;

	return qh->head;
}

/* generic pseudo peek method for non-work-conserving qdisc */
static inline struct sk_buff *qdisc_peek_dequeued(struct Qdisc *sch)
{
	/* we can reuse ->gso_skb because peek isn't called for root qdiscs */
	if (!sch->gso_skb) {
		sch->gso_skb = sch->dequeue(sch);
		if (sch->gso_skb) {
			/* it's still part of the queue */
			qdisc_qstats_backlog_inc(sch, sch->gso_skb);
			sch->q.qlen++;
		}
	}

	return sch->gso_skb;
}

/* use instead of qdisc->dequeue() for all qdiscs queried with ->peek() */
static inline struct sk_buff *qdisc_dequeue_peeked(struct Qdisc *sch)
{
	struct sk_buff *skb = sch->gso_skb;

	if (skb) {
		sch->gso_skb = NULL;
		qdisc_qstats_backlog_dec(sch, skb);
		sch->q.qlen--;
	} else {
		skb = sch->dequeue(sch);
	}

	return skb;
}
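
/* A shaper peeks at the head packet and only commits the dequeue once it
 * may actually send. Illustrative sketch of the pattern used by
 * non-work-conserving qdiscs such as tbf ("enough_tokens_for" is a
 * hypothetical rate check):
 *
 *	skb = q->qdisc->ops->peek(q->qdisc);	// often qdisc_peek_dequeued
 *	if (skb) {
 *		if (!enough_tokens_for(skb))
 *			return NULL;		// leave the packet queued
 *		skb = qdisc_dequeue_peeked(q->qdisc);
 *		...
 *	}
 */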
static inline void __qdisc_reset_queue(struct qdisc_skb_head *qh)
{
	/*
	 * We do not know the backlog in bytes of this list;
	 * it is up to the caller to correct it
	 */
	ASSERT_RTNL();
	if (qh->qlen) {
		rtnl_kfree_skbs(qh->head, qh->tail);

		qh->head = NULL;
		qh->tail = NULL;
		qh->qlen = 0;
	}
}

static inline void qdisc_reset_queue(struct Qdisc *sch)
{
	__qdisc_reset_queue(&sch->q);
	sch->qstats.backlog = 0;
}

static inline struct Qdisc *qdisc_replace(struct Qdisc *sch, struct Qdisc *new,
					  struct Qdisc **pold)
{
	struct Qdisc *old;

	sch_tree_lock(sch);
	old = *pold;
	*pold = new;
	if (old != NULL) {
		unsigned int qlen = old->q.qlen;
		unsigned int backlog = old->qstats.backlog;

		qdisc_reset(old);
		qdisc_tree_reduce_backlog(old, qlen, backlog);
	}
	sch_tree_unlock(sch);

	return old;
}
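
/* qdisc_replace() is the standard body of a child swap in a ->graft()
 * operation: it exchanges the child under the tree lock, resets the old
 * child, and propagates its qlen/backlog up the tree. Illustrative sketch
 * ("my_sched" holding the child pointer and the qdisc_priv() accessor are
 * assumptions):
 *
 *	static int my_graft(struct Qdisc *sch, unsigned long arg,
 *			    struct Qdisc *new, struct Qdisc **old)
 *	{
 *		struct my_sched *q = qdisc_priv(sch);
 *
 *		if (new == NULL)
 *			new = &noop_qdisc;
 *		*old = qdisc_replace(sch, new, &q->qdisc);
 *		return 0;
 *	}
 */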
static inline void rtnl_qdisc_drop(struct sk_buff *skb, struct Qdisc *sch)
{
	rtnl_kfree_skbs(skb, skb);
	qdisc_qstats_drop(sch);
}

static inline int qdisc_drop(struct sk_buff *skb, struct Qdisc *sch,
			     struct sk_buff **to_free)
{
	__qdisc_drop(skb, to_free);
	qdisc_qstats_drop(sch);

	return NET_XMIT_DROP;
}

/* Length to Time (L2T) lookup in a qdisc_rate_table, to determine how
 * long it will take to send a packet given its size.
 */
static inline u32 qdisc_l2t(struct qdisc_rate_table *rtab, unsigned int pktlen)
{
	int slot = pktlen + rtab->rate.cell_align + rtab->rate.overhead;

	if (slot < 0)
		slot = 0;
	slot >>= rtab->rate.cell_log;
	if (slot > 255)
		return rtab->data[255] * (slot >> 8) + rtab->data[slot & 0xFF];

	return rtab->data[slot];
}
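
/* Worked example: with cell_log = 3 (8-byte cells) and no align/overhead,
 * a 1000-byte packet indexes slot 1000 >> 3 = 125, and rtab->data[125]
 * holds the precomputed transmission time for that size bucket.
 */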
struct psched_ratecfg {
	u64	rate_bytes_ps; /* bytes per second */
	u32	mult;
	u16	overhead;
	u8	linklayer;
	u8	shift;
};

static inline u64 psched_l2t_ns(const struct psched_ratecfg *r,
				unsigned int len)
{
	len += r->overhead;

	if (unlikely(r->linklayer == TC_LINKLAYER_ATM))
		return ((u64)(DIV_ROUND_UP(len, 48) * 53) * r->mult) >> r->shift;

	return ((u64)len * r->mult) >> r->shift;
}
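
/* The ATM branch accounts for 48-byte payloads carried in 53-byte cells
 * on the wire: e.g. a 100-byte frame needs DIV_ROUND_UP(100, 48) = 3
 * cells, i.e. 159 bytes of line time, before the mult/shift conversion
 * from bytes to nanoseconds is applied.
 */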
void psched_ratecfg_precompute(struct psched_ratecfg *r,
			       const struct tc_ratespec *conf,
			       u64 rate64);

static inline void psched_ratecfg_getrate(struct tc_ratespec *res,
					  const struct psched_ratecfg *r)
{
	memset(res, 0, sizeof(*res));

	/* legacy struct tc_ratespec has a 32bit @rate field;
	 * Qdiscs using a 64bit rate should add new attributes
	 * in order to maintain compatibility.
	 */
	res->rate = min_t(u64, r->rate_bytes_ps, ~0U);

	res->overhead = r->overhead;
	res->linklayer = (r->linklayer & TC_LINKLAYER_MASK);
}

/* Mini Qdisc serves the specific needs of the ingress/clsact Qdisc.
 * The fast path only needs to access the filter list and to update stats.
 */
struct mini_Qdisc {
	struct tcf_proto *filter_list;
	struct gnet_stats_basic_cpu __percpu *cpu_bstats;
	struct gnet_stats_queue __percpu *cpu_qstats;
	struct rcu_head rcu;
};

static inline void mini_qdisc_bstats_cpu_update(struct mini_Qdisc *miniq,
						const struct sk_buff *skb)
{
	bstats_cpu_update(this_cpu_ptr(miniq->cpu_bstats), skb);
}

static inline void mini_qdisc_qstats_cpu_drop(struct mini_Qdisc *miniq)
{
	this_cpu_inc(miniq->cpu_qstats->drops);
}

struct mini_Qdisc_pair {
	struct mini_Qdisc miniq1;
	struct mini_Qdisc miniq2;
	struct mini_Qdisc __rcu **p_miniq;
};

void mini_qdisc_pair_swap(struct mini_Qdisc_pair *miniqp,
			  struct tcf_proto *tp_head);
void mini_qdisc_pair_init(struct mini_Qdisc_pair *miniqp, struct Qdisc *qdisc,
			  struct mini_Qdisc __rcu **p_miniq);
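
/* The pair implements RCU double buffering: readers follow *p_miniq while
 * updates flip between miniq1 and miniq2. Illustrative sketch of how an
 * ingress-style qdisc might hook it up (loosely modeled on sch_ingress;
 * the field names under "q" and "dev" are assumptions):
 *
 *	static void my_chain_head_change(struct tcf_proto *tp_head, void *priv)
 *	{
 *		mini_qdisc_pair_swap(priv, tp_head);
 *	}
 *
 *	// in ->init(): publish the pair, route filter-chain updates to it
 *	mini_qdisc_pair_init(&q->miniqp, sch, &dev->miniq_ingress);
 */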
#endif