sch_generic.h

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NET_SCHED_GENERIC_H
#define __NET_SCHED_GENERIC_H

#include <linux/netdevice.h>
#include <linux/types.h>
#include <linux/rcupdate.h>
#include <linux/pkt_sched.h>
#include <linux/pkt_cls.h>
#include <linux/percpu.h>
#include <linux/dynamic_queue_limits.h>
#include <linux/list.h>
#include <linux/refcount.h>
#include <linux/workqueue.h>
#include <net/gen_stats.h>
#include <net/rtnetlink.h>

struct Qdisc_ops;
struct qdisc_walker;
struct tcf_walker;
struct module;
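
/* Legacy rate table: data[] maps a packet-size slot to the time needed
 * to transmit a packet of that size at the configured rate (see
 * qdisc_l2t() below). Tables are shared and reference counted via
 * refcnt and chained through next.
 */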
struct qdisc_rate_table {
	struct tc_ratespec rate;
	u32		data[256];
	struct qdisc_rate_table *next;
	int		refcnt;
};

enum qdisc_state_t {
	__QDISC_STATE_SCHED,
	__QDISC_STATE_DEACTIVATED,
};

struct qdisc_size_table {
	struct rcu_head		rcu;
	struct list_head	list;
	struct tc_sizespec	szopts;
	int			refcnt;
	u16			data[];
};

/* similar to sk_buff_head, but skb->prev pointer is undefined. */
struct qdisc_skb_head {
	struct sk_buff	*head;
	struct sk_buff	*tail;
	__u32		qlen;
	spinlock_t	lock;
};

struct Qdisc {
	int 			(*enqueue)(struct sk_buff *skb,
					   struct Qdisc *sch,
					   struct sk_buff **to_free);
	struct sk_buff *	(*dequeue)(struct Qdisc *sch);
	unsigned int		flags;
#define TCQ_F_BUILTIN		1
#define TCQ_F_INGRESS		2
#define TCQ_F_CAN_BYPASS	4
#define TCQ_F_MQROOT		8
#define TCQ_F_ONETXQUEUE	0x10 /* dequeue_skb() can assume all skbs are for
				      * q->dev_queue : It can test
				      * netif_xmit_frozen_or_stopped() before
				      * dequeueing next packet.
				      * It's true for MQ/MQPRIO slaves, or non
				      * multiqueue device.
				      */
#define TCQ_F_WARN_NONWC	(1 << 16)
#define TCQ_F_CPUSTATS		0x20 /* run using percpu statistics */
#define TCQ_F_NOPARENT		0x40 /* root of its hierarchy :
				      * qdisc_tree_decrease_qlen() should stop.
				      */
#define TCQ_F_INVISIBLE		0x80 /* invisible by default in dump */
#define TCQ_F_NOLOCK		0x100 /* qdisc does not require locking */
#define TCQ_F_OFFLOADED		0x200 /* qdisc is offloaded to HW */
	u32			limit;
	const struct Qdisc_ops	*ops;
	struct qdisc_size_table	__rcu *stab;
	struct hlist_node	hash;
	u32			handle;
	u32			parent;

	struct netdev_queue	*dev_queue;

	struct net_rate_estimator __rcu *rate_est;
	struct gnet_stats_basic_cpu __percpu *cpu_bstats;
	struct gnet_stats_queue	__percpu *cpu_qstats;
	int			padded;
	refcount_t		refcnt;

	/*
	 * For performance's sake on SMP, we put highly modified fields at the end
	 */
	struct sk_buff_head	gso_skb ____cacheline_aligned_in_smp;
	struct qdisc_skb_head	q;
	struct gnet_stats_basic_packed bstats;
	seqcount_t		running;
	struct gnet_stats_queue	qstats;
	unsigned long		state;
	struct Qdisc		*next_sched;
	struct sk_buff_head	skb_bad_txq;

	spinlock_t		busylock ____cacheline_aligned_in_smp;
	spinlock_t		seqlock;
};

static inline void qdisc_refcount_inc(struct Qdisc *qdisc)
{
	if (qdisc->flags & TCQ_F_BUILTIN)
		return;
	refcount_inc(&qdisc->refcnt);
}

static inline bool qdisc_is_running(struct Qdisc *qdisc)
{
	if (qdisc->flags & TCQ_F_NOLOCK)
		return spin_is_locked(&qdisc->seqlock);
	return (raw_read_seqcount(&qdisc->running) & 1) ? true : false;
}
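
/* qdisc_run_begin()/qdisc_run_end() bracket a dequeue burst so that only
 * one CPU runs the qdisc at a time: TCQ_F_NOLOCK qdiscs take ->seqlock,
 * everything else relies on the ->running seqcount being odd.
 *
 * Minimal caller sketch (this mirrors the pattern used by the core
 * transmit path, not a new API):
 *
 *	if (qdisc_run_begin(q)) {
 *		__qdisc_run(q);
 *		qdisc_run_end(q);
 *	}
 */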
static inline bool qdisc_run_begin(struct Qdisc *qdisc)
{
	if (qdisc->flags & TCQ_F_NOLOCK) {
		if (!spin_trylock(&qdisc->seqlock))
			return false;
	} else if (qdisc_is_running(qdisc)) {
		return false;
	}
	/* Variant of write_seqcount_begin() telling lockdep a trylock
	 * was attempted.
	 */
	raw_write_seqcount_begin(&qdisc->running);
	seqcount_acquire(&qdisc->running.dep_map, 0, 1, _RET_IP_);
	return true;
}

static inline void qdisc_run_end(struct Qdisc *qdisc)
{
	write_seqcount_end(&qdisc->running);
	if (qdisc->flags & TCQ_F_NOLOCK)
		spin_unlock(&qdisc->seqlock);
}

static inline bool qdisc_may_bulk(const struct Qdisc *qdisc)
{
	return qdisc->flags & TCQ_F_ONETXQUEUE;
}

static inline int qdisc_avail_bulklimit(const struct netdev_queue *txq)
{
#ifdef CONFIG_BQL
	/* Non-BQL migrated drivers will return 0, too. */
	return dql_avail(&txq->dql);
#else
	return 0;
#endif
}

struct Qdisc_class_ops {
	/* Child qdisc manipulation */
	struct netdev_queue *	(*select_queue)(struct Qdisc *, struct tcmsg *);
	int			(*graft)(struct Qdisc *, unsigned long cl,
					 struct Qdisc *, struct Qdisc **,
					 struct netlink_ext_ack *extack);
	struct Qdisc *		(*leaf)(struct Qdisc *, unsigned long cl);
	void			(*qlen_notify)(struct Qdisc *, unsigned long);

	/* Class manipulation routines */
	unsigned long		(*find)(struct Qdisc *, u32 classid);
	int			(*change)(struct Qdisc *, u32, u32,
					  struct nlattr **, unsigned long *,
					  struct netlink_ext_ack *);
	int			(*delete)(struct Qdisc *, unsigned long);
	void			(*walk)(struct Qdisc *, struct qdisc_walker *arg);

	/* Filter manipulation */
	struct tcf_block *	(*tcf_block)(struct Qdisc *sch,
					     unsigned long arg,
					     struct netlink_ext_ack *extack);
	unsigned long		(*bind_tcf)(struct Qdisc *, unsigned long,
					    u32 classid);
	void			(*unbind_tcf)(struct Qdisc *, unsigned long);

	/* rtnetlink specific */
	int			(*dump)(struct Qdisc *, unsigned long,
					struct sk_buff *skb, struct tcmsg *);
	int			(*dump_stats)(struct Qdisc *, unsigned long,
					      struct gnet_dump *);
};
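
/* Per-qdisc-type operations: ->enqueue/->dequeue/->peek make up the
 * datapath, ->init/->reset/->destroy/->change the control path, and
 * ->dump/->dump_stats serve rtnetlink. A qdisc module registers its
 * Qdisc_ops with register_qdisc(); priv_size bytes of per-instance
 * storage are allocated behind struct Qdisc.
 */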
struct Qdisc_ops {
	struct Qdisc_ops	*next;
	const struct Qdisc_class_ops	*cl_ops;
	char			id[IFNAMSIZ];
	int			priv_size;
	unsigned int		static_flags;

	int 			(*enqueue)(struct sk_buff *skb,
					   struct Qdisc *sch,
					   struct sk_buff **to_free);
	struct sk_buff *	(*dequeue)(struct Qdisc *);
	struct sk_buff *	(*peek)(struct Qdisc *);

	int			(*init)(struct Qdisc *sch, struct nlattr *arg,
					struct netlink_ext_ack *extack);
	void			(*reset)(struct Qdisc *);
	void			(*destroy)(struct Qdisc *);
	int			(*change)(struct Qdisc *sch,
					  struct nlattr *arg,
					  struct netlink_ext_ack *extack);
	void			(*attach)(struct Qdisc *sch);
	int			(*change_tx_queue_len)(struct Qdisc *, unsigned int);

	int			(*dump)(struct Qdisc *, struct sk_buff *);
	int			(*dump_stats)(struct Qdisc *, struct gnet_dump *);

	void			(*ingress_block_set)(struct Qdisc *sch,
						     u32 block_index);
	void			(*egress_block_set)(struct Qdisc *sch,
						    u32 block_index);
	u32			(*ingress_block_get)(struct Qdisc *sch);
	u32			(*egress_block_get)(struct Qdisc *sch);

	struct module		*owner;
};

struct tcf_result {
	union {
		struct {
			unsigned long	class;
			u32		classid;
		};
		const struct tcf_proto *goto_tp;
	};
};

struct tcf_proto_ops {
	struct list_head	head;
	char			kind[IFNAMSIZ];

	int			(*classify)(struct sk_buff *,
					    const struct tcf_proto *,
					    struct tcf_result *);
	int			(*init)(struct tcf_proto *);
	void			(*destroy)(struct tcf_proto *tp,
					   struct netlink_ext_ack *extack);

	void *			(*get)(struct tcf_proto *, u32 handle);
	int			(*change)(struct net *net, struct sk_buff *,
					  struct tcf_proto *, unsigned long,
					  u32 handle, struct nlattr **,
					  void **, bool,
					  struct netlink_ext_ack *);
	int			(*delete)(struct tcf_proto *tp, void *arg,
					  bool *last,
					  struct netlink_ext_ack *);
	void			(*walk)(struct tcf_proto *, struct tcf_walker *arg);
	void			(*bind_class)(void *, u32, unsigned long);

	/* rtnetlink specific */
	int			(*dump)(struct net *, struct tcf_proto *, void *,
					struct sk_buff *skb, struct tcmsg *);

	struct module		*owner;
};
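
/* One classifier instance on a filter chain. ->classify is copied from
 * the ops so the fast path can invoke it without dereferencing ->ops;
 * only the "fast access" fields up front are touched per packet.
 */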
struct tcf_proto {
	/* Fast access part */
	struct tcf_proto __rcu	*next;
	void __rcu		*root;
	int			(*classify)(struct sk_buff *,
					    const struct tcf_proto *,
					    struct tcf_result *);
	__be16			protocol;

	/* All the rest */
	u32			prio;
	void			*data;
	const struct tcf_proto_ops	*ops;
	struct tcf_chain	*chain;
	struct rcu_head		rcu;
};

struct qdisc_skb_cb {
	unsigned int		pkt_len;
	u16			slave_dev_queue_mapping;
	u16			tc_classid;
#define QDISC_CB_PRIV_LEN 20
	unsigned char		data[QDISC_CB_PRIV_LEN];
};

typedef void tcf_chain_head_change_t(struct tcf_proto *tp_head, void *priv);

struct tcf_chain {
	struct tcf_proto __rcu *filter_chain;
	struct list_head filter_chain_list;
	struct list_head list;
	struct tcf_block *block;
	u32 index; /* chain index */
	unsigned int refcnt;
};

struct tcf_block {
	struct list_head chain_list;
	u32 index; /* block index for shared blocks */
	unsigned int refcnt;
	struct net *net;
	struct Qdisc *q;
	struct list_head cb_list;
	struct list_head owner_list;
	bool keep_dst;
	unsigned int offloadcnt; /* Number of offloaded filters */
	unsigned int nooffloaddevcnt; /* Number of devs unable to do offload */
};

static inline void tcf_block_offload_inc(struct tcf_block *block, u32 *flags)
{
	if (*flags & TCA_CLS_FLAGS_IN_HW)
		return;
	*flags |= TCA_CLS_FLAGS_IN_HW;
	block->offloadcnt++;
}

static inline void tcf_block_offload_dec(struct tcf_block *block, u32 *flags)
{
	if (!(*flags & TCA_CLS_FLAGS_IN_HW))
		return;
	*flags &= ~TCA_CLS_FLAGS_IN_HW;
	block->offloadcnt--;
}

static inline void qdisc_cb_private_validate(const struct sk_buff *skb, int sz)
{
	struct qdisc_skb_cb *qcb;

	BUILD_BUG_ON(sizeof(skb->cb) < offsetof(struct qdisc_skb_cb, data) + sz);
	BUILD_BUG_ON(sizeof(qcb->data) < sz);
}
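
/* A qdisc that keeps private per-skb state stores it in
 * qdisc_skb_cb.data and guards the size with the BUILD_BUG_ON()s above.
 * Illustrative sketch (the struct and function names here are made up;
 * netem's netem_skb_cb() follows this shape):
 *
 *	struct my_skb_cb {
 *		u64 my_state;
 *	};
 *
 *	static inline struct my_skb_cb *my_skb_cb(struct sk_buff *skb)
 *	{
 *		qdisc_cb_private_validate(skb, sizeof(struct my_skb_cb));
 *		return (struct my_skb_cb *)qdisc_skb_cb(skb)->data;
 *	}
 */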

static inline int qdisc_qlen_cpu(const struct Qdisc *q)
{
	return this_cpu_ptr(q->cpu_qstats)->qlen;
}

static inline int qdisc_qlen(const struct Qdisc *q)
{
	return q->q.qlen;
}
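
/* Sum the queue length over all per-cpu counters for lockless qdiscs;
 * the walk is not synchronized against writers, so the result is a
 * best-effort snapshot only.
 */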
static inline int qdisc_qlen_sum(const struct Qdisc *q)
{
	__u32 qlen = q->qstats.qlen;
	int i;

	if (q->flags & TCQ_F_NOLOCK) {
		for_each_possible_cpu(i)
			qlen += per_cpu_ptr(q->cpu_qstats, i)->qlen;
	} else {
		qlen += q->q.qlen;
	}

	return qlen;
}

static inline struct qdisc_skb_cb *qdisc_skb_cb(const struct sk_buff *skb)
{
	return (struct qdisc_skb_cb *)skb->cb;
}

static inline spinlock_t *qdisc_lock(struct Qdisc *qdisc)
{
	return &qdisc->q.lock;
}

static inline struct Qdisc *qdisc_root(const struct Qdisc *qdisc)
{
	struct Qdisc *q = rcu_dereference_rtnl(qdisc->dev_queue->qdisc);

	return q;
}

static inline struct Qdisc *qdisc_root_sleeping(const struct Qdisc *qdisc)
{
	return qdisc->dev_queue->qdisc_sleeping;
}

/* The qdisc root lock is a mechanism by which the top level
 * of a qdisc tree can be locked from any qdisc node in the
 * forest. This allows changing the configuration of some
 * aspect of the qdisc tree while blocking out asynchronous
 * qdisc access in the packet processing paths.
 *
 * It is only legal to do this when the root will not change
 * on us. Otherwise we'll potentially lock the wrong qdisc
 * root. This is enforced by holding the RTNL semaphore, which
 * all users of this lock accessor must do.
 */
static inline spinlock_t *qdisc_root_lock(const struct Qdisc *qdisc)
{
	struct Qdisc *root = qdisc_root(qdisc);

	ASSERT_RTNL();
	return qdisc_lock(root);
}

static inline spinlock_t *qdisc_root_sleeping_lock(const struct Qdisc *qdisc)
{
	struct Qdisc *root = qdisc_root_sleeping(qdisc);

	ASSERT_RTNL();
	return qdisc_lock(root);
}

static inline seqcount_t *qdisc_root_sleeping_running(const struct Qdisc *qdisc)
{
	struct Qdisc *root = qdisc_root_sleeping(qdisc);

	ASSERT_RTNL();
	return &root->running;
}

static inline struct net_device *qdisc_dev(const struct Qdisc *qdisc)
{
	return qdisc->dev_queue->dev;
}

static inline void sch_tree_lock(const struct Qdisc *q)
{
	spin_lock_bh(qdisc_root_sleeping_lock(q));
}

static inline void sch_tree_unlock(const struct Qdisc *q)
{
	spin_unlock_bh(qdisc_root_sleeping_lock(q));
}

extern struct Qdisc noop_qdisc;
extern struct Qdisc_ops noop_qdisc_ops;
extern struct Qdisc_ops pfifo_fast_ops;
extern struct Qdisc_ops mq_qdisc_ops;
extern struct Qdisc_ops noqueue_qdisc_ops;
extern const struct Qdisc_ops *default_qdisc_ops;

static inline const struct Qdisc_ops *
get_default_qdisc_ops(const struct net_device *dev, int ntx)
{
	return ntx < dev->real_num_tx_queues ?
			default_qdisc_ops : &pfifo_fast_ops;
}

struct Qdisc_class_common {
	u32			classid;
	struct hlist_node	hnode;
};

struct Qdisc_class_hash {
	struct hlist_head	*hash;
	unsigned int		hashsize;
	unsigned int		hashmask;
	unsigned int		hashelems;
};
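
/* xor-fold the upper bits of the classid into the low bits before
 * masking, so ids that differ only in high bits still spread across
 * the hash buckets.
 */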
static inline unsigned int qdisc_class_hash(u32 id, u32 mask)
{
	id ^= id >> 8;
	id ^= id >> 4;
	return id & mask;
}

static inline struct Qdisc_class_common *
qdisc_class_find(const struct Qdisc_class_hash *hash, u32 id)
{
	struct Qdisc_class_common *cl;
	unsigned int h;

	if (!id)
		return NULL;

	h = qdisc_class_hash(id, hash->hashmask);
	hlist_for_each_entry(cl, &hash->hash[h], hnode) {
		if (cl->classid == id)
			return cl;
	}
	return NULL;
}

static inline int tc_classid_to_hwtc(struct net_device *dev, u32 classid)
{
	u32 hwtc = TC_H_MIN(classid) - TC_H_MIN_PRIORITY;

	return (hwtc < netdev_get_num_tc(dev)) ? hwtc : -EINVAL;
}

int qdisc_class_hash_init(struct Qdisc_class_hash *);
void qdisc_class_hash_insert(struct Qdisc_class_hash *,
			     struct Qdisc_class_common *);
void qdisc_class_hash_remove(struct Qdisc_class_hash *,
			     struct Qdisc_class_common *);
void qdisc_class_hash_grow(struct Qdisc *, struct Qdisc_class_hash *);
void qdisc_class_hash_destroy(struct Qdisc_class_hash *);

int dev_qdisc_change_tx_queue_len(struct net_device *dev);
void dev_init_scheduler(struct net_device *dev);
void dev_shutdown(struct net_device *dev);
void dev_activate(struct net_device *dev);
void dev_deactivate(struct net_device *dev);
void dev_deactivate_many(struct list_head *head);
struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue,
			      struct Qdisc *qdisc);
void qdisc_reset(struct Qdisc *qdisc);
void qdisc_destroy(struct Qdisc *qdisc);
void qdisc_tree_reduce_backlog(struct Qdisc *qdisc, unsigned int n,
			       unsigned int len);
struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
			  const struct Qdisc_ops *ops,
			  struct netlink_ext_ack *extack);
void qdisc_free(struct Qdisc *qdisc);
struct Qdisc *qdisc_create_dflt(struct netdev_queue *dev_queue,
				const struct Qdisc_ops *ops, u32 parentid,
				struct netlink_ext_ack *extack);
void __qdisc_calculate_pkt_len(struct sk_buff *skb,
			       const struct qdisc_size_table *stab);
int skb_do_redirect(struct sk_buff *);

static inline void skb_reset_tc(struct sk_buff *skb)
{
#ifdef CONFIG_NET_CLS_ACT
	skb->tc_redirected = 0;
#endif
}

static inline bool skb_at_tc_ingress(const struct sk_buff *skb)
{
#ifdef CONFIG_NET_CLS_ACT
	return skb->tc_at_ingress;
#else
	return false;
#endif
}

static inline bool skb_skip_tc_classify(struct sk_buff *skb)
{
#ifdef CONFIG_NET_CLS_ACT
	if (skb->tc_skip_classify) {
		skb->tc_skip_classify = 0;
		return true;
	}
#endif
	return false;
}

/* Reset all TX qdiscs greater than index of a device. */
static inline void qdisc_reset_all_tx_gt(struct net_device *dev, unsigned int i)
{
	struct Qdisc *qdisc;

	for (; i < dev->num_tx_queues; i++) {
		qdisc = rtnl_dereference(netdev_get_tx_queue(dev, i)->qdisc);
		if (qdisc) {
			spin_lock_bh(qdisc_lock(qdisc));
			qdisc_reset(qdisc);
			spin_unlock_bh(qdisc_lock(qdisc));
		}
	}
}

static inline void qdisc_reset_all_tx(struct net_device *dev)
{
	qdisc_reset_all_tx_gt(dev, 0);
}

/* Are all TX queues of the device empty? */
static inline bool qdisc_all_tx_empty(const struct net_device *dev)
{
	unsigned int i;

	rcu_read_lock();
	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
		const struct Qdisc *q = rcu_dereference(txq->qdisc);

		if (q->q.qlen) {
			rcu_read_unlock();
			return false;
		}
	}
	rcu_read_unlock();
	return true;
}

/* Are any of the TX qdiscs changing? */
static inline bool qdisc_tx_changing(const struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

		if (rcu_access_pointer(txq->qdisc) != txq->qdisc_sleeping)
			return true;
	}
	return false;
}

/* Is the device using the noop qdisc on all queues? */
static inline bool qdisc_tx_is_noop(const struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

		if (rcu_access_pointer(txq->qdisc) != &noop_qdisc)
			return false;
	}
	return true;
}

static inline unsigned int qdisc_pkt_len(const struct sk_buff *skb)
{
	return qdisc_skb_cb(skb)->pkt_len;
}

/* additional qdisc xmit flags (NET_XMIT_MASK in linux/netdevice.h) */
enum net_xmit_qdisc_t {
	__NET_XMIT_STOLEN = 0x00010000,
	__NET_XMIT_BYPASS = 0x00020000,
};
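
/* These bits live above NET_XMIT_MASK and are stripped before the
 * verdict reaches the core stack. STOLEN means a child qdisc or an
 * action consumed the skb, so the parent must not account a drop;
 * hence net_xmit_drop_count() below.
 */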
#ifdef CONFIG_NET_CLS_ACT
#define net_xmit_drop_count(e)	((e) & __NET_XMIT_STOLEN ? 0 : 1)
#else
#define net_xmit_drop_count(e)	(1)
#endif

static inline void qdisc_calculate_pkt_len(struct sk_buff *skb,
					   const struct Qdisc *sch)
{
#ifdef CONFIG_NET_SCHED
	struct qdisc_size_table *stab = rcu_dereference_bh(sch->stab);

	if (stab)
		__qdisc_calculate_pkt_len(skb, stab);
#endif
}

static inline int qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
				struct sk_buff **to_free)
{
	qdisc_calculate_pkt_len(skb, sch);
	return sch->enqueue(skb, sch, to_free);
}

static inline bool qdisc_is_percpu_stats(const struct Qdisc *q)
{
	return q->flags & TCQ_F_CPUSTATS;
}

static inline void _bstats_update(struct gnet_stats_basic_packed *bstats,
				  __u64 bytes, __u32 packets)
{
	bstats->bytes += bytes;
	bstats->packets += packets;
}

static inline void bstats_update(struct gnet_stats_basic_packed *bstats,
				 const struct sk_buff *skb)
{
	_bstats_update(bstats,
		       qdisc_pkt_len(skb),
		       skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1);
}

static inline void _bstats_cpu_update(struct gnet_stats_basic_cpu *bstats,
				      __u64 bytes, __u32 packets)
{
	u64_stats_update_begin(&bstats->syncp);
	_bstats_update(&bstats->bstats, bytes, packets);
	u64_stats_update_end(&bstats->syncp);
}

static inline void bstats_cpu_update(struct gnet_stats_basic_cpu *bstats,
				     const struct sk_buff *skb)
{
	u64_stats_update_begin(&bstats->syncp);
	bstats_update(&bstats->bstats, skb);
	u64_stats_update_end(&bstats->syncp);
}

static inline void qdisc_bstats_cpu_update(struct Qdisc *sch,
					   const struct sk_buff *skb)
{
	bstats_cpu_update(this_cpu_ptr(sch->cpu_bstats), skb);
}

static inline void qdisc_bstats_update(struct Qdisc *sch,
				       const struct sk_buff *skb)
{
	bstats_update(&sch->bstats, skb);
}

static inline void qdisc_qstats_backlog_dec(struct Qdisc *sch,
					    const struct sk_buff *skb)
{
	sch->qstats.backlog -= qdisc_pkt_len(skb);
}

static inline void qdisc_qstats_cpu_backlog_dec(struct Qdisc *sch,
						const struct sk_buff *skb)
{
	this_cpu_sub(sch->cpu_qstats->backlog, qdisc_pkt_len(skb));
}

static inline void qdisc_qstats_backlog_inc(struct Qdisc *sch,
					    const struct sk_buff *skb)
{
	sch->qstats.backlog += qdisc_pkt_len(skb);
}

static inline void qdisc_qstats_cpu_backlog_inc(struct Qdisc *sch,
						const struct sk_buff *skb)
{
	this_cpu_add(sch->cpu_qstats->backlog, qdisc_pkt_len(skb));
}

static inline void qdisc_qstats_cpu_qlen_inc(struct Qdisc *sch)
{
	this_cpu_inc(sch->cpu_qstats->qlen);
}

static inline void qdisc_qstats_cpu_qlen_dec(struct Qdisc *sch)
{
	this_cpu_dec(sch->cpu_qstats->qlen);
}

static inline void qdisc_qstats_cpu_requeues_inc(struct Qdisc *sch)
{
	this_cpu_inc(sch->cpu_qstats->requeues);
}

static inline void __qdisc_qstats_drop(struct Qdisc *sch, int count)
{
	sch->qstats.drops += count;
}

static inline void qstats_drop_inc(struct gnet_stats_queue *qstats)
{
	qstats->drops++;
}

static inline void qstats_overlimit_inc(struct gnet_stats_queue *qstats)
{
	qstats->overlimits++;
}

static inline void qdisc_qstats_drop(struct Qdisc *sch)
{
	qstats_drop_inc(&sch->qstats);
}

static inline void qdisc_qstats_cpu_drop(struct Qdisc *sch)
{
	this_cpu_inc(sch->cpu_qstats->drops);
}

static inline void qdisc_qstats_overlimit(struct Qdisc *sch)
{
	sch->qstats.overlimits++;
}

static inline void qdisc_skb_head_init(struct qdisc_skb_head *qh)
{
	qh->head = NULL;
	qh->tail = NULL;
	qh->qlen = 0;
}

static inline int __qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch,
				       struct qdisc_skb_head *qh)
{
	struct sk_buff *last = qh->tail;

	if (last) {
		skb->next = NULL;
		last->next = skb;
		qh->tail = skb;
	} else {
		qh->tail = skb;
		qh->head = skb;
	}
	qh->qlen++;
	qdisc_qstats_backlog_inc(sch, skb);

	return NET_XMIT_SUCCESS;
}

static inline int qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch)
{
	return __qdisc_enqueue_tail(skb, sch, &sch->q);
}

static inline struct sk_buff *__qdisc_dequeue_head(struct qdisc_skb_head *qh)
{
	struct sk_buff *skb = qh->head;

	if (likely(skb != NULL)) {
		qh->head = skb->next;
		qh->qlen--;
		if (qh->head == NULL)
			qh->tail = NULL;
		skb->next = NULL;
	}

	return skb;
}

static inline struct sk_buff *qdisc_dequeue_head(struct Qdisc *sch)
{
	struct sk_buff *skb = __qdisc_dequeue_head(&sch->q);

	if (likely(skb != NULL)) {
		qdisc_qstats_backlog_dec(sch, skb);
		qdisc_bstats_update(sch, skb);
	}

	return skb;
}

/* Instead of calling kfree_skb() while root qdisc lock is held,
 * queue the skb for future freeing at end of __dev_xmit_skb()
 */
static inline void __qdisc_drop(struct sk_buff *skb, struct sk_buff **to_free)
{
	skb->next = *to_free;
	*to_free = skb;
}
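
/* Like __qdisc_drop(), but for an skb that heads a list of packets:
 * when skb->prev points at the tail of that list, the whole list is
 * spliced onto to_free in one go.
 */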
static inline void __qdisc_drop_all(struct sk_buff *skb,
				    struct sk_buff **to_free)
{
	if (skb->prev)
		skb->prev->next = *to_free;
	else
		skb->next = *to_free;
	*to_free = skb;
}

static inline unsigned int __qdisc_queue_drop_head(struct Qdisc *sch,
						   struct qdisc_skb_head *qh,
						   struct sk_buff **to_free)
{
	struct sk_buff *skb = __qdisc_dequeue_head(qh);

	if (likely(skb != NULL)) {
		unsigned int len = qdisc_pkt_len(skb);

		qdisc_qstats_backlog_dec(sch, skb);
		__qdisc_drop(skb, to_free);
		return len;
	}

	return 0;
}

static inline unsigned int qdisc_queue_drop_head(struct Qdisc *sch,
						 struct sk_buff **to_free)
{
	return __qdisc_queue_drop_head(sch, &sch->q, to_free);
}

static inline struct sk_buff *qdisc_peek_head(struct Qdisc *sch)
{
	const struct qdisc_skb_head *qh = &sch->q;

	return qh->head;
}

/* generic pseudo peek method for non-work-conserving qdisc */
static inline struct sk_buff *qdisc_peek_dequeued(struct Qdisc *sch)
{
	struct sk_buff *skb = skb_peek(&sch->gso_skb);

	/* we can reuse ->gso_skb because peek isn't called for root qdiscs */
	if (!skb) {
		skb = sch->dequeue(sch);

		if (skb) {
			__skb_queue_head(&sch->gso_skb, skb);
			/* it's still part of the queue */
			qdisc_qstats_backlog_inc(sch, skb);
			sch->q.qlen++;
		}
	}

	return skb;
}

/* use instead of qdisc->dequeue() for all qdiscs queried with ->peek() */
static inline struct sk_buff *qdisc_dequeue_peeked(struct Qdisc *sch)
{
	struct sk_buff *skb = skb_peek(&sch->gso_skb);

	if (skb) {
		skb = __skb_dequeue(&sch->gso_skb);
		qdisc_qstats_backlog_dec(sch, skb);
		sch->q.qlen--;
	} else {
		skb = sch->dequeue(sch);
	}

	return skb;
}

static inline void __qdisc_reset_queue(struct qdisc_skb_head *qh)
{
	/*
	 * We do not know the backlog in bytes of this list; it
	 * is up to the caller to correct it
	 */
	ASSERT_RTNL();
	if (qh->qlen) {
		rtnl_kfree_skbs(qh->head, qh->tail);

		qh->head = NULL;
		qh->tail = NULL;
		qh->qlen = 0;
	}
}

static inline void qdisc_reset_queue(struct Qdisc *sch)
{
	__qdisc_reset_queue(&sch->q);
	sch->qstats.backlog = 0;
}
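
/* Helper for Qdisc_class_ops::graft() implementations: swap in the new
 * child under the tree lock, then drain the old child's queue and
 * propagate the qlen/backlog decrease up the tree.
 */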
static inline struct Qdisc *qdisc_replace(struct Qdisc *sch, struct Qdisc *new,
					  struct Qdisc **pold)
{
	struct Qdisc *old;

	sch_tree_lock(sch);
	old = *pold;
	*pold = new;
	if (old != NULL) {
		unsigned int qlen = old->q.qlen;
		unsigned int backlog = old->qstats.backlog;

		qdisc_reset(old);
		qdisc_tree_reduce_backlog(old, qlen, backlog);
	}
	sch_tree_unlock(sch);

	return old;
}

static inline void rtnl_qdisc_drop(struct sk_buff *skb, struct Qdisc *sch)
{
	rtnl_kfree_skbs(skb, skb);
	qdisc_qstats_drop(sch);
}

static inline int qdisc_drop_cpu(struct sk_buff *skb, struct Qdisc *sch,
				 struct sk_buff **to_free)
{
	__qdisc_drop(skb, to_free);
	qdisc_qstats_cpu_drop(sch);

	return NET_XMIT_DROP;
}

static inline int qdisc_drop(struct sk_buff *skb, struct Qdisc *sch,
			     struct sk_buff **to_free)
{
	__qdisc_drop(skb, to_free);
	qdisc_qstats_drop(sch);

	return NET_XMIT_DROP;
}

static inline int qdisc_drop_all(struct sk_buff *skb, struct Qdisc *sch,
				 struct sk_buff **to_free)
{
	__qdisc_drop_all(skb, to_free);
	qdisc_qstats_drop(sch);

	return NET_XMIT_DROP;
}

/* Length to Time (L2T) lookup in a qdisc_rate_table, to determine how
 * long it will take to send a packet given its size.
 */
static inline u32 qdisc_l2t(struct qdisc_rate_table *rtab, unsigned int pktlen)
{
	int slot = pktlen + rtab->rate.cell_align + rtab->rate.overhead;

	if (slot < 0)
		slot = 0;
	slot >>= rtab->rate.cell_log;
	if (slot > 255)
		return rtab->data[255] * (slot >> 8) + rtab->data[slot & 0xFF];
	return rtab->data[slot];
}

struct psched_ratecfg {
	u64	rate_bytes_ps; /* bytes per second */
	u32	mult;
	u16	overhead;
	u8	linklayer;
	u8	shift;
};
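
/* Fixed-point length-to-time: psched_ratecfg_precompute() picks mult
 * and shift so that (len * mult) >> shift approximates the nanoseconds
 * needed to send len bytes at rate_bytes_ps. For ATM link layers the
 * payload travels in 48-byte chunks of 53-byte cells, hence the
 * DIV_ROUND_UP() below.
 */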
static inline u64 psched_l2t_ns(const struct psched_ratecfg *r,
				unsigned int len)
{
	len += r->overhead;

	if (unlikely(r->linklayer == TC_LINKLAYER_ATM))
		return ((u64)(DIV_ROUND_UP(len, 48) * 53) * r->mult) >> r->shift;

	return ((u64)len * r->mult) >> r->shift;
}

void psched_ratecfg_precompute(struct psched_ratecfg *r,
			       const struct tc_ratespec *conf,
			       u64 rate64);

static inline void psched_ratecfg_getrate(struct tc_ratespec *res,
					  const struct psched_ratecfg *r)
{
	memset(res, 0, sizeof(*res));

	/* legacy struct tc_ratespec has a 32bit @rate field
	 * Qdisc using 64bit rate should add new attributes
	 * in order to maintain compatibility.
	 */
	res->rate = min_t(u64, r->rate_bytes_ps, ~0U);

	res->overhead = r->overhead;
	res->linklayer = (r->linklayer & TC_LINKLAYER_MASK);
}

/* Mini Qdisc serves for specific needs of ingress/clsact Qdisc.
 * The fast path only needs to access filter list and to update stats
 */
struct mini_Qdisc {
	struct tcf_proto *filter_list;
	struct gnet_stats_basic_cpu __percpu *cpu_bstats;
	struct gnet_stats_queue __percpu *cpu_qstats;
	struct rcu_head rcu;
};

static inline void mini_qdisc_bstats_cpu_update(struct mini_Qdisc *miniq,
						const struct sk_buff *skb)
{
	bstats_cpu_update(this_cpu_ptr(miniq->cpu_bstats), skb);
}

static inline void mini_qdisc_qstats_cpu_drop(struct mini_Qdisc *miniq)
{
	this_cpu_inc(miniq->cpu_qstats->drops);
}

struct mini_Qdisc_pair {
	struct mini_Qdisc miniq1;
	struct mini_Qdisc miniq2;
	struct mini_Qdisc __rcu **p_miniq;
};
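
/* mini_qdisc_pair_swap() publishes filter list updates by pointing
 * *p_miniq at the currently inactive buffer of the pair (or at NULL
 * when the filter list is empty), so RCU readers on the fast path
 * never observe a half-updated mini_Qdisc.
 */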
void mini_qdisc_pair_swap(struct mini_Qdisc_pair *miniqp,
			  struct tcf_proto *tp_head);
void mini_qdisc_pair_init(struct mini_Qdisc_pair *miniqp, struct Qdisc *qdisc,
			  struct mini_Qdisc __rcu **p_miniq);

#endif