#ifndef __NET_SCHED_GENERIC_H
#define __NET_SCHED_GENERIC_H

#include <linux/netdevice.h>
#include <linux/types.h>
#include <linux/rcupdate.h>
#include <linux/pkt_sched.h>
#include <linux/pkt_cls.h>
#include <linux/percpu.h>
#include <linux/dynamic_queue_limits.h>
#include <net/gen_stats.h>
#include <net/rtnetlink.h>

struct Qdisc_ops;
struct qdisc_walker;
struct tcf_walker;
struct module;

struct qdisc_rate_table {
	struct tc_ratespec rate;
	u32 data[256];
	struct qdisc_rate_table *next;
	int refcnt;
};

enum qdisc_state_t {
	__QDISC_STATE_SCHED,
	__QDISC_STATE_DEACTIVATED,
	__QDISC_STATE_THROTTLED,
};

struct qdisc_size_table {
	struct rcu_head rcu;
	struct list_head list;
	struct tc_sizespec szopts;
	int refcnt;
	u16 data[];
};

struct Qdisc {
	int (*enqueue)(struct sk_buff *skb, struct Qdisc *dev);
	struct sk_buff *(*dequeue)(struct Qdisc *dev);
	unsigned int flags;
#define TCQ_F_BUILTIN		1
#define TCQ_F_INGRESS		2
#define TCQ_F_CAN_BYPASS	4
#define TCQ_F_MQROOT		8
#define TCQ_F_ONETXQUEUE	0x10 /* dequeue_skb() can assume all skbs are for
				      * q->dev_queue: it can test
				      * netif_xmit_frozen_or_stopped() before
				      * dequeueing the next packet.
				      * It's true for MQ/MQPRIO slaves, or a
				      * non-multiqueue device.
				      */
#define TCQ_F_WARN_NONWC	(1 << 16)
#define TCQ_F_CPUSTATS		0x20 /* run using percpu statistics */
#define TCQ_F_NOPARENT		0x40 /* root of its hierarchy:
				      * qdisc_tree_decrease_qlen() should stop.
				      */
	u32 limit;
	const struct Qdisc_ops *ops;
	struct qdisc_size_table __rcu *stab;
	struct list_head list;
	u32 handle;
	u32 parent;
	int (*reshape_fail)(struct sk_buff *skb,
			    struct Qdisc *q);
	void *u32_node;
	struct netdev_queue *dev_queue;
	struct gnet_stats_rate_est64 rate_est;
	struct gnet_stats_basic_cpu __percpu *cpu_bstats;
	struct gnet_stats_queue __percpu *cpu_qstats;
	struct Qdisc *next_sched;
	struct sk_buff *gso_skb;
	/*
	 * For performance's sake on SMP, we put highly modified fields at the end.
	 */
	unsigned long state;
	struct sk_buff_head q;
	struct gnet_stats_basic_packed bstats;
	seqcount_t running;
	struct gnet_stats_queue qstats;
	struct rcu_head rcu_head;
	int padded;
	atomic_t refcnt;

	spinlock_t busylock ____cacheline_aligned_in_smp;
};

static inline bool qdisc_is_running(const struct Qdisc *qdisc)
{
	return (raw_read_seqcount(&qdisc->running) & 1) ? true : false;
}

static inline bool qdisc_run_begin(struct Qdisc *qdisc)
{
	if (qdisc_is_running(qdisc))
		return false;
	write_seqcount_begin(&qdisc->running);
	return true;
}

static inline void qdisc_run_end(struct Qdisc *qdisc)
{
	write_seqcount_end(&qdisc->running);
}
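
/*
 * Example (illustrative sketch, not part of the original header): the
 * core transmit path uses the running seqcount as a trylock, so that
 * only one CPU executes a given qdisc at a time; cf. __dev_xmit_skb().
 */
static inline void example_run_qdisc_once(struct Qdisc *q)
{
	if (qdisc_run_begin(q)) {
		/* owner: dequeue and transmit packets here, as
		 * __qdisc_run() does, then release ownership.
		 */
		qdisc_run_end(q);
	}
	/* CPUs that lose the race just enqueue; the owner drains for them */
}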

static inline bool qdisc_may_bulk(const struct Qdisc *qdisc)
{
	return qdisc->flags & TCQ_F_ONETXQUEUE;
}

static inline int qdisc_avail_bulklimit(const struct netdev_queue *txq)
{
#ifdef CONFIG_BQL
	/* Non-BQL migrated drivers will return 0, too. */
	return dql_avail(&txq->dql);
#else
	return 0;
#endif
}

static inline bool qdisc_is_throttled(const struct Qdisc *qdisc)
{
	return test_bit(__QDISC_STATE_THROTTLED, &qdisc->state) ? true : false;
}

static inline void qdisc_throttled(struct Qdisc *qdisc)
{
	set_bit(__QDISC_STATE_THROTTLED, &qdisc->state);
}

static inline void qdisc_unthrottled(struct Qdisc *qdisc)
{
	clear_bit(__QDISC_STATE_THROTTLED, &qdisc->state);
}

struct Qdisc_class_ops {
	/* Child qdisc manipulation */
	struct netdev_queue *(*select_queue)(struct Qdisc *, struct tcmsg *);
	int (*graft)(struct Qdisc *, unsigned long cl,
		     struct Qdisc *, struct Qdisc **);
	struct Qdisc *(*leaf)(struct Qdisc *, unsigned long cl);
	void (*qlen_notify)(struct Qdisc *, unsigned long);

	/* Class manipulation routines */
	unsigned long (*get)(struct Qdisc *, u32 classid);
	void (*put)(struct Qdisc *, unsigned long);
	int (*change)(struct Qdisc *, u32, u32,
		      struct nlattr **, unsigned long *);
	int (*delete)(struct Qdisc *, unsigned long);
	void (*walk)(struct Qdisc *, struct qdisc_walker *arg);

	/* Filter manipulation */
	struct tcf_proto __rcu **(*tcf_chain)(struct Qdisc *, unsigned long);
	unsigned long (*bind_tcf)(struct Qdisc *, unsigned long,
				  u32 classid);
	void (*unbind_tcf)(struct Qdisc *, unsigned long);

	/* rtnetlink specific */
	int (*dump)(struct Qdisc *, unsigned long,
		    struct sk_buff *skb, struct tcmsg *);
	int (*dump_stats)(struct Qdisc *, unsigned long,
			  struct gnet_dump *);
};

struct Qdisc_ops {
	struct Qdisc_ops *next;
	const struct Qdisc_class_ops *cl_ops;
	char id[IFNAMSIZ];
	int priv_size;

	int (*enqueue)(struct sk_buff *, struct Qdisc *);
	struct sk_buff *(*dequeue)(struct Qdisc *);
	struct sk_buff *(*peek)(struct Qdisc *);
	unsigned int (*drop)(struct Qdisc *);

	int (*init)(struct Qdisc *, struct nlattr *arg);
	void (*reset)(struct Qdisc *);
	void (*destroy)(struct Qdisc *);
	int (*change)(struct Qdisc *, struct nlattr *arg);
	void (*attach)(struct Qdisc *);

	int (*dump)(struct Qdisc *, struct sk_buff *);
	int (*dump_stats)(struct Qdisc *, struct gnet_dump *);

	struct module *owner;
};
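
/*
 * Example (sketch with hypothetical names): a minimal classless FIFO
 * wires its handlers into a Qdisc_ops table like the one below; the
 * real thing lives in net/sched/sch_fifo.c.
 */
static inline int example_fifo_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	if (likely(skb_queue_len(&sch->q) < sch->limit)) {
		__skb_queue_tail(&sch->q, skb);
		return NET_XMIT_SUCCESS;
	}

	kfree_skb(skb);
	return NET_XMIT_DROP;
}

static inline struct sk_buff *example_fifo_dequeue(struct Qdisc *sch)
{
	return __skb_dequeue(&sch->q);
}

/* ...and the ops table a module would hand to register_qdisc():
 *
 *	static struct Qdisc_ops example_fifo_ops __read_mostly = {
 *		.id		= "example_fifo",
 *		.enqueue	= example_fifo_enqueue,
 *		.dequeue	= example_fifo_dequeue,
 *		.owner		= THIS_MODULE,
 *	};
 */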

struct tcf_result {
	unsigned long class;
	u32 classid;
};

struct tcf_proto_ops {
	struct list_head head;
	char kind[IFNAMSIZ];

	int (*classify)(struct sk_buff *,
			const struct tcf_proto *,
			struct tcf_result *);
	int (*init)(struct tcf_proto *);
	bool (*destroy)(struct tcf_proto *, bool);

	unsigned long (*get)(struct tcf_proto *, u32 handle);
	int (*change)(struct net *net, struct sk_buff *,
		      struct tcf_proto *, unsigned long,
		      u32 handle, struct nlattr **,
		      unsigned long *, bool);
	int (*delete)(struct tcf_proto *, unsigned long);
	void (*walk)(struct tcf_proto *, struct tcf_walker *arg);

	/* rtnetlink specific */
	int (*dump)(struct net *, struct tcf_proto *, unsigned long,
		    struct sk_buff *skb, struct tcmsg *);

	struct module *owner;
};

struct tcf_proto {
	/* Fast access part */
	struct tcf_proto __rcu *next;
	void __rcu *root;
	int (*classify)(struct sk_buff *,
			const struct tcf_proto *,
			struct tcf_result *);
	__be16 protocol;

	/* All the rest */
	u32 prio;
	u32 classid;
	struct Qdisc *q;
	void *data;
	const struct tcf_proto_ops *ops;
	struct rcu_head rcu;
};
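
/*
 * Example (sketch, not part of this header): the classifier chain is
 * walked in priority order until some filter returns a verdict; this
 * is roughly what tc_classify() does.
 */
static inline int example_classify(struct sk_buff *skb,
				   const struct tcf_proto *tp,
				   struct tcf_result *res)
{
	int err;

	for (; tp; tp = rcu_dereference_bh(tp->next)) {
		if (tp->protocol != skb->protocol &&
		    tp->protocol != htons(ETH_P_ALL))
			continue;
		err = tp->classify(skb, tp, res);
		if (err >= 0)
			return err;	/* a verdict such as TC_ACT_OK */
	}

	return -1;	/* no filter matched */
}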

struct qdisc_skb_cb {
	unsigned int pkt_len;
	u16 slave_dev_queue_mapping;
	u16 tc_classid;
#define QDISC_CB_PRIV_LEN 20
	unsigned char data[QDISC_CB_PRIV_LEN];
};

static inline void qdisc_cb_private_validate(const struct sk_buff *skb, int sz)
{
	struct qdisc_skb_cb *qcb;

	BUILD_BUG_ON(sizeof(skb->cb) < offsetof(struct qdisc_skb_cb, data) + sz);
	BUILD_BUG_ON(sizeof(qcb->data) < sz);
}
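
/*
 * Example (hypothetical struct, modeled on sch_netem): a qdisc keeps
 * its own per-packet state in the cb[] private area and validates the
 * size at compile time via qdisc_cb_private_validate():
 */
struct example_skb_cb {
	u32 token;	/* hypothetical per-packet state */
};

static inline struct example_skb_cb *example_skb_cb(struct sk_buff *skb)
{
	qdisc_cb_private_validate(skb, sizeof(struct example_skb_cb));
	return (struct example_skb_cb *)((struct qdisc_skb_cb *)skb->cb)->data;
}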

static inline int qdisc_qlen(const struct Qdisc *q)
{
	return q->q.qlen;
}

static inline struct qdisc_skb_cb *qdisc_skb_cb(const struct sk_buff *skb)
{
	return (struct qdisc_skb_cb *)skb->cb;
}

static inline spinlock_t *qdisc_lock(struct Qdisc *qdisc)
{
	return &qdisc->q.lock;
}

static inline struct Qdisc *qdisc_root(const struct Qdisc *qdisc)
{
	struct Qdisc *q = rcu_dereference_rtnl(qdisc->dev_queue->qdisc);

	return q;
}

static inline struct Qdisc *qdisc_root_sleeping(const struct Qdisc *qdisc)
{
	return qdisc->dev_queue->qdisc_sleeping;
}

/* The qdisc root lock is a mechanism by which the top level
 * of a qdisc tree can be locked from any qdisc node in the
 * forest. This allows changing the configuration of some
 * aspect of the qdisc tree while blocking out asynchronous
 * qdisc access in the packet processing paths.
 *
 * It is only legal to do this when the root will not change
 * on us. Otherwise we'll potentially lock the wrong qdisc
 * root. This is enforced by holding the RTNL semaphore, which
 * all users of this lock accessor must do.
 */
static inline spinlock_t *qdisc_root_lock(const struct Qdisc *qdisc)
{
	struct Qdisc *root = qdisc_root(qdisc);

	ASSERT_RTNL();
	return qdisc_lock(root);
}

static inline spinlock_t *qdisc_root_sleeping_lock(const struct Qdisc *qdisc)
{
	struct Qdisc *root = qdisc_root_sleeping(qdisc);

	ASSERT_RTNL();
	return qdisc_lock(root);
}

static inline seqcount_t *qdisc_root_sleeping_running(const struct Qdisc *qdisc)
{
	struct Qdisc *root = qdisc_root_sleeping(qdisc);

	ASSERT_RTNL();
	return &root->running;
}

static inline struct net_device *qdisc_dev(const struct Qdisc *qdisc)
{
	return qdisc->dev_queue->dev;
}

static inline void sch_tree_lock(const struct Qdisc *q)
{
	spin_lock_bh(qdisc_root_sleeping_lock(q));
}

static inline void sch_tree_unlock(const struct Qdisc *q)
{
	spin_unlock_bh(qdisc_root_sleeping_lock(q));
}

#define tcf_tree_lock(tp)	sch_tree_lock((tp)->q)
#define tcf_tree_unlock(tp)	sch_tree_unlock((tp)->q)

extern struct Qdisc noop_qdisc;
extern struct Qdisc_ops noop_qdisc_ops;
extern struct Qdisc_ops pfifo_fast_ops;
extern struct Qdisc_ops mq_qdisc_ops;
extern struct Qdisc_ops noqueue_qdisc_ops;
extern const struct Qdisc_ops *default_qdisc_ops;

static inline const struct Qdisc_ops *
get_default_qdisc_ops(const struct net_device *dev, int ntx)
{
	return ntx < dev->real_num_tx_queues ?
			default_qdisc_ops : &pfifo_fast_ops;
}
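
/*
 * Example (sketch of how sch_mq uses this): an mq root allocates one
 * default child per hardware queue at init time, so queues beyond
 * real_num_tx_queues fall back to pfifo_fast:
 *
 *	qdisc = qdisc_create_dflt(netdev_get_tx_queue(dev, ntx),
 *				  get_default_qdisc_ops(dev, ntx),
 *				  TC_H_MAKE(TC_H_MAJ(sch->handle),
 *					    TC_H_MIN(ntx + 1)));
 */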

struct Qdisc_class_common {
	u32 classid;
	struct hlist_node hnode;
};

struct Qdisc_class_hash {
	struct hlist_head *hash;
	unsigned int hashsize;
	unsigned int hashmask;
	unsigned int hashelems;
};

static inline unsigned int qdisc_class_hash(u32 id, u32 mask)
{
	id ^= id >> 8;
	id ^= id >> 4;
	return id & mask;
}

static inline struct Qdisc_class_common *
qdisc_class_find(const struct Qdisc_class_hash *hash, u32 id)
{
	struct Qdisc_class_common *cl;
	unsigned int h;

	h = qdisc_class_hash(id, hash->hashmask);
	hlist_for_each_entry(cl, &hash->hash[h], hnode) {
		if (cl->classid == id)
			return cl;
	}
	return NULL;
}
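
/*
 * Example (sketch, hypothetical types): a classful qdisc embeds a
 * Qdisc_class_common at the head of its per-class struct and resolves
 * classids with qdisc_class_find(), in the style of htb/hfsc:
 */
struct example_class {
	struct Qdisc_class_common common;
	/* scheduler-specific per-class state would follow */
};

static inline struct example_class *
example_class_lookup(struct Qdisc_class_hash *hash, u32 classid)
{
	struct Qdisc_class_common *clc;

	clc = qdisc_class_find(hash, classid);
	if (clc == NULL)
		return NULL;

	return container_of(clc, struct example_class, common);
}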

int qdisc_class_hash_init(struct Qdisc_class_hash *);
void qdisc_class_hash_insert(struct Qdisc_class_hash *,
			     struct Qdisc_class_common *);
void qdisc_class_hash_remove(struct Qdisc_class_hash *,
			     struct Qdisc_class_common *);
void qdisc_class_hash_grow(struct Qdisc *, struct Qdisc_class_hash *);
void qdisc_class_hash_destroy(struct Qdisc_class_hash *);

void dev_init_scheduler(struct net_device *dev);
void dev_shutdown(struct net_device *dev);
void dev_activate(struct net_device *dev);
void dev_deactivate(struct net_device *dev);
void dev_deactivate_many(struct list_head *head);
struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue,
			      struct Qdisc *qdisc);
void qdisc_reset(struct Qdisc *qdisc);
void qdisc_destroy(struct Qdisc *qdisc);
void qdisc_tree_reduce_backlog(struct Qdisc *qdisc, unsigned int n,
			       unsigned int len);
struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
			  const struct Qdisc_ops *ops);
struct Qdisc *qdisc_create_dflt(struct netdev_queue *dev_queue,
				const struct Qdisc_ops *ops, u32 parentid);
void __qdisc_calculate_pkt_len(struct sk_buff *skb,
			       const struct qdisc_size_table *stab);
bool tcf_destroy(struct tcf_proto *tp, bool force);
void tcf_destroy_chain(struct tcf_proto __rcu **fl);
int skb_do_redirect(struct sk_buff *);

static inline bool skb_at_tc_ingress(const struct sk_buff *skb)
{
#ifdef CONFIG_NET_CLS_ACT
	return G_TC_AT(skb->tc_verd) & AT_INGRESS;
#else
	return false;
#endif
}

/* Reset all TX qdiscs of a device at or above a given queue index. */
static inline void qdisc_reset_all_tx_gt(struct net_device *dev, unsigned int i)
{
	struct Qdisc *qdisc;

	for (; i < dev->num_tx_queues; i++) {
		qdisc = rtnl_dereference(netdev_get_tx_queue(dev, i)->qdisc);
		if (qdisc) {
			spin_lock_bh(qdisc_lock(qdisc));
			qdisc_reset(qdisc);
			spin_unlock_bh(qdisc_lock(qdisc));
		}
	}
}

static inline void qdisc_reset_all_tx(struct net_device *dev)
{
	qdisc_reset_all_tx_gt(dev, 0);
}

/* Are all TX queues of the device empty? */
static inline bool qdisc_all_tx_empty(const struct net_device *dev)
{
	unsigned int i;

	rcu_read_lock();
	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
		const struct Qdisc *q = rcu_dereference(txq->qdisc);

		if (q->q.qlen) {
			rcu_read_unlock();
			return false;
		}
	}
	rcu_read_unlock();
	return true;
}

/* Are any of the TX qdiscs changing? */
static inline bool qdisc_tx_changing(const struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

		if (rcu_access_pointer(txq->qdisc) != txq->qdisc_sleeping)
			return true;
	}
	return false;
}

/* Is the device using the noop qdisc on all queues? */
static inline bool qdisc_tx_is_noop(const struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

		if (rcu_access_pointer(txq->qdisc) != &noop_qdisc)
			return false;
	}
	return true;
}

static inline unsigned int qdisc_pkt_len(const struct sk_buff *skb)
{
	return qdisc_skb_cb(skb)->pkt_len;
}

/* additional qdisc xmit flags (NET_XMIT_MASK in linux/netdevice.h) */
enum net_xmit_qdisc_t {
	__NET_XMIT_STOLEN = 0x00010000,
	__NET_XMIT_BYPASS = 0x00020000,
};

#ifdef CONFIG_NET_CLS_ACT
#define net_xmit_drop_count(e)	((e) & __NET_XMIT_STOLEN ? 0 : 1)
#else
#define net_xmit_drop_count(e)	(1)
#endif

static inline void qdisc_calculate_pkt_len(struct sk_buff *skb,
					   const struct Qdisc *sch)
{
#ifdef CONFIG_NET_SCHED
	struct qdisc_size_table *stab = rcu_dereference_bh(sch->stab);

	if (stab)
		__qdisc_calculate_pkt_len(skb, stab);
#endif
}

static inline int qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	qdisc_calculate_pkt_len(skb, sch);
	return sch->enqueue(skb, sch);
}

static inline bool qdisc_is_percpu_stats(const struct Qdisc *q)
{
	return q->flags & TCQ_F_CPUSTATS;
}

static inline void _bstats_update(struct gnet_stats_basic_packed *bstats,
				  __u64 bytes, __u32 packets)
{
	bstats->bytes += bytes;
	bstats->packets += packets;
}

static inline void bstats_update(struct gnet_stats_basic_packed *bstats,
				 const struct sk_buff *skb)
{
	_bstats_update(bstats,
		       qdisc_pkt_len(skb),
		       skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1);
}

static inline void _bstats_cpu_update(struct gnet_stats_basic_cpu *bstats,
				      __u64 bytes, __u32 packets)
{
	u64_stats_update_begin(&bstats->syncp);
	_bstats_update(&bstats->bstats, bytes, packets);
	u64_stats_update_end(&bstats->syncp);
}

static inline void bstats_cpu_update(struct gnet_stats_basic_cpu *bstats,
				     const struct sk_buff *skb)
{
	u64_stats_update_begin(&bstats->syncp);
	bstats_update(&bstats->bstats, skb);
	u64_stats_update_end(&bstats->syncp);
}

static inline void qdisc_bstats_cpu_update(struct Qdisc *sch,
					   const struct sk_buff *skb)
{
	bstats_cpu_update(this_cpu_ptr(sch->cpu_bstats), skb);
}

static inline void qdisc_bstats_update(struct Qdisc *sch,
				       const struct sk_buff *skb)
{
	bstats_update(&sch->bstats, skb);
}

static inline void qdisc_qstats_backlog_dec(struct Qdisc *sch,
					    const struct sk_buff *skb)
{
	sch->qstats.backlog -= qdisc_pkt_len(skb);
}

static inline void qdisc_qstats_backlog_inc(struct Qdisc *sch,
					    const struct sk_buff *skb)
{
	sch->qstats.backlog += qdisc_pkt_len(skb);
}

static inline void __qdisc_qstats_drop(struct Qdisc *sch, int count)
{
	sch->qstats.drops += count;
}

static inline void qstats_drop_inc(struct gnet_stats_queue *qstats)
{
	qstats->drops++;
}

static inline void qstats_overlimit_inc(struct gnet_stats_queue *qstats)
{
	qstats->overlimits++;
}

static inline void qdisc_qstats_drop(struct Qdisc *sch)
{
	qstats_drop_inc(&sch->qstats);
}

static inline void qdisc_qstats_cpu_drop(struct Qdisc *sch)
{
	qstats_drop_inc(this_cpu_ptr(sch->cpu_qstats));
}

static inline void qdisc_qstats_overlimit(struct Qdisc *sch)
{
	sch->qstats.overlimits++;
}

static inline int __qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch,
				       struct sk_buff_head *list)
{
	__skb_queue_tail(list, skb);
	qdisc_qstats_backlog_inc(sch, skb);

	return NET_XMIT_SUCCESS;
}

static inline int qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch)
{
	return __qdisc_enqueue_tail(skb, sch, &sch->q);
}

static inline struct sk_buff *__qdisc_dequeue_head(struct Qdisc *sch,
						   struct sk_buff_head *list)
{
	struct sk_buff *skb = __skb_dequeue(list);

	if (likely(skb != NULL)) {
		qdisc_qstats_backlog_dec(sch, skb);
		qdisc_bstats_update(sch, skb);
	}

	return skb;
}

static inline struct sk_buff *qdisc_dequeue_head(struct Qdisc *sch)
{
	return __qdisc_dequeue_head(sch, &sch->q);
}

static inline unsigned int __qdisc_queue_drop_head(struct Qdisc *sch,
						   struct sk_buff_head *list)
{
	struct sk_buff *skb = __skb_dequeue(list);

	if (likely(skb != NULL)) {
		unsigned int len = qdisc_pkt_len(skb);

		qdisc_qstats_backlog_dec(sch, skb);
		kfree_skb(skb);
		return len;
	}

	return 0;
}

static inline unsigned int qdisc_queue_drop_head(struct Qdisc *sch)
{
	return __qdisc_queue_drop_head(sch, &sch->q);
}

static inline struct sk_buff *__qdisc_dequeue_tail(struct Qdisc *sch,
						   struct sk_buff_head *list)
{
	struct sk_buff *skb = __skb_dequeue_tail(list);

	if (likely(skb != NULL))
		qdisc_qstats_backlog_dec(sch, skb);

	return skb;
}

static inline struct sk_buff *qdisc_dequeue_tail(struct Qdisc *sch)
{
	return __qdisc_dequeue_tail(sch, &sch->q);
}

static inline struct sk_buff *qdisc_peek_head(struct Qdisc *sch)
{
	return skb_peek(&sch->q);
}

/* generic pseudo peek method for non-work-conserving qdiscs */
static inline struct sk_buff *qdisc_peek_dequeued(struct Qdisc *sch)
{
	/* we can reuse ->gso_skb because peek isn't called for root qdiscs */
	if (!sch->gso_skb) {
		sch->gso_skb = sch->dequeue(sch);
		if (sch->gso_skb)
			/* it's still part of the queue */
			sch->q.qlen++;
	}

	return sch->gso_skb;
}

/* use instead of qdisc->dequeue() for all qdiscs queried with ->peek() */
static inline struct sk_buff *qdisc_dequeue_peeked(struct Qdisc *sch)
{
	struct sk_buff *skb = sch->gso_skb;

	if (skb) {
		sch->gso_skb = NULL;
		sch->q.qlen--;
	} else {
		skb = sch->dequeue(sch);
	}

	return skb;
}
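
/*
 * Example (sketch with a hypothetical conformance check): a shaping
 * qdisc such as tbf peeks at the head packet first and only commits
 * to removing it once it may actually be sent:
 */
static inline struct sk_buff *example_shaper_dequeue(struct Qdisc *sch,
						     bool tokens_available)
{
	struct sk_buff *skb = qdisc_peek_dequeued(sch);

	if (skb == NULL || !tokens_available)
		return NULL;	/* head stays parked in ->gso_skb */

	return qdisc_dequeue_peeked(sch);
}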

static inline void __qdisc_reset_queue(struct Qdisc *sch,
				       struct sk_buff_head *list)
{
	/*
	 * We do not know the backlog in bytes of this list; it
	 * is up to the caller to correct it.
	 */
	__skb_queue_purge(list);
}

static inline void qdisc_reset_queue(struct Qdisc *sch)
{
	__qdisc_reset_queue(sch, &sch->q);
	sch->qstats.backlog = 0;
}

static inline struct Qdisc *qdisc_replace(struct Qdisc *sch, struct Qdisc *new,
					  struct Qdisc **pold)
{
	struct Qdisc *old;

	sch_tree_lock(sch);
	old = *pold;
	*pold = new;
	if (old != NULL) {
		qdisc_tree_reduce_backlog(old, old->q.qlen, old->qstats.backlog);
		qdisc_reset(old);
	}
	sch_tree_unlock(sch);

	return old;
}
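
/*
 * Example (sketch): a classful qdisc's ->graft() handler typically
 * reduces to a single qdisc_replace() call on the child slot; "slot"
 * here stands in for a pointer into the qdisc's private data.
 */
static inline int example_graft(struct Qdisc *sch, struct Qdisc *new,
				struct Qdisc **slot, struct Qdisc **old)
{
	if (new == NULL)
		new = &noop_qdisc;

	*old = qdisc_replace(sch, new, slot);
	return 0;
}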

static inline unsigned int __qdisc_queue_drop(struct Qdisc *sch,
					      struct sk_buff_head *list)
{
	struct sk_buff *skb = __qdisc_dequeue_tail(sch, list);

	if (likely(skb != NULL)) {
		unsigned int len = qdisc_pkt_len(skb);

		kfree_skb(skb);
		return len;
	}

	return 0;
}

static inline unsigned int qdisc_queue_drop(struct Qdisc *sch)
{
	return __qdisc_queue_drop(sch, &sch->q);
}

static inline int qdisc_drop(struct sk_buff *skb, struct Qdisc *sch)
{
	kfree_skb(skb);
	qdisc_qstats_drop(sch);

	return NET_XMIT_DROP;
}

static inline int qdisc_reshape_fail(struct sk_buff *skb, struct Qdisc *sch)
{
	qdisc_qstats_drop(sch);

#ifdef CONFIG_NET_CLS_ACT
	if (sch->reshape_fail == NULL || sch->reshape_fail(skb, sch))
		goto drop;

	return NET_XMIT_SUCCESS;

drop:
#endif
	kfree_skb(skb);
	return NET_XMIT_DROP;
}

/* Length to Time (L2T) lookup in a qdisc_rate_table, to determine how
 * long it will take to send a packet given its size.
 */
static inline u32 qdisc_l2t(struct qdisc_rate_table *rtab, unsigned int pktlen)
{
	int slot = pktlen + rtab->rate.cell_align + rtab->rate.overhead;

	if (slot < 0)
		slot = 0;
	slot >>= rtab->rate.cell_log;
	if (slot > 255)
		return rtab->data[255] * (slot >> 8) + rtab->data[slot & 0xFF];

	return rtab->data[slot];
}
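
/*
 * Worked example (illustrative numbers): with cell_log = 3 each slot of
 * rtab->data[] covers 8 bytes, so a 1000 byte packet with no alignment
 * or overhead lands in slot 1000 >> 3 = 125, and rtab->data[125] holds
 * the precomputed transmission time for that size class.
 */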

struct psched_ratecfg {
	u64 rate_bytes_ps; /* bytes per second */
	u32 mult;
	u16 overhead;
	u8 linklayer;
	u8 shift;
};

static inline u64 psched_l2t_ns(const struct psched_ratecfg *r,
				unsigned int len)
{
	len += r->overhead;

	if (unlikely(r->linklayer == TC_LINKLAYER_ATM))
		return ((u64)(DIV_ROUND_UP(len, 48) * 53) * r->mult) >> r->shift;

	return ((u64)len * r->mult) >> r->shift;
}
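
/*
 * Worked example (illustrative numbers): on TC_LINKLAYER_ATM a 1000
 * byte frame needs DIV_ROUND_UP(1000, 48) = 21 cells of 53 bytes, so
 * 1113 bytes are serialized on the wire before the mult/shift pair
 * (precomputed by psched_ratecfg_precompute()) scales bytes to ns.
 */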

void psched_ratecfg_precompute(struct psched_ratecfg *r,
			       const struct tc_ratespec *conf,
			       u64 rate64);

static inline void psched_ratecfg_getrate(struct tc_ratespec *res,
					  const struct psched_ratecfg *r)
{
	memset(res, 0, sizeof(*res));

	/* legacy struct tc_ratespec has a 32bit @rate field;
	 * a qdisc using a 64bit rate should add new attributes
	 * in order to maintain compatibility.
	 */
	res->rate = min_t(u64, r->rate_bytes_ps, ~0U);

	res->overhead = r->overhead;
	res->linklayer = (r->linklayer & TC_LINKLAYER_MASK);
}

#endif