/* sch_generic.h */
#ifndef __NET_SCHED_GENERIC_H
#define __NET_SCHED_GENERIC_H

#include <linux/netdevice.h>
#include <linux/types.h>
#include <linux/rcupdate.h>
#include <linux/pkt_sched.h>
#include <linux/pkt_cls.h>
#include <linux/percpu.h>
#include <linux/dynamic_queue_limits.h>
#include <net/gen_stats.h>
#include <net/rtnetlink.h>
struct Qdisc_ops;
struct qdisc_walker;
struct tcf_walker;
struct module;

struct qdisc_rate_table {
        struct tc_ratespec rate;
        u32             data[256];
        struct qdisc_rate_table *next;
        int             refcnt;
};

enum qdisc_state_t {
        __QDISC_STATE_SCHED,
        __QDISC_STATE_DEACTIVATED,
        __QDISC_STATE_THROTTLED,
};

/*
 * following bits are only changed while qdisc lock is held
 */
enum qdisc___state_t {
        __QDISC___STATE_RUNNING = 1,
};

struct qdisc_size_table {
        struct rcu_head         rcu;
        struct list_head        list;
        struct tc_sizespec      szopts;
        int                     refcnt;
        u16                     data[];
};
struct Qdisc {
        int                     (*enqueue)(struct sk_buff *skb, struct Qdisc *dev);
        struct sk_buff *        (*dequeue)(struct Qdisc *dev);
        unsigned int            flags;
#define TCQ_F_BUILTIN           1
#define TCQ_F_INGRESS           2
#define TCQ_F_CAN_BYPASS        4
#define TCQ_F_MQROOT            8
#define TCQ_F_ONETXQUEUE        0x10 /* dequeue_skb() can assume all skbs are for
                                      * q->dev_queue: it can test
                                      * netif_xmit_frozen_or_stopped() before
                                      * dequeueing the next packet.
                                      * It's true for MQ/MQPRIO slaves, or a
                                      * non-multiqueue device.
                                      */
#define TCQ_F_WARN_NONWC        (1 << 16)
#define TCQ_F_CPUSTATS          0x20 /* run using percpu statistics */
        u32                     limit;
        const struct Qdisc_ops  *ops;
        struct qdisc_size_table __rcu *stab;
        struct list_head        list;
        u32                     handle;
        u32                     parent;
        int                     (*reshape_fail)(struct sk_buff *skb,
                                                struct Qdisc *q);
        void                    *u32_node;

        /* This field is deprecated, but it is still used by CBQ
         * and it will live until a better solution is invented.
         */
        struct Qdisc            *__parent;
        struct netdev_queue     *dev_queue;

        struct gnet_stats_rate_est64 rate_est;
        struct Qdisc            *next_sched;
        struct sk_buff          *gso_skb;
        /*
         * For performance's sake on SMP, we put highly modified fields at the end.
         */
        unsigned long           state;
        struct sk_buff_head     q;
        union {
                struct gnet_stats_basic_packed bstats;
                struct gnet_stats_basic_cpu __percpu *cpu_bstats;
        } __packed;
        unsigned int            __state;
        union {
                struct gnet_stats_queue qstats;
                struct gnet_stats_queue __percpu *cpu_qstats;
        } __packed;
        struct rcu_head         rcu_head;
        int                     padded;
        atomic_t                refcnt;

        spinlock_t              busylock ____cacheline_aligned_in_smp;
};
static inline bool qdisc_is_running(const struct Qdisc *qdisc)
{
        return (qdisc->__state & __QDISC___STATE_RUNNING) ? true : false;
}

static inline bool qdisc_run_begin(struct Qdisc *qdisc)
{
        if (qdisc_is_running(qdisc))
                return false;
        qdisc->__state |= __QDISC___STATE_RUNNING;
        return true;
}

static inline void qdisc_run_end(struct Qdisc *qdisc)
{
        qdisc->__state &= ~__QDISC___STATE_RUNNING;
}
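
/* Usage sketch (illustrative, not part of this header): the core ensures
 * that at most one CPU runs a qdisc's dequeue loop at a time by taking
 * ownership through the helpers above:
 *
 *      if (qdisc_run_begin(q)) {
 *              ... dequeue packets and hand them to the driver ...
 *              qdisc_run_end(q);
 *      }
 *
 * ->__state is only modified with the qdisc lock held (see qdisc___state_t
 * above), which is why the plain, non-atomic |= and &= are sufficient.
 */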

static inline bool qdisc_may_bulk(const struct Qdisc *qdisc)
{
        return qdisc->flags & TCQ_F_ONETXQUEUE;
}

static inline int qdisc_avail_bulklimit(const struct netdev_queue *txq)
{
#ifdef CONFIG_BQL
        /* Non-BQL migrated drivers will return 0, too. */
        return dql_avail(&txq->dql);
#else
        return 0;
#endif
}

static inline bool qdisc_is_throttled(const struct Qdisc *qdisc)
{
        return test_bit(__QDISC_STATE_THROTTLED, &qdisc->state) ? true : false;
}

static inline void qdisc_throttled(struct Qdisc *qdisc)
{
        set_bit(__QDISC_STATE_THROTTLED, &qdisc->state);
}

static inline void qdisc_unthrottled(struct Qdisc *qdisc)
{
        clear_bit(__QDISC_STATE_THROTTLED, &qdisc->state);
}
struct Qdisc_class_ops {
        /* Child qdisc manipulation */
        struct netdev_queue *   (*select_queue)(struct Qdisc *, struct tcmsg *);
        int                     (*graft)(struct Qdisc *, unsigned long cl,
                                         struct Qdisc *, struct Qdisc **);
        struct Qdisc *          (*leaf)(struct Qdisc *, unsigned long cl);
        void                    (*qlen_notify)(struct Qdisc *, unsigned long);

        /* Class manipulation routines */
        unsigned long           (*get)(struct Qdisc *, u32 classid);
        void                    (*put)(struct Qdisc *, unsigned long);
        int                     (*change)(struct Qdisc *, u32, u32,
                                          struct nlattr **, unsigned long *);
        int                     (*delete)(struct Qdisc *, unsigned long);
        void                    (*walk)(struct Qdisc *, struct qdisc_walker *arg);

        /* Filter manipulation */
        struct tcf_proto __rcu ** (*tcf_chain)(struct Qdisc *, unsigned long);
        unsigned long           (*bind_tcf)(struct Qdisc *, unsigned long,
                                            u32 classid);
        void                    (*unbind_tcf)(struct Qdisc *, unsigned long);

        /* rtnetlink specific */
        int                     (*dump)(struct Qdisc *, unsigned long,
                                        struct sk_buff *skb, struct tcmsg *);
        int                     (*dump_stats)(struct Qdisc *, unsigned long,
                                              struct gnet_dump *);
};

struct Qdisc_ops {
        struct Qdisc_ops        *next;
        const struct Qdisc_class_ops *cl_ops;
        char                    id[IFNAMSIZ];
        int                     priv_size;

        int                     (*enqueue)(struct sk_buff *, struct Qdisc *);
        struct sk_buff *        (*dequeue)(struct Qdisc *);
        struct sk_buff *        (*peek)(struct Qdisc *);
        unsigned int            (*drop)(struct Qdisc *);

        int                     (*init)(struct Qdisc *, struct nlattr *arg);
        void                    (*reset)(struct Qdisc *);
        void                    (*destroy)(struct Qdisc *);
        int                     (*change)(struct Qdisc *, struct nlattr *arg);
        void                    (*attach)(struct Qdisc *);

        int                     (*dump)(struct Qdisc *, struct sk_buff *);
        int                     (*dump_stats)(struct Qdisc *, struct gnet_dump *);

        struct module           *owner;
};
struct tcf_result {
        unsigned long   class;
        u32             classid;
};

struct tcf_proto_ops {
        struct list_head        head;
        char                    kind[IFNAMSIZ];

        int                     (*classify)(struct sk_buff *,
                                            const struct tcf_proto *,
                                            struct tcf_result *);
        int                     (*init)(struct tcf_proto *);
        void                    (*destroy)(struct tcf_proto *);

        unsigned long           (*get)(struct tcf_proto *, u32 handle);
        int                     (*change)(struct net *net, struct sk_buff *,
                                          struct tcf_proto *, unsigned long,
                                          u32 handle, struct nlattr **,
                                          unsigned long *, bool);
        int                     (*delete)(struct tcf_proto *, unsigned long);
        void                    (*walk)(struct tcf_proto *, struct tcf_walker *arg);

        /* rtnetlink specific */
        int                     (*dump)(struct net *, struct tcf_proto *, unsigned long,
                                        struct sk_buff *skb, struct tcmsg *);

        struct module           *owner;
};

struct tcf_proto {
        /* Fast access part */
        struct tcf_proto __rcu  *next;
        void __rcu              *root;
        int                     (*classify)(struct sk_buff *,
                                            const struct tcf_proto *,
                                            struct tcf_result *);
        __be16                  protocol;

        /* All the rest */
        u32                     prio;
        u32                     classid;
        struct Qdisc            *q;
        void                    *data;
        const struct tcf_proto_ops *ops;
        struct rcu_head         rcu;
};

struct qdisc_skb_cb {
        unsigned int            pkt_len;
        u16                     slave_dev_queue_mapping;
        u16                     _pad;
#define QDISC_CB_PRIV_LEN 20
        unsigned char           data[QDISC_CB_PRIV_LEN];
};
static inline void qdisc_cb_private_validate(const struct sk_buff *skb, int sz)
{
        struct qdisc_skb_cb *qcb;

        BUILD_BUG_ON(sizeof(skb->cb) < offsetof(struct qdisc_skb_cb, data) + sz);
        BUILD_BUG_ON(sizeof(qcb->data) < sz);
}
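
/* Illustrative sketch (hypothetical struct name): a qdisc that keeps
 * per-packet state in the private area of qdisc_skb_cb typically wraps
 * the cast in a helper and lets the BUILD_BUG_ON()s above verify the fit
 * at compile time:
 *
 *      struct my_skb_cb {
 *              u32 timestamp;
 *      };
 *
 *      static inline struct my_skb_cb *my_skb_cb(struct sk_buff *skb)
 *      {
 *              qdisc_cb_private_validate(skb, sizeof(struct my_skb_cb));
 *              return (struct my_skb_cb *)qdisc_skb_cb(skb)->data;
 *      }
 */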

static inline int qdisc_qlen(const struct Qdisc *q)
{
        return q->q.qlen;
}

static inline struct qdisc_skb_cb *qdisc_skb_cb(const struct sk_buff *skb)
{
        return (struct qdisc_skb_cb *)skb->cb;
}

static inline spinlock_t *qdisc_lock(struct Qdisc *qdisc)
{
        return &qdisc->q.lock;
}

static inline struct Qdisc *qdisc_root(const struct Qdisc *qdisc)
{
        struct Qdisc *q = rcu_dereference_rtnl(qdisc->dev_queue->qdisc);

        return q;
}

static inline struct Qdisc *qdisc_root_sleeping(const struct Qdisc *qdisc)
{
        return qdisc->dev_queue->qdisc_sleeping;
}
/* The qdisc root lock is a mechanism by which the top level
 * of a qdisc tree can be locked from any qdisc node in the
 * forest. This allows changing the configuration of some
 * aspect of the qdisc tree while blocking out asynchronous
 * qdisc access in the packet processing paths.
 *
 * It is only legal to do this when the root will not change
 * on us. Otherwise we'll potentially lock the wrong qdisc
 * root. This is enforced by holding the RTNL semaphore, which
 * all users of this lock accessor must do.
 */
static inline spinlock_t *qdisc_root_lock(const struct Qdisc *qdisc)
{
        struct Qdisc *root = qdisc_root(qdisc);

        ASSERT_RTNL();
        return qdisc_lock(root);
}

static inline spinlock_t *qdisc_root_sleeping_lock(const struct Qdisc *qdisc)
{
        struct Qdisc *root = qdisc_root_sleeping(qdisc);

        ASSERT_RTNL();
        return qdisc_lock(root);
}

static inline struct net_device *qdisc_dev(const struct Qdisc *qdisc)
{
        return qdisc->dev_queue->dev;
}

static inline void sch_tree_lock(const struct Qdisc *q)
{
        spin_lock_bh(qdisc_root_sleeping_lock(q));
}

static inline void sch_tree_unlock(const struct Qdisc *q)
{
        spin_unlock_bh(qdisc_root_sleeping_lock(q));
}
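
/* Configuration-path sketch (illustrative; new_limit is a hypothetical
 * variable): a qdisc ->change() handler running under RTNL brackets its
 * update with the tree lock so the packet processing paths cannot observe
 * a half-applied configuration:
 *
 *      sch_tree_lock(sch);
 *      sch->limit = new_limit;
 *      sch_tree_unlock(sch);
 */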

#define tcf_tree_lock(tp)       sch_tree_lock((tp)->q)
#define tcf_tree_unlock(tp)     sch_tree_unlock((tp)->q)

extern struct Qdisc noop_qdisc;
extern struct Qdisc_ops noop_qdisc_ops;
extern struct Qdisc_ops pfifo_fast_ops;
extern struct Qdisc_ops mq_qdisc_ops;
extern const struct Qdisc_ops *default_qdisc_ops;

struct Qdisc_class_common {
        u32                     classid;
        struct hlist_node       hnode;
};

struct Qdisc_class_hash {
        struct hlist_head       *hash;
        unsigned int            hashsize;
        unsigned int            hashmask;
        unsigned int            hashelems;
};

static inline unsigned int qdisc_class_hash(u32 id, u32 mask)
{
        id ^= id >> 8;
        id ^= id >> 4;
        return id & mask;
}

static inline struct Qdisc_class_common *
qdisc_class_find(const struct Qdisc_class_hash *hash, u32 id)
{
        struct Qdisc_class_common *cl;
        unsigned int h;

        h = qdisc_class_hash(id, hash->hashmask);
        hlist_for_each_entry(cl, &hash->hash[h], hnode) {
                if (cl->classid == id)
                        return cl;
        }
        return NULL;
}
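
/* Lookup sketch (my_class and the clhash field of the private struct are
 * hypothetical names): classful qdiscs embed a Qdisc_class_common in their
 * per-class state and recover the outer structure with container_of():
 *
 *      struct my_class {
 *              struct Qdisc_class_common common;
 *              ...
 *      };
 *
 *      struct Qdisc_class_common *c;
 *
 *      c = qdisc_class_find(&q->clhash, classid);
 *      if (c)
 *              return container_of(c, struct my_class, common);
 */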

int qdisc_class_hash_init(struct Qdisc_class_hash *);
void qdisc_class_hash_insert(struct Qdisc_class_hash *,
                             struct Qdisc_class_common *);
void qdisc_class_hash_remove(struct Qdisc_class_hash *,
                             struct Qdisc_class_common *);
void qdisc_class_hash_grow(struct Qdisc *, struct Qdisc_class_hash *);
void qdisc_class_hash_destroy(struct Qdisc_class_hash *);

void dev_init_scheduler(struct net_device *dev);
void dev_shutdown(struct net_device *dev);
void dev_activate(struct net_device *dev);
void dev_deactivate(struct net_device *dev);
void dev_deactivate_many(struct list_head *head);
struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue,
                              struct Qdisc *qdisc);
void qdisc_reset(struct Qdisc *qdisc);
void qdisc_destroy(struct Qdisc *qdisc);
void qdisc_tree_decrease_qlen(struct Qdisc *qdisc, unsigned int n);
struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
                          const struct Qdisc_ops *ops);
struct Qdisc *qdisc_create_dflt(struct netdev_queue *dev_queue,
                                const struct Qdisc_ops *ops, u32 parentid);
void __qdisc_calculate_pkt_len(struct sk_buff *skb,
                               const struct qdisc_size_table *stab);
void tcf_destroy(struct tcf_proto *tp);
void tcf_destroy_chain(struct tcf_proto __rcu **fl);
/* Reset all TX qdiscs greater than index of a device. */
static inline void qdisc_reset_all_tx_gt(struct net_device *dev, unsigned int i)
{
        struct Qdisc *qdisc;

        for (; i < dev->num_tx_queues; i++) {
                qdisc = rtnl_dereference(netdev_get_tx_queue(dev, i)->qdisc);
                if (qdisc) {
                        spin_lock_bh(qdisc_lock(qdisc));
                        qdisc_reset(qdisc);
                        spin_unlock_bh(qdisc_lock(qdisc));
                }
        }
}

static inline void qdisc_reset_all_tx(struct net_device *dev)
{
        qdisc_reset_all_tx_gt(dev, 0);
}
/* Are all TX queues of the device empty? */
static inline bool qdisc_all_tx_empty(const struct net_device *dev)
{
        unsigned int i;

        rcu_read_lock();
        for (i = 0; i < dev->num_tx_queues; i++) {
                struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
                const struct Qdisc *q = rcu_dereference(txq->qdisc);

                if (q->q.qlen) {
                        rcu_read_unlock();
                        return false;
                }
        }
        rcu_read_unlock();
        return true;
}

/* Are any of the TX qdiscs changing? */
static inline bool qdisc_tx_changing(const struct net_device *dev)
{
        unsigned int i;

        for (i = 0; i < dev->num_tx_queues; i++) {
                struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

                if (rcu_access_pointer(txq->qdisc) != txq->qdisc_sleeping)
                        return true;
        }
        return false;
}

/* Is the device using the noop qdisc on all queues? */
static inline bool qdisc_tx_is_noop(const struct net_device *dev)
{
        unsigned int i;

        for (i = 0; i < dev->num_tx_queues; i++) {
                struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

                if (rcu_access_pointer(txq->qdisc) != &noop_qdisc)
                        return false;
        }
        return true;
}
static inline unsigned int qdisc_pkt_len(const struct sk_buff *skb)
{
        return qdisc_skb_cb(skb)->pkt_len;
}

/* additional qdisc xmit flags (NET_XMIT_MASK in linux/netdevice.h) */
enum net_xmit_qdisc_t {
        __NET_XMIT_STOLEN = 0x00010000,
        __NET_XMIT_BYPASS = 0x00020000,
};

#ifdef CONFIG_NET_CLS_ACT
#define net_xmit_drop_count(e)  ((e) & __NET_XMIT_STOLEN ? 0 : 1)
#else
#define net_xmit_drop_count(e)  (1)
#endif

static inline void qdisc_calculate_pkt_len(struct sk_buff *skb,
                                           const struct Qdisc *sch)
{
#ifdef CONFIG_NET_SCHED
        struct qdisc_size_table *stab = rcu_dereference_bh(sch->stab);

        if (stab)
                __qdisc_calculate_pkt_len(skb, stab);
#endif
}

static inline int qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
        qdisc_calculate_pkt_len(skb, sch);
        return sch->enqueue(skb, sch);
}

static inline int qdisc_enqueue_root(struct sk_buff *skb, struct Qdisc *sch)
{
        qdisc_skb_cb(skb)->pkt_len = skb->len;
        return qdisc_enqueue(skb, sch) & NET_XMIT_MASK;
}

static inline bool qdisc_is_percpu_stats(const struct Qdisc *q)
{
        return q->flags & TCQ_F_CPUSTATS;
}

static inline void bstats_update(struct gnet_stats_basic_packed *bstats,
                                 const struct sk_buff *skb)
{
        bstats->bytes += qdisc_pkt_len(skb);
        bstats->packets += skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1;
}

static inline void qdisc_bstats_update_cpu(struct Qdisc *sch,
                                           const struct sk_buff *skb)
{
        struct gnet_stats_basic_cpu *bstats =
                                this_cpu_ptr(sch->cpu_bstats);

        u64_stats_update_begin(&bstats->syncp);
        bstats_update(&bstats->bstats, skb);
        u64_stats_update_end(&bstats->syncp);
}

static inline void qdisc_bstats_update(struct Qdisc *sch,
                                       const struct sk_buff *skb)
{
        bstats_update(&sch->bstats, skb);
}

static inline void qdisc_qstats_backlog_dec(struct Qdisc *sch,
                                            const struct sk_buff *skb)
{
        sch->qstats.backlog -= qdisc_pkt_len(skb);
}

static inline void qdisc_qstats_backlog_inc(struct Qdisc *sch,
                                            const struct sk_buff *skb)
{
        sch->qstats.backlog += qdisc_pkt_len(skb);
}

static inline void __qdisc_qstats_drop(struct Qdisc *sch, int count)
{
        sch->qstats.drops += count;
}

static inline void qdisc_qstats_drop(struct Qdisc *sch)
{
        sch->qstats.drops++;
}

static inline void qdisc_qstats_drop_cpu(struct Qdisc *sch)
{
        struct gnet_stats_queue *qstats = this_cpu_ptr(sch->cpu_qstats);

        qstats->drops++;
}

static inline void qdisc_qstats_overlimit(struct Qdisc *sch)
{
        sch->qstats.overlimits++;
}

static inline int __qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch,
                                       struct sk_buff_head *list)
{
        __skb_queue_tail(list, skb);
        qdisc_qstats_backlog_inc(sch, skb);

        return NET_XMIT_SUCCESS;
}

static inline int qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch)
{
        return __qdisc_enqueue_tail(skb, sch, &sch->q);
}

static inline struct sk_buff *__qdisc_dequeue_head(struct Qdisc *sch,
                                                   struct sk_buff_head *list)
{
        struct sk_buff *skb = __skb_dequeue(list);

        if (likely(skb != NULL)) {
                qdisc_qstats_backlog_dec(sch, skb);
                qdisc_bstats_update(sch, skb);
        }

        return skb;
}

static inline struct sk_buff *qdisc_dequeue_head(struct Qdisc *sch)
{
        return __qdisc_dequeue_head(sch, &sch->q);
}

static inline unsigned int __qdisc_queue_drop_head(struct Qdisc *sch,
                                                   struct sk_buff_head *list)
{
        struct sk_buff *skb = __skb_dequeue(list);

        if (likely(skb != NULL)) {
                unsigned int len = qdisc_pkt_len(skb);

                qdisc_qstats_backlog_dec(sch, skb);
                kfree_skb(skb);
                return len;
        }

        return 0;
}

static inline unsigned int qdisc_queue_drop_head(struct Qdisc *sch)
{
        return __qdisc_queue_drop_head(sch, &sch->q);
}

static inline struct sk_buff *__qdisc_dequeue_tail(struct Qdisc *sch,
                                                   struct sk_buff_head *list)
{
        struct sk_buff *skb = __skb_dequeue_tail(list);

        if (likely(skb != NULL))
                qdisc_qstats_backlog_dec(sch, skb);

        return skb;
}

static inline struct sk_buff *qdisc_dequeue_tail(struct Qdisc *sch)
{
        return __qdisc_dequeue_tail(sch, &sch->q);
}

static inline struct sk_buff *qdisc_peek_head(struct Qdisc *sch)
{
        return skb_peek(&sch->q);
}
/* generic pseudo peek method for non-work-conserving qdisc */
static inline struct sk_buff *qdisc_peek_dequeued(struct Qdisc *sch)
{
        /* we can reuse ->gso_skb because peek isn't called for root qdiscs */
        if (!sch->gso_skb) {
                sch->gso_skb = sch->dequeue(sch);
                if (sch->gso_skb)
                        /* it's still part of the queue */
                        sch->q.qlen++;
        }

        return sch->gso_skb;
}

/* use instead of qdisc->dequeue() for all qdiscs queried with ->peek() */
static inline struct sk_buff *qdisc_dequeue_peeked(struct Qdisc *sch)
{
        struct sk_buff *skb = sch->gso_skb;

        if (skb) {
                sch->gso_skb = NULL;
                sch->q.qlen--;
        } else {
                skb = sch->dequeue(sch);
        }

        return skb;
}
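
/* The two helpers above pair up. A non-work-conserving qdisc peeks at the
 * next packet (caching it in ->gso_skb) to decide whether it may be sent
 * yet, and only then dequeues it for real, e.g. (sketch; time_to_send() is
 * a hypothetical predicate):
 *
 *      skb = qdisc_peek_dequeued(sch);
 *      if (skb && time_to_send(skb))
 *              skb = qdisc_dequeue_peeked(sch);
 */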

static inline void __qdisc_reset_queue(struct Qdisc *sch,
                                       struct sk_buff_head *list)
{
        /*
         * We do not know the backlog in bytes of this list;
         * it is up to the caller to correct it.
         */
        __skb_queue_purge(list);
}

static inline void qdisc_reset_queue(struct Qdisc *sch)
{
        __qdisc_reset_queue(sch, &sch->q);
        sch->qstats.backlog = 0;
}

static inline unsigned int __qdisc_queue_drop(struct Qdisc *sch,
                                              struct sk_buff_head *list)
{
        struct sk_buff *skb = __qdisc_dequeue_tail(sch, list);

        if (likely(skb != NULL)) {
                unsigned int len = qdisc_pkt_len(skb);

                kfree_skb(skb);
                return len;
        }

        return 0;
}

static inline unsigned int qdisc_queue_drop(struct Qdisc *sch)
{
        return __qdisc_queue_drop(sch, &sch->q);
}

static inline int qdisc_drop(struct sk_buff *skb, struct Qdisc *sch)
{
        kfree_skb(skb);
        qdisc_qstats_drop(sch);

        return NET_XMIT_DROP;
}

static inline int qdisc_reshape_fail(struct sk_buff *skb, struct Qdisc *sch)
{
        qdisc_qstats_drop(sch);

#ifdef CONFIG_NET_CLS_ACT
        if (sch->reshape_fail == NULL || sch->reshape_fail(skb, sch))
                goto drop;

        return NET_XMIT_SUCCESS;

drop:
#endif
        kfree_skb(skb);
        return NET_XMIT_DROP;
}

/* Length to Time (L2T) lookup in a qdisc_rate_table, to determine how
 * long it will take to send a packet given its size.
 */
static inline u32 qdisc_l2t(struct qdisc_rate_table *rtab, unsigned int pktlen)
{
        int slot = pktlen + rtab->rate.cell_align + rtab->rate.overhead;

        if (slot < 0)
                slot = 0;
        slot >>= rtab->rate.cell_log;
        if (slot > 255)
                return rtab->data[255] * (slot >> 8) + rtab->data[slot & 0xFF];
        return rtab->data[slot];
}
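
/* Worked example with assumed table parameters: if cell_align and overhead
 * are 0 and cell_log is 3, a 100 byte packet falls into slot 100 >> 3 = 12
 * and costs rtab->data[12] time units. Slots above 255 are extrapolated
 * from data[255] plus a lookup of the low byte of the slot.
 */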

#ifdef CONFIG_NET_CLS_ACT
static inline struct sk_buff *skb_act_clone(struct sk_buff *skb, gfp_t gfp_mask,
                                            int action)
{
        struct sk_buff *n;

        n = skb_clone(skb, gfp_mask);

        if (n) {
                n->tc_verd = SET_TC_VERD(n->tc_verd, 0);
                n->tc_verd = CLR_TC_OK2MUNGE(n->tc_verd);
                n->tc_verd = CLR_TC_MUNGED(n->tc_verd);
        }
        return n;
}
#endif

struct psched_ratecfg {
        u64     rate_bytes_ps; /* bytes per second */
        u32     mult;
        u16     overhead;
        u8      linklayer;
        u8      shift;
};

static inline u64 psched_l2t_ns(const struct psched_ratecfg *r,
                                unsigned int len)
{
        len += r->overhead;

        if (unlikely(r->linklayer == TC_LINKLAYER_ATM))
                return ((u64)(DIV_ROUND_UP(len, 48) * 53) * r->mult) >> r->shift;

        return ((u64)len * r->mult) >> r->shift;
}
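
/* psched_ratecfg_precompute() picks ->mult and ->shift so that
 * (len * mult) >> shift approximates len * NSEC_PER_SEC / rate_bytes_ps
 * without a 64bit division on the fast path. Sanity check with assumed
 * numbers: at rate_bytes_ps = 125000000 (1 Gbit/s), a 1500 byte packet
 * takes 1500 / 125000000 s = 12000 ns, so psched_l2t_ns() should return
 * approximately 12000. The ATM branch accounts for 48 payload bytes being
 * carried per 53 byte cell.
 */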

void psched_ratecfg_precompute(struct psched_ratecfg *r,
                               const struct tc_ratespec *conf,
                               u64 rate64);

static inline void psched_ratecfg_getrate(struct tc_ratespec *res,
                                          const struct psched_ratecfg *r)
{
        memset(res, 0, sizeof(*res));

        /* The legacy struct tc_ratespec has a 32bit @rate field;
         * a qdisc using a 64bit rate should add new attributes
         * in order to maintain compatibility.
         */
        res->rate = min_t(u64, r->rate_bytes_ps, ~0U);

        res->overhead = r->overhead;
        res->linklayer = (r->linklayer & TC_LINKLAYER_MASK);
}

#endif