sch_fq_codel.c

/*
 * Fair Queue CoDel discipline
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Copyright (C) 2012,2015 Eric Dumazet <edumazet@google.com>
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/string.h>
#include <linux/in.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/jhash.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/codel.h>

/*	Fair Queue CoDel.
 *
 * Principles:
 * Packets are classified (internal classifier or external) on flows.
 * This is a stochastic model (since we use a hash, several flows
 * might be hashed to the same slot).
 * Each flow has a CoDel managed queue.
 * Flows are linked onto two (Round Robin) lists,
 * so that new flows have priority over old ones.
 *
 * For a given flow, packets are not reordered (CoDel uses a FIFO);
 * head drops only.
 * ECN capability is on by default.
 * Low memory footprint (64 bytes per flow).
 */

struct fq_codel_flow {
	struct sk_buff	  *head;
	struct sk_buff	  *tail;
	struct list_head  flowchain;
	int		  deficit;
	u32		  dropped; /* number of drops (or ECN marks) on this flow */
	struct codel_vars cvars;
}; /* please try to keep this structure <= 64 bytes */

struct fq_codel_sched_data {
	struct tcf_proto __rcu *filter_list; /* optional external classifier */
	struct fq_codel_flow *flows;	/* Flows table [flows_cnt] */
	u32		*backlogs;	/* backlog table [flows_cnt] */
	u32		flows_cnt;	/* number of flows */
	u32		perturbation;	/* hash perturbation */
	u32		quantum;	/* psched_mtu(qdisc_dev(sch)); */
	struct codel_params cparams;
	struct codel_stats cstats;
	u32		drop_overlimit;
	u32		new_flow_count;

	struct list_head new_flows;	/* list of new flows */
	struct list_head old_flows;	/* list of old flows */
};

static unsigned int fq_codel_hash(const struct fq_codel_sched_data *q,
				  struct sk_buff *skb)
{
	u32 hash = skb_get_hash_perturb(skb, q->perturbation);

	return reciprocal_scale(hash, q->flows_cnt);
}
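
/* For reference, reciprocal_scale() maps the 32-bit hash onto
 * [0, flows_cnt - 1] with a multiply-and-shift instead of a modulo:
 *
 *	idx = (u32)(((u64)hash * flows_cnt) >> 32);
 *
 * With the default of 1024 flows, every packet is thus bucketed into
 * one of 1024 CoDel-managed FIFOs; distinct 5-tuples may collide on
 * the same bucket, which is the "stochastic" part of the model.
 */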

static unsigned int fq_codel_classify(struct sk_buff *skb, struct Qdisc *sch,
				      int *qerr)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	struct tcf_proto *filter;
	struct tcf_result res;
	int result;

	if (TC_H_MAJ(skb->priority) == sch->handle &&
	    TC_H_MIN(skb->priority) > 0 &&
	    TC_H_MIN(skb->priority) <= q->flows_cnt)
		return TC_H_MIN(skb->priority);

	filter = rcu_dereference_bh(q->filter_list);
	if (!filter)
		return fq_codel_hash(q, skb) + 1;

	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	result = tc_classify(skb, filter, &res, false);
	if (result >= 0) {
#ifdef CONFIG_NET_CLS_ACT
		switch (result) {
		case TC_ACT_STOLEN:
		case TC_ACT_QUEUED:
			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
			/* fall through */
		case TC_ACT_SHOT:
			return 0;
		}
#endif
		if (TC_H_MIN(res.classid) <= q->flows_cnt)
			return TC_H_MIN(res.classid);
	}
	return 0;
}
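
/* Return-value convention: 0 means "drop this packet" (a classifier
 * error or a shot/stolen verdict), while 1..flows_cnt is the selected
 * flow index plus one; the caller subtracts one before indexing
 * q->flows[].
 */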

/* helper functions: might be changed when/if skb uses a standard list_head */

/* remove one skb from head of slot queue */
static inline struct sk_buff *dequeue_head(struct fq_codel_flow *flow)
{
	struct sk_buff *skb = flow->head;

	flow->head = skb->next;
	skb->next = NULL;
	return skb;
}

/* add skb to flow queue (tail add) */
static inline void flow_queue_add(struct fq_codel_flow *flow,
				  struct sk_buff *skb)
{
	if (flow->head == NULL)
		flow->head = skb;
	else
		flow->tail->next = skb;
	flow->tail = skb;
	skb->next = NULL;
}
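
/* Each flow is a singly linked skb list threaded through skb->next:
 * head removal and tail insertion are both O(1), and FIFO order within
 * a flow is preserved, which is what lets CoDel do head drops without
 * ever reordering a flow's packets.
 */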

static unsigned int fq_codel_drop(struct Qdisc *sch)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;
	unsigned int maxbacklog = 0, idx = 0, i, len;
	struct fq_codel_flow *flow;

	/* Queue is full! Find the fat flow and drop a packet from it.
	 * This might sound expensive, but with 1024 flows, we scan
	 * 4KB of memory, and we don't need to handle a complex tree
	 * in the fast path (packet queue/enqueue) with many cache misses.
	 */
	for (i = 0; i < q->flows_cnt; i++) {
		if (q->backlogs[i] > maxbacklog) {
			maxbacklog = q->backlogs[i];
			idx = i;
		}
	}
	flow = &q->flows[idx];
	skb = dequeue_head(flow);
	len = qdisc_pkt_len(skb);
	q->backlogs[idx] -= len;
	sch->q.qlen--;
	qdisc_qstats_drop(sch);
	qdisc_qstats_backlog_dec(sch, skb);
	kfree_skb(skb);
	flow->dropped++;
	return idx;
}

static unsigned int fq_codel_qdisc_drop(struct Qdisc *sch)
{
	unsigned int prev_backlog;

	prev_backlog = sch->qstats.backlog;
	fq_codel_drop(sch);
	return prev_backlog - sch->qstats.backlog;
}
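
/* The qdisc ->drop() op reports how many bytes were freed; since
 * fq_codel_drop() already adjusts sch->qstats.backlog, the byte count
 * is simply the backlog delta across the call.
 */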

static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	unsigned int idx, prev_backlog;
	struct fq_codel_flow *flow;
	int uninitialized_var(ret);

	idx = fq_codel_classify(skb, sch, &ret);
	if (idx == 0) {
		if (ret & __NET_XMIT_BYPASS)
			qdisc_qstats_drop(sch);
		kfree_skb(skb);
		return ret;
	}
	idx--;

	codel_set_enqueue_time(skb);
	flow = &q->flows[idx];
	flow_queue_add(flow, skb);
	q->backlogs[idx] += qdisc_pkt_len(skb);
	qdisc_qstats_backlog_inc(sch, skb);

	if (list_empty(&flow->flowchain)) {
		list_add_tail(&flow->flowchain, &q->new_flows);
		q->new_flow_count++;
		flow->deficit = q->quantum;
		flow->dropped = 0;
	}
	if (++sch->q.qlen <= sch->limit)
		return NET_XMIT_SUCCESS;

	prev_backlog = sch->qstats.backlog;
	q->drop_overlimit++;

	/* Return Congestion Notification only if we dropped a packet
	 * from this flow.
	 */
	if (fq_codel_drop(sch) == idx)
		return NET_XMIT_CN;

	/* As we dropped a packet, better let upper stack know this */
	qdisc_tree_reduce_backlog(sch, 1, prev_backlog - sch->qstats.backlog);
	return NET_XMIT_SUCCESS;
}
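
/* Enqueue walk-through: the packet is stamped with its arrival time
 * and appended to its hashed flow. An idle flow re-enters service on
 * the new_flows list with a fresh deficit of one quantum, so sparse
 * flows (DNS queries, TCP SYNs, ...) jump ahead of bulk transfers.
 * Only when the total qlen exceeds sch->limit (10240 packets by
 * default) does the fattest flow lose its head packet.
 */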

/* This is the specific function called from codel_dequeue()
 * to dequeue a packet from queue. Note: backlog is handled in
 * codel, we don't need to reduce it here.
 */
static struct sk_buff *dequeue_func(struct codel_vars *vars, void *ctx)
{
	struct Qdisc *sch = ctx;
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	struct fq_codel_flow *flow;
	struct sk_buff *skb = NULL;

	flow = container_of(vars, struct fq_codel_flow, cvars);
	if (flow->head) {
		skb = dequeue_head(flow);
		q->backlogs[flow - q->flows] -= qdisc_pkt_len(skb);
		sch->q.qlen--;
		sch->qstats.backlog -= qdisc_pkt_len(skb);
	}
	return skb;
}
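
/* container_of() recovers the enclosing fq_codel_flow from the
 * codel_vars pointer handed back by the generic CoDel code, and the
 * pointer difference (flow - q->flows) is the flow's index into the
 * parallel backlogs[] array.
 */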

static void drop_func(struct sk_buff *skb, void *ctx)
{
	struct Qdisc *sch = ctx;

	qdisc_drop(skb, sch);
}

static struct sk_buff *fq_codel_dequeue(struct Qdisc *sch)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;
	struct fq_codel_flow *flow;
	struct list_head *head;
	u32 prev_drop_count, prev_ecn_mark;
	unsigned int prev_backlog;

begin:
	head = &q->new_flows;
	if (list_empty(head)) {
		head = &q->old_flows;
		if (list_empty(head))
			return NULL;
	}
	flow = list_first_entry(head, struct fq_codel_flow, flowchain);

	if (flow->deficit <= 0) {
		flow->deficit += q->quantum;
		list_move_tail(&flow->flowchain, &q->old_flows);
		goto begin;
	}

	prev_drop_count = q->cstats.drop_count;
	prev_ecn_mark = q->cstats.ecn_mark;
	prev_backlog = sch->qstats.backlog;

	skb = codel_dequeue(sch, &sch->qstats.backlog, &q->cparams,
			    &flow->cvars, &q->cstats, qdisc_pkt_len,
			    codel_get_enqueue_time, drop_func, dequeue_func);

	flow->dropped += q->cstats.drop_count - prev_drop_count;
	flow->dropped += q->cstats.ecn_mark - prev_ecn_mark;

	if (!skb) {
		/* force a pass through old_flows to prevent starvation */
		if ((head == &q->new_flows) && !list_empty(&q->old_flows))
			list_move_tail(&flow->flowchain, &q->old_flows);
		else
			list_del_init(&flow->flowchain);
		goto begin;
	}
	qdisc_bstats_update(sch, skb);
	flow->deficit -= qdisc_pkt_len(skb);
	/* We can't call qdisc_tree_reduce_backlog() if our qlen is 0,
	 * or HTB crashes. Defer it for next round.
	 */
	if (q->cstats.drop_count && sch->q.qlen) {
		qdisc_tree_reduce_backlog(sch, q->cstats.drop_count,
					  q->cstats.drop_len);
		q->cstats.drop_count = 0;
		q->cstats.drop_len = 0;
	}
	return skb;
}
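
/* DRR scheduling in a nutshell: new_flows is always served before
 * old_flows. A flow may transmit while its deficit is positive; each
 * dequeued packet subtracts its length, and an exhausted flow is
 * topped up by one quantum (the device MTU by default) and rotated to
 * the tail of old_flows. With a 1514-byte quantum a flow typically
 * emits one full-size packet, or a handful of small ones, per round.
 */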

static void fq_codel_reset(struct Qdisc *sch)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	int i;

	INIT_LIST_HEAD(&q->new_flows);
	INIT_LIST_HEAD(&q->old_flows);
	for (i = 0; i < q->flows_cnt; i++) {
		struct fq_codel_flow *flow = q->flows + i;

		while (flow->head) {
			struct sk_buff *skb = dequeue_head(flow);

			qdisc_qstats_backlog_dec(sch, skb);
			kfree_skb(skb);
		}
		INIT_LIST_HEAD(&flow->flowchain);
		codel_vars_init(&flow->cvars);
	}
	memset(q->backlogs, 0, q->flows_cnt * sizeof(u32));
	sch->q.qlen = 0;
}

static const struct nla_policy fq_codel_policy[TCA_FQ_CODEL_MAX + 1] = {
	[TCA_FQ_CODEL_TARGET]	= { .type = NLA_U32 },
	[TCA_FQ_CODEL_LIMIT]	= { .type = NLA_U32 },
	[TCA_FQ_CODEL_INTERVAL]	= { .type = NLA_U32 },
	[TCA_FQ_CODEL_ECN]	= { .type = NLA_U32 },
	[TCA_FQ_CODEL_FLOWS]	= { .type = NLA_U32 },
	[TCA_FQ_CODEL_QUANTUM]	= { .type = NLA_U32 },
	[TCA_FQ_CODEL_CE_THRESHOLD] = { .type = NLA_U32 },
};

static int fq_codel_change(struct Qdisc *sch, struct nlattr *opt)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_FQ_CODEL_MAX + 1];
	int err;

	if (!opt)
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_FQ_CODEL_MAX, opt, fq_codel_policy);
	if (err < 0)
		return err;
	if (tb[TCA_FQ_CODEL_FLOWS]) {
		if (q->flows)
			return -EINVAL;
		q->flows_cnt = nla_get_u32(tb[TCA_FQ_CODEL_FLOWS]);
		if (!q->flows_cnt ||
		    q->flows_cnt > 65536)
			return -EINVAL;
	}
	sch_tree_lock(sch);

	if (tb[TCA_FQ_CODEL_TARGET]) {
		u64 target = nla_get_u32(tb[TCA_FQ_CODEL_TARGET]);

		q->cparams.target = (target * NSEC_PER_USEC) >> CODEL_SHIFT;
	}

	if (tb[TCA_FQ_CODEL_CE_THRESHOLD]) {
		u64 val = nla_get_u32(tb[TCA_FQ_CODEL_CE_THRESHOLD]);

		q->cparams.ce_threshold = (val * NSEC_PER_USEC) >> CODEL_SHIFT;
	}

	if (tb[TCA_FQ_CODEL_INTERVAL]) {
		u64 interval = nla_get_u32(tb[TCA_FQ_CODEL_INTERVAL]);

		q->cparams.interval = (interval * NSEC_PER_USEC) >> CODEL_SHIFT;
	}

	if (tb[TCA_FQ_CODEL_LIMIT])
		sch->limit = nla_get_u32(tb[TCA_FQ_CODEL_LIMIT]);

	if (tb[TCA_FQ_CODEL_ECN])
		q->cparams.ecn = !!nla_get_u32(tb[TCA_FQ_CODEL_ECN]);

	if (tb[TCA_FQ_CODEL_QUANTUM])
		q->quantum = max(256U, nla_get_u32(tb[TCA_FQ_CODEL_QUANTUM]));

	while (sch->q.qlen > sch->limit) {
		struct sk_buff *skb = fq_codel_dequeue(sch);

		q->cstats.drop_len += qdisc_pkt_len(skb);
		kfree_skb(skb);
		q->cstats.drop_count++;
	}
	qdisc_tree_reduce_backlog(sch, q->cstats.drop_count, q->cstats.drop_len);
	q->cstats.drop_count = 0;
	q->cstats.drop_len = 0;

	sch_tree_unlock(sch);
	return 0;
}
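
/* Unit conversion above: userspace passes target, interval and
 * ce_threshold in microseconds, while codel_time_t counts nanoseconds
 * right-shifted by CODEL_SHIFT (10) bits, i.e. units of ~1.024 us,
 * hence the (usec * NSEC_PER_USEC) >> CODEL_SHIFT scaling.
 */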

static void *fq_codel_zalloc(size_t sz)
{
	void *ptr = kzalloc(sz, GFP_KERNEL | __GFP_NOWARN);

	if (!ptr)
		ptr = vzalloc(sz);
	return ptr;
}

static void fq_codel_free(void *addr)
{
	kvfree(addr);
}
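
/* Allocation pattern: try kzalloc() first (physically contiguous,
 * cache friendly) and quietly fall back to vzalloc() when memory is
 * too fragmented; the default table of 1024 flows needs 64 KB for
 * flows[] plus 4 KB for backlogs[]. kvfree() releases either kind.
 */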

static void fq_codel_destroy(struct Qdisc *sch)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);

	tcf_destroy_chain(&q->filter_list);
	fq_codel_free(q->backlogs);
	fq_codel_free(q->flows);
}

static int fq_codel_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	int i;

	sch->limit = 10*1024;
	q->flows_cnt = 1024;
	q->quantum = psched_mtu(qdisc_dev(sch));
	q->perturbation = prandom_u32();
	INIT_LIST_HEAD(&q->new_flows);
	INIT_LIST_HEAD(&q->old_flows);
	codel_params_init(&q->cparams);
	codel_stats_init(&q->cstats);
	q->cparams.ecn = true;
	q->cparams.mtu = psched_mtu(qdisc_dev(sch));

	if (opt) {
		int err = fq_codel_change(sch, opt);

		if (err)
			return err;
	}

	if (!q->flows) {
		q->flows = fq_codel_zalloc(q->flows_cnt *
					   sizeof(struct fq_codel_flow));
		if (!q->flows)
			return -ENOMEM;
		q->backlogs = fq_codel_zalloc(q->flows_cnt * sizeof(u32));
		if (!q->backlogs) {
			fq_codel_free(q->flows);
			return -ENOMEM;
		}
		for (i = 0; i < q->flows_cnt; i++) {
			struct fq_codel_flow *flow = q->flows + i;

			INIT_LIST_HEAD(&flow->flowchain);
			codel_vars_init(&flow->cvars);
		}
	}
	if (sch->limit >= 1)
		sch->flags |= TCQ_F_CAN_BYPASS;
	else
		sch->flags &= ~TCQ_F_CAN_BYPASS;
	return 0;
}
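
/* Illustrative usage from userspace (iproute2); the device name is an
 * example and the values shown simply restate the defaults set above
 * (limit 10240 packets, 1024 flows, quantum = device MTU, plus CoDel's
 * 5 ms target / 100 ms interval from codel_params_init(), ECN on):
 *
 *	tc qdisc add dev eth0 root fq_codel
 *	tc qdisc change dev eth0 root fq_codel limit 10240 flows 1024 \
 *		target 5ms interval 100ms ecn
 *	tc -s qdisc show dev eth0
 */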

static int fq_codel_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	struct nlattr *opts;

	opts = nla_nest_start(skb, TCA_OPTIONS);
	if (opts == NULL)
		goto nla_put_failure;

	if (nla_put_u32(skb, TCA_FQ_CODEL_TARGET,
			codel_time_to_us(q->cparams.target)) ||
	    nla_put_u32(skb, TCA_FQ_CODEL_LIMIT,
			sch->limit) ||
	    nla_put_u32(skb, TCA_FQ_CODEL_INTERVAL,
			codel_time_to_us(q->cparams.interval)) ||
	    nla_put_u32(skb, TCA_FQ_CODEL_ECN,
			q->cparams.ecn) ||
	    nla_put_u32(skb, TCA_FQ_CODEL_QUANTUM,
			q->quantum) ||
	    nla_put_u32(skb, TCA_FQ_CODEL_FLOWS,
			q->flows_cnt))
		goto nla_put_failure;

	if (q->cparams.ce_threshold != CODEL_DISABLED_THRESHOLD &&
	    nla_put_u32(skb, TCA_FQ_CODEL_CE_THRESHOLD,
			codel_time_to_us(q->cparams.ce_threshold)))
		goto nla_put_failure;

	return nla_nest_end(skb, opts);

nla_put_failure:
	return -1;
}

static int fq_codel_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	struct tc_fq_codel_xstats st = {
		.type			= TCA_FQ_CODEL_XSTATS_QDISC,
	};
	struct list_head *pos;

	st.qdisc_stats.maxpacket = q->cstats.maxpacket;
	st.qdisc_stats.drop_overlimit = q->drop_overlimit;
	st.qdisc_stats.ecn_mark = q->cstats.ecn_mark;
	st.qdisc_stats.new_flow_count = q->new_flow_count;
	st.qdisc_stats.ce_mark = q->cstats.ce_mark;

	list_for_each(pos, &q->new_flows)
		st.qdisc_stats.new_flows_len++;

	list_for_each(pos, &q->old_flows)
		st.qdisc_stats.old_flows_len++;

	return gnet_stats_copy_app(d, &st, sizeof(st));
}
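
/* fq_codel has no configurable classes: the "classes" exposed by the
 * ops below are simply the hash buckets, reported as class ids
 * 1..flows_cnt so that "tc -s class show" can display per-flow
 * deficit, delay and drop statistics.
 */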

static struct Qdisc *fq_codel_leaf(struct Qdisc *sch, unsigned long arg)
{
	return NULL;
}

static unsigned long fq_codel_get(struct Qdisc *sch, u32 classid)
{
	return 0;
}

static unsigned long fq_codel_bind(struct Qdisc *sch, unsigned long parent,
				   u32 classid)
{
	/* we cannot bypass queue discipline anymore */
	sch->flags &= ~TCQ_F_CAN_BYPASS;
	return 0;
}

static void fq_codel_put(struct Qdisc *q, unsigned long cl)
{
}

static struct tcf_proto __rcu **fq_codel_find_tcf(struct Qdisc *sch,
						  unsigned long cl)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);

	if (cl)
		return NULL;
	return &q->filter_list;
}

static int fq_codel_dump_class(struct Qdisc *sch, unsigned long cl,
			       struct sk_buff *skb, struct tcmsg *tcm)
{
	tcm->tcm_handle |= TC_H_MIN(cl);
	return 0;
}

static int fq_codel_dump_class_stats(struct Qdisc *sch, unsigned long cl,
				     struct gnet_dump *d)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	u32 idx = cl - 1;
	struct gnet_stats_queue qs = { 0 };
	struct tc_fq_codel_xstats xstats;

	if (idx < q->flows_cnt) {
		const struct fq_codel_flow *flow = &q->flows[idx];
		const struct sk_buff *skb = flow->head;

		memset(&xstats, 0, sizeof(xstats));
		xstats.type = TCA_FQ_CODEL_XSTATS_CLASS;
		xstats.class_stats.deficit = flow->deficit;
		xstats.class_stats.ldelay =
			codel_time_to_us(flow->cvars.ldelay);
		xstats.class_stats.count = flow->cvars.count;
		xstats.class_stats.lastcount = flow->cvars.lastcount;
		xstats.class_stats.dropping = flow->cvars.dropping;
		if (flow->cvars.dropping) {
			codel_tdiff_t delta = flow->cvars.drop_next -
					      codel_get_time();

			xstats.class_stats.drop_next = (delta >= 0) ?
				codel_time_to_us(delta) :
				-codel_time_to_us(-delta);
		}
		while (skb) {
			qs.qlen++;
			skb = skb->next;
		}
		qs.backlog = q->backlogs[idx];
		qs.drops = flow->dropped;
	}
	if (gnet_stats_copy_queue(d, NULL, &qs, 0) < 0)
		return -1;
	if (idx < q->flows_cnt)
		return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
	return 0;
}

static void fq_codel_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	unsigned int i;

	if (arg->stop)
		return;

	for (i = 0; i < q->flows_cnt; i++) {
		if (list_empty(&q->flows[i].flowchain) ||
		    arg->count < arg->skip) {
			arg->count++;
			continue;
		}
		if (arg->fn(sch, i + 1, arg) < 0) {
			arg->stop = 1;
			break;
		}
		arg->count++;
	}
}

static const struct Qdisc_class_ops fq_codel_class_ops = {
	.leaf		=	fq_codel_leaf,
	.get		=	fq_codel_get,
	.put		=	fq_codel_put,
	.tcf_chain	=	fq_codel_find_tcf,
	.bind_tcf	=	fq_codel_bind,
	.unbind_tcf	=	fq_codel_put,
	.dump		=	fq_codel_dump_class,
	.dump_stats	=	fq_codel_dump_class_stats,
	.walk		=	fq_codel_walk,
};

static struct Qdisc_ops fq_codel_qdisc_ops __read_mostly = {
	.cl_ops		=	&fq_codel_class_ops,
	.id		=	"fq_codel",
	.priv_size	=	sizeof(struct fq_codel_sched_data),
	.enqueue	=	fq_codel_enqueue,
	.dequeue	=	fq_codel_dequeue,
	.peek		=	qdisc_peek_dequeued,
	.drop		=	fq_codel_qdisc_drop,
	.init		=	fq_codel_init,
	.reset		=	fq_codel_reset,
	.destroy	=	fq_codel_destroy,
	.change		=	fq_codel_change,
	.dump		=	fq_codel_dump,
	.dump_stats	=	fq_codel_dump_stats,
	.owner		=	THIS_MODULE,
};

static int __init fq_codel_module_init(void)
{
	return register_qdisc(&fq_codel_qdisc_ops);
}

static void __exit fq_codel_module_exit(void)
{
	unregister_qdisc(&fq_codel_qdisc_ops);
}

module_init(fq_codel_module_init)
module_exit(fq_codel_module_exit)
MODULE_AUTHOR("Eric Dumazet");
MODULE_LICENSE("GPL");