sch_fq_codel.c

/*
 * Fair Queue CoDel discipline
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Copyright (C) 2012 Eric Dumazet <edumazet@google.com>
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/string.h>
#include <linux/in.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/jhash.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/flow_keys.h>
#include <net/codel.h>

/* Fair Queue CoDel.
 *
 * Principles:
 * Packets are classified (by an internal or external classifier) into flows.
 * This is a stochastic model (since we use a hash, several flows
 * might be hashed to the same slot).
 * Each flow has a CoDel-managed queue.
 * Flows are linked onto two (Round Robin) lists,
 * so that new flows have priority over old ones.
 *
 * For a given flow, packets are not reordered (CoDel uses a FIFO);
 * head drops only.
 * ECN capability is on by default.
 * Low memory footprint (64 bytes per flow).
 */
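
/* For illustration only (not part of the original file): a qdisc like this
 * one is normally configured from user space with the tc(8) tool; the
 * device name below is just an example:
 *
 *   tc qdisc add dev eth0 root fq_codel limit 10240 flows 1024 \
 *           target 5ms interval 100ms ecn
 *
 * These option names correspond to the TCA_FQ_CODEL_* netlink attributes
 * parsed in fq_codel_change() further down.
 */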

struct fq_codel_flow {
        struct sk_buff    *head;
        struct sk_buff    *tail;
        struct list_head  flowchain;
        int               deficit;
        u32               dropped; /* number of drops (or ECN marks) on this flow */
        struct codel_vars cvars;
}; /* please try to keep this structure <= 64 bytes */

struct fq_codel_sched_data {
        struct tcf_proto __rcu *filter_list; /* optional external classifier */
        struct fq_codel_flow *flows;    /* Flows table [flows_cnt] */
        u32             *backlogs;      /* backlog table [flows_cnt] */
        u32             flows_cnt;      /* number of flows */
        u32             perturbation;   /* hash perturbation */
        u32             quantum;        /* psched_mtu(qdisc_dev(sch)); */
        struct codel_params cparams;
        struct codel_stats cstats;
        u32             drop_overlimit;
        u32             new_flow_count;

        struct list_head new_flows;     /* list of new flows */
        struct list_head old_flows;     /* list of old flows */
};
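
/* Map a packet to one of flows_cnt buckets: hash the dissected flow keys
 * (addresses, protocol, ports) together with a per-qdisc random
 * perturbation, then scale the 32-bit hash down to [0, flows_cnt) with
 * reciprocal_scale() rather than a modulus.
 */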
static unsigned int fq_codel_hash(const struct fq_codel_sched_data *q,
                                  const struct sk_buff *skb)
{
        struct flow_keys keys;
        unsigned int hash;

        skb_flow_dissect(skb, &keys);
        hash = jhash_3words((__force u32)keys.dst,
                            (__force u32)keys.src ^ keys.ip_proto,
                            (__force u32)keys.ports, q->perturbation);

        return reciprocal_scale(hash, q->flows_cnt);
}
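
/* Classification convention: a return of 0 tells fq_codel_enqueue() to
 * drop the packet, while 1..flows_cnt selects a flow (callers subtract
 * one to get the flow table index). skb->priority or an optional tc
 * filter can override the default hash-based choice.
 */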
static unsigned int fq_codel_classify(struct sk_buff *skb, struct Qdisc *sch,
                                      int *qerr)
{
        struct fq_codel_sched_data *q = qdisc_priv(sch);
        struct tcf_proto *filter;
        struct tcf_result res;
        int result;

        if (TC_H_MAJ(skb->priority) == sch->handle &&
            TC_H_MIN(skb->priority) > 0 &&
            TC_H_MIN(skb->priority) <= q->flows_cnt)
                return TC_H_MIN(skb->priority);

        filter = rcu_dereference_bh(q->filter_list);
        if (!filter)
                return fq_codel_hash(q, skb) + 1;

        *qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
        result = tc_classify(skb, filter, &res);
        if (result >= 0) {
#ifdef CONFIG_NET_CLS_ACT
                switch (result) {
                case TC_ACT_STOLEN:
                case TC_ACT_QUEUED:
                        *qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
                        /* fall through */
                case TC_ACT_SHOT:
                        return 0;
                }
#endif
                if (TC_H_MIN(res.classid) <= q->flows_cnt)
                        return TC_H_MIN(res.classid);
        }
        return 0;
}

/* helper functions : might be changed when/if skb uses a standard list_head */
/* remove one skb from head of slot queue */
static inline struct sk_buff *dequeue_head(struct fq_codel_flow *flow)
{
        struct sk_buff *skb = flow->head;

        flow->head = skb->next;
        skb->next = NULL;
        return skb;
}

/* add skb to flow queue (tail add) */
static inline void flow_queue_add(struct fq_codel_flow *flow,
                                  struct sk_buff *skb)
{
        if (flow->head == NULL)
                flow->head = skb;
        else
                flow->tail->next = skb;
        flow->tail = skb;
        skb->next = NULL;
}
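
/* Called when the qdisc exceeds its packet limit: drop one packet from
 * the head of the fattest flow (largest byte backlog) and return that
 * flow's index, so that fq_codel_enqueue() can tell whether the new
 * packet's own flow paid the price.
 */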
static unsigned int fq_codel_drop(struct Qdisc *sch)
{
        struct fq_codel_sched_data *q = qdisc_priv(sch);
        struct sk_buff *skb;
        unsigned int maxbacklog = 0, idx = 0, i, len;
        struct fq_codel_flow *flow;

        /* Queue is full! Find the fat flow and drop a packet from it.
         * This might sound expensive, but with 1024 flows, we scan
         * 4KB of memory, and we don't need to handle a complex tree
         * in the fast path (packet queue/enqueue) with many cache misses.
         */
        for (i = 0; i < q->flows_cnt; i++) {
                if (q->backlogs[i] > maxbacklog) {
                        maxbacklog = q->backlogs[i];
                        idx = i;
                }
        }
        flow = &q->flows[idx];
        skb = dequeue_head(flow);
        len = qdisc_pkt_len(skb);
        q->backlogs[idx] -= len;
        sch->q.qlen--;
        qdisc_qstats_drop(sch);
        qdisc_qstats_backlog_dec(sch, skb);
        /* all accounting done; the skb must not be touched after this free */
        kfree_skb(skb);
        flow->dropped++;
        return idx;
}
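
/* Enqueue path: classify the packet to a flow, timestamp it for CoDel's
 * sojourn-time measurement, append it to the flow's FIFO and, if the
 * flow was idle, (re)attach the flow to new_flows with a fresh quantum
 * of credit.
 */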
static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
        struct fq_codel_sched_data *q = qdisc_priv(sch);
        unsigned int idx;
        struct fq_codel_flow *flow;
        int uninitialized_var(ret);

        idx = fq_codel_classify(skb, sch, &ret);
        if (idx == 0) {
                if (ret & __NET_XMIT_BYPASS)
                        qdisc_qstats_drop(sch);
                kfree_skb(skb);
                return ret;
        }
        idx--;

        codel_set_enqueue_time(skb);
        flow = &q->flows[idx];
        flow_queue_add(flow, skb);
        q->backlogs[idx] += qdisc_pkt_len(skb);
        qdisc_qstats_backlog_inc(sch, skb);

        if (list_empty(&flow->flowchain)) {
                list_add_tail(&flow->flowchain, &q->new_flows);
                q->new_flow_count++;
                flow->deficit = q->quantum;
                flow->dropped = 0;
        }
        if (++sch->q.qlen <= sch->limit)
                return NET_XMIT_SUCCESS;

        q->drop_overlimit++;
        /* Return Congestion Notification only if we dropped a packet
         * from this flow.
         */
        if (fq_codel_drop(sch) == idx)
                return NET_XMIT_CN;

        /* As we dropped a packet, better let upper stack know this */
        qdisc_tree_decrease_qlen(sch, 1);
        return NET_XMIT_SUCCESS;
}

/* This is the specific function called from codel_dequeue()
 * to dequeue a packet from a queue. Note: backlog is handled in
 * codel, we don't need to reduce it here.
 */
static struct sk_buff *dequeue(struct codel_vars *vars, struct Qdisc *sch)
{
        struct fq_codel_sched_data *q = qdisc_priv(sch);
        struct fq_codel_flow *flow;
        struct sk_buff *skb = NULL;

        flow = container_of(vars, struct fq_codel_flow, cvars);
        if (flow->head) {
                skb = dequeue_head(flow);
                q->backlogs[flow - q->flows] -= qdisc_pkt_len(skb);
                sch->q.qlen--;
        }
        return skb;
}
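
/* Dequeue path: a Deficit Round Robin scheduler over two lists. New
 * flows are served first; a flow whose deficit is exhausted receives
 * another quantum and moves to the tail of old_flows, and a new flow
 * that empties is demoted to old_flows once before being unlinked, so
 * a burst of new flows cannot starve the old ones.
 */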
static struct sk_buff *fq_codel_dequeue(struct Qdisc *sch)
{
        struct fq_codel_sched_data *q = qdisc_priv(sch);
        struct sk_buff *skb;
        struct fq_codel_flow *flow;
        struct list_head *head;
        u32 prev_drop_count, prev_ecn_mark;

begin:
        head = &q->new_flows;
        if (list_empty(head)) {
                head = &q->old_flows;
                if (list_empty(head))
                        return NULL;
        }
        flow = list_first_entry(head, struct fq_codel_flow, flowchain);

        if (flow->deficit <= 0) {
                flow->deficit += q->quantum;
                list_move_tail(&flow->flowchain, &q->old_flows);
                goto begin;
        }

        prev_drop_count = q->cstats.drop_count;
        prev_ecn_mark = q->cstats.ecn_mark;

        skb = codel_dequeue(sch, &q->cparams, &flow->cvars, &q->cstats,
                            dequeue);

        flow->dropped += q->cstats.drop_count - prev_drop_count;
        flow->dropped += q->cstats.ecn_mark - prev_ecn_mark;

        if (!skb) {
                /* force a pass through old_flows to prevent starvation */
                if ((head == &q->new_flows) && !list_empty(&q->old_flows))
                        list_move_tail(&flow->flowchain, &q->old_flows);
                else
                        list_del_init(&flow->flowchain);
                goto begin;
        }
        qdisc_bstats_update(sch, skb);
        flow->deficit -= qdisc_pkt_len(skb);
        /* We can't call qdisc_tree_decrease_qlen() if our qlen is 0,
         * or HTB crashes. Defer it for next round.
         */
        if (q->cstats.drop_count && sch->q.qlen) {
                qdisc_tree_decrease_qlen(sch, q->cstats.drop_count);
                q->cstats.drop_count = 0;
        }
        return skb;
}

static void fq_codel_reset(struct Qdisc *sch)
{
        struct sk_buff *skb;

        while ((skb = fq_codel_dequeue(sch)) != NULL)
                kfree_skb(skb);
}

static const struct nla_policy fq_codel_policy[TCA_FQ_CODEL_MAX + 1] = {
        [TCA_FQ_CODEL_TARGET]   = { .type = NLA_U32 },
        [TCA_FQ_CODEL_LIMIT]    = { .type = NLA_U32 },
        [TCA_FQ_CODEL_INTERVAL] = { .type = NLA_U32 },
        [TCA_FQ_CODEL_ECN]      = { .type = NLA_U32 },
        [TCA_FQ_CODEL_FLOWS]    = { .type = NLA_U32 },
        [TCA_FQ_CODEL_QUANTUM]  = { .type = NLA_U32 },
};
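
/* Note on units: TCA_FQ_CODEL_TARGET and TCA_FQ_CODEL_INTERVAL arrive in
 * microseconds over netlink, while codel keeps time internally in
 * ns >> CODEL_SHIFT units, hence the conversions below.
 */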
static int fq_codel_change(struct Qdisc *sch, struct nlattr *opt)
{
        struct fq_codel_sched_data *q = qdisc_priv(sch);
        struct nlattr *tb[TCA_FQ_CODEL_MAX + 1];
        int err;

        if (!opt)
                return -EINVAL;

        err = nla_parse_nested(tb, TCA_FQ_CODEL_MAX, opt, fq_codel_policy);
        if (err < 0)
                return err;
        if (tb[TCA_FQ_CODEL_FLOWS]) {
                if (q->flows)
                        return -EINVAL;
                q->flows_cnt = nla_get_u32(tb[TCA_FQ_CODEL_FLOWS]);
                if (!q->flows_cnt ||
                    q->flows_cnt > 65536)
                        return -EINVAL;
        }
        sch_tree_lock(sch);

        if (tb[TCA_FQ_CODEL_TARGET]) {
                u64 target = nla_get_u32(tb[TCA_FQ_CODEL_TARGET]);

                q->cparams.target = (target * NSEC_PER_USEC) >> CODEL_SHIFT;
        }

        if (tb[TCA_FQ_CODEL_INTERVAL]) {
                u64 interval = nla_get_u32(tb[TCA_FQ_CODEL_INTERVAL]);

                q->cparams.interval = (interval * NSEC_PER_USEC) >> CODEL_SHIFT;
        }

        if (tb[TCA_FQ_CODEL_LIMIT])
                sch->limit = nla_get_u32(tb[TCA_FQ_CODEL_LIMIT]);

        if (tb[TCA_FQ_CODEL_ECN])
                q->cparams.ecn = !!nla_get_u32(tb[TCA_FQ_CODEL_ECN]);

        if (tb[TCA_FQ_CODEL_QUANTUM])
                q->quantum = max(256U, nla_get_u32(tb[TCA_FQ_CODEL_QUANTUM]));

        while (sch->q.qlen > sch->limit) {
                struct sk_buff *skb = fq_codel_dequeue(sch);

                kfree_skb(skb);
                q->cstats.drop_count++;
        }
        qdisc_tree_decrease_qlen(sch, q->cstats.drop_count);
        q->cstats.drop_count = 0;

        sch_tree_unlock(sch);
        return 0;
}
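
/* The flow table can be sizable (up to 64K flows of 64 bytes each), so
 * try kmalloc first and quietly fall back to vmalloc when contiguous
 * pages are unavailable; kvfree() copes with either allocation.
 */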
static void *fq_codel_zalloc(size_t sz)
{
        void *ptr = kzalloc(sz, GFP_KERNEL | __GFP_NOWARN);

        if (!ptr)
                ptr = vzalloc(sz);
        return ptr;
}

static void fq_codel_free(void *addr)
{
        kvfree(addr);
}

static void fq_codel_destroy(struct Qdisc *sch)
{
        struct fq_codel_sched_data *q = qdisc_priv(sch);

        tcf_destroy_chain(&q->filter_list);
        fq_codel_free(q->backlogs);
        fq_codel_free(q->flows);
}
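
/* Defaults chosen at init time: a 10240-packet limit, 1024 flow buckets,
 * a quantum of one MTU-sized packet, ECN marking enabled, and a random
 * hash perturbation so the flow-to-bucket mapping differs per instance.
 */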
static int fq_codel_init(struct Qdisc *sch, struct nlattr *opt)
{
        struct fq_codel_sched_data *q = qdisc_priv(sch);
        int i;

        sch->limit = 10*1024;
        q->flows_cnt = 1024;
        q->quantum = psched_mtu(qdisc_dev(sch));
        q->perturbation = prandom_u32();
        INIT_LIST_HEAD(&q->new_flows);
        INIT_LIST_HEAD(&q->old_flows);
        codel_params_init(&q->cparams);
        codel_stats_init(&q->cstats);
        q->cparams.ecn = true;

        if (opt) {
                int err = fq_codel_change(sch, opt);
                if (err)
                        return err;
        }

        if (!q->flows) {
                q->flows = fq_codel_zalloc(q->flows_cnt *
                                           sizeof(struct fq_codel_flow));
                if (!q->flows)
                        return -ENOMEM;
                q->backlogs = fq_codel_zalloc(q->flows_cnt * sizeof(u32));
                if (!q->backlogs) {
                        fq_codel_free(q->flows);
                        return -ENOMEM;
                }
                for (i = 0; i < q->flows_cnt; i++) {
                        struct fq_codel_flow *flow = q->flows + i;

                        INIT_LIST_HEAD(&flow->flowchain);
                        codel_vars_init(&flow->cvars);
                }
        }
        if (sch->limit >= 1)
                sch->flags |= TCQ_F_CAN_BYPASS;
        else
                sch->flags &= ~TCQ_F_CAN_BYPASS;
        return 0;
}

static int fq_codel_dump(struct Qdisc *sch, struct sk_buff *skb)
{
        struct fq_codel_sched_data *q = qdisc_priv(sch);
        struct nlattr *opts;

        opts = nla_nest_start(skb, TCA_OPTIONS);
        if (opts == NULL)
                goto nla_put_failure;

        if (nla_put_u32(skb, TCA_FQ_CODEL_TARGET,
                        codel_time_to_us(q->cparams.target)) ||
            nla_put_u32(skb, TCA_FQ_CODEL_LIMIT,
                        sch->limit) ||
            nla_put_u32(skb, TCA_FQ_CODEL_INTERVAL,
                        codel_time_to_us(q->cparams.interval)) ||
            nla_put_u32(skb, TCA_FQ_CODEL_ECN,
                        q->cparams.ecn) ||
            nla_put_u32(skb, TCA_FQ_CODEL_QUANTUM,
                        q->quantum) ||
            nla_put_u32(skb, TCA_FQ_CODEL_FLOWS,
                        q->flows_cnt))
                goto nla_put_failure;

        return nla_nest_end(skb, opts);

nla_put_failure:
        return -1;
}

static int fq_codel_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
        struct fq_codel_sched_data *q = qdisc_priv(sch);
        struct tc_fq_codel_xstats st = {
                .type = TCA_FQ_CODEL_XSTATS_QDISC,
        };
        struct list_head *pos;

        st.qdisc_stats.maxpacket = q->cstats.maxpacket;
        st.qdisc_stats.drop_overlimit = q->drop_overlimit;
        st.qdisc_stats.ecn_mark = q->cstats.ecn_mark;
        st.qdisc_stats.new_flow_count = q->new_flow_count;

        list_for_each(pos, &q->new_flows)
                st.qdisc_stats.new_flows_len++;

        list_for_each(pos, &q->old_flows)
                st.qdisc_stats.old_flows_len++;

        return gnet_stats_copy_app(d, &st, sizeof(st));
}
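
/* fq_codel is not classful in the usual sense: the class ops below only
 * expose each flow as a read-only pseudo-class, so that tc can attach
 * filters and dump per-flow statistics.
 */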
static struct Qdisc *fq_codel_leaf(struct Qdisc *sch, unsigned long arg)
{
        return NULL;
}

static unsigned long fq_codel_get(struct Qdisc *sch, u32 classid)
{
        return 0;
}

static unsigned long fq_codel_bind(struct Qdisc *sch, unsigned long parent,
                                   u32 classid)
{
        /* we cannot bypass queue discipline anymore */
        sch->flags &= ~TCQ_F_CAN_BYPASS;
        return 0;
}

static void fq_codel_put(struct Qdisc *q, unsigned long cl)
{
}

static struct tcf_proto __rcu **fq_codel_find_tcf(struct Qdisc *sch,
                                                  unsigned long cl)
{
        struct fq_codel_sched_data *q = qdisc_priv(sch);

        if (cl)
                return NULL;
        return &q->filter_list;
}

static int fq_codel_dump_class(struct Qdisc *sch, unsigned long cl,
                               struct sk_buff *skb, struct tcmsg *tcm)
{
        tcm->tcm_handle |= TC_H_MIN(cl);
        return 0;
}

static int fq_codel_dump_class_stats(struct Qdisc *sch, unsigned long cl,
                                     struct gnet_dump *d)
{
        struct fq_codel_sched_data *q = qdisc_priv(sch);
        u32 idx = cl - 1;
        struct gnet_stats_queue qs = { 0 };
        struct tc_fq_codel_xstats xstats;

        if (idx < q->flows_cnt) {
                const struct fq_codel_flow *flow = &q->flows[idx];
                const struct sk_buff *skb = flow->head;

                memset(&xstats, 0, sizeof(xstats));
                xstats.type = TCA_FQ_CODEL_XSTATS_CLASS;
                xstats.class_stats.deficit = flow->deficit;
                xstats.class_stats.ldelay =
                        codel_time_to_us(flow->cvars.ldelay);
                xstats.class_stats.count = flow->cvars.count;
                xstats.class_stats.lastcount = flow->cvars.lastcount;
                xstats.class_stats.dropping = flow->cvars.dropping;
                if (flow->cvars.dropping) {
                        codel_tdiff_t delta = flow->cvars.drop_next -
                                              codel_get_time();

                        xstats.class_stats.drop_next = (delta >= 0) ?
                                codel_time_to_us(delta) :
                                -codel_time_to_us(-delta);
                }
                while (skb) {
                        qs.qlen++;
                        skb = skb->next;
                }
                qs.backlog = q->backlogs[idx];
                qs.drops = flow->dropped;
        }
        if (gnet_stats_copy_queue(d, NULL, &qs, 0) < 0)
                return -1;
        if (idx < q->flows_cnt)
                return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
        return 0;
}

static void fq_codel_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
        struct fq_codel_sched_data *q = qdisc_priv(sch);
        unsigned int i;

        if (arg->stop)
                return;

        for (i = 0; i < q->flows_cnt; i++) {
                if (list_empty(&q->flows[i].flowchain) ||
                    arg->count < arg->skip) {
                        arg->count++;
                        continue;
                }
                if (arg->fn(sch, i + 1, arg) < 0) {
                        arg->stop = 1;
                        break;
                }
                arg->count++;
        }
}

static const struct Qdisc_class_ops fq_codel_class_ops = {
        .leaf           = fq_codel_leaf,
        .get            = fq_codel_get,
        .put            = fq_codel_put,
        .tcf_chain      = fq_codel_find_tcf,
        .bind_tcf       = fq_codel_bind,
        .unbind_tcf     = fq_codel_put,
        .dump           = fq_codel_dump_class,
        .dump_stats     = fq_codel_dump_class_stats,
        .walk           = fq_codel_walk,
};

static struct Qdisc_ops fq_codel_qdisc_ops __read_mostly = {
        .cl_ops         = &fq_codel_class_ops,
        .id             = "fq_codel",
        .priv_size      = sizeof(struct fq_codel_sched_data),
        .enqueue        = fq_codel_enqueue,
        .dequeue        = fq_codel_dequeue,
        .peek           = qdisc_peek_dequeued,
        .drop           = fq_codel_drop,
        .init           = fq_codel_init,
        .reset          = fq_codel_reset,
        .destroy        = fq_codel_destroy,
        .change         = fq_codel_change,
        .dump           = fq_codel_dump,
        .dump_stats     = fq_codel_dump_stats,
        .owner          = THIS_MODULE,
};

static int __init fq_codel_module_init(void)
{
        return register_qdisc(&fq_codel_qdisc_ops);
}

static void __exit fq_codel_module_exit(void)
{
        unregister_qdisc(&fq_codel_qdisc_ops);
}

module_init(fq_codel_module_init)
module_exit(fq_codel_module_exit)
MODULE_AUTHOR("Eric Dumazet");
MODULE_LICENSE("GPL");