sch_qfq.c

/*
 * net/sched/sch_qfq.c         Quick Fair Queueing Plus Scheduler.
 *
 * Copyright (c) 2009 Fabio Checconi, Luigi Rizzo, and Paolo Valente.
 * Copyright (c) 2012 Paolo Valente.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/pkt_sched.h>
#include <net/sch_generic.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>


/*  Quick Fair Queueing Plus
    ========================

    Sources:

    [1] Paolo Valente,
    "Reducing the Execution Time of Fair-Queueing Schedulers."
    http://algo.ing.unimo.it/people/paolo/agg-sched/agg-sched.pdf

    Sources for QFQ:

    [2] Fabio Checconi, Luigi Rizzo, and Paolo Valente: "QFQ: Efficient
    Packet Scheduling with Tight Bandwidth Distribution Guarantees."

    See also:
    http://retis.sssup.it/~fabio/linux/qfq/
 */

/*

  QFQ+ divides classes into aggregates of at most MAX_AGG_CLASSES
  classes. Each aggregate is timestamped with a virtual start time S
  and a virtual finish time F, and scheduled according to its
  timestamps. S and F are computed as a function of a system virtual
  time function V. The classes within each aggregate are instead
  scheduled with DRR.

  To speed up operations, QFQ+ also divides aggregates into a limited
  number of groups. Which group a class belongs to depends on the
  ratio between the maximum packet length for the class and the weight
  of the class. Groups have their own S and F. In the end, QFQ+
  schedules groups, then aggregates within groups, then classes within
  aggregates. See [1] and [2] for a full description.

	Virtual time computations.

	S, F and V are all computed in fixed point arithmetic with
	FRAC_BITS fractional bits.

	QFQ_MAX_INDEX is the maximum index allowed for a group. We need
		one bit per index.
	QFQ_MAX_WSHIFT is the maximum power of two supported as a weight.

	The layout of the bits is as below:
                   [ MTU_SHIFT ][      FRAC_BITS    ]
                   [ MAX_INDEX    ][ MIN_SLOT_SHIFT ]
				 ^.__grp->index = 0
				 *.__grp->slot_shift

	where MIN_SLOT_SHIFT is derived by difference from the others.

	The max group index corresponds to Lmax/w_min, where
	Lmax=1<<MTU_SHIFT, w_min = 1.
	From this, and knowing how many groups (MAX_INDEX) we want,
	we can derive the shift corresponding to each group.

	Because we often need to compute
		F = S + len/w_i  and  V = V + len/wsum
	instead of storing w_i we store the value
		inv_w = (1<<FRAC_BITS)/w_i
	so we can do F = S + len * inv_w.
	We use W_TOT in the formulas so we can easily move between
	static and adaptive weight sum.

	The per-scheduler-instance data contain all the data structures
	for the scheduler: bitmaps and bucket lists.

 */
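/*
 * Worked example of the fixed-point arithmetic above: with
 * FRAC_BITS = 30, ONE_FP = 2^30. A flow with weight w_i = 4 stores
 * inv_w = ONE_FP/4 = 2^28, so serving a len = 1500 byte packet
 * advances its finish time by len * inv_w = 1500 * 2^28, which is
 * 1500/4 = 375 time units once the ONE_FP scaling is factored out.
 */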
/*
 * Maximum number of consecutive slots occupied by backlogged classes
 * inside a group.
 */
#define QFQ_MAX_SLOTS	32

/*
 * Shifts used for aggregate<->group mapping. We allow class weights that are
 * in the range [1, 2^MAX_WSHIFT], and we try to map each aggregate i to the
 * group with the smallest index that can support the L_i / r_i configured
 * for the classes in the aggregate.
 *
 * grp->index is the index of the group; and grp->slot_shift
 * is the shift for the corresponding (scaled) sigma_i.
 */
#define QFQ_MAX_INDEX		24
#define QFQ_MAX_WSHIFT		10

#define	QFQ_MAX_WEIGHT		(1<<QFQ_MAX_WSHIFT) /* see qfq_slot_insert */
#define QFQ_MAX_WSUM		(64*QFQ_MAX_WEIGHT)

#define FRAC_BITS		30	/* fixed point arithmetic */
#define ONE_FP			(1UL << FRAC_BITS)

#define QFQ_MTU_SHIFT		16	/* to support TSO/GSO */
#define QFQ_MIN_LMAX		512	/* see qfq_slot_insert */

#define QFQ_MAX_AGG_CLASSES	8 /* max num classes per aggregate allowed */

/*
 * Possible group states. These values are used as indexes for the bitmaps
 * array of struct qfq_sched.
 */
enum qfq_state { ER, IR, EB, IB, QFQ_MAX_STATE };

struct qfq_group;

struct qfq_aggregate;

struct qfq_class {
	struct Qdisc_class_common common;

	unsigned int filter_cnt;

	struct gnet_stats_basic_packed bstats;
	struct gnet_stats_queue qstats;
	struct net_rate_estimator __rcu *rate_est;
	struct Qdisc *qdisc;
	struct list_head alist;		/* Link for active-classes list. */
	struct qfq_aggregate *agg;	/* Parent aggregate. */
	int deficit;			/* DRR deficit counter. */
};

struct qfq_aggregate {
	struct hlist_node next;	/* Link for the slot list. */
	u64 S, F;		/* flow timestamps (exact) */

	/* group we belong to. In principle we would need the index,
	 * which is log_2(lmax/weight), but we never reference it
	 * directly, only the group.
	 */
	struct qfq_group *grp;

	/* these are copied from the flowset. */
	u32	class_weight; /* Weight of each class in this aggregate. */
	/* Max pkt size for the classes in this aggregate, DRR quantum. */
	int	lmax;

	u32	inv_w;	    /* ONE_FP/(sum of weights of classes in aggr.). */
	u32	budgetmax;  /* Max budget for this aggregate. */
	u32	initial_budget, budget;     /* Initial and current budget. */

	int		  num_classes;	/* Number of classes in this aggr. */
	struct list_head  active;	/* DRR queue of active classes. */

	struct hlist_node nonfull_next;	/* See nonfull_aggs in qfq_sched. */
};

struct qfq_group {
	u64 S, F;			/* group timestamps (approx). */
	unsigned int slot_shift;	/* Slot shift. */
	unsigned int index;		/* Group index. */
	unsigned int front;		/* Index of the front slot. */
	unsigned long full_slots;	/* non-empty slots */

	/* Array of RR lists of active aggregates. */
	struct hlist_head slots[QFQ_MAX_SLOTS];
};

struct qfq_sched {
	struct tcf_proto __rcu *filter_list;
	struct tcf_block	*block;
	struct Qdisc_class_hash clhash;

	u64			oldV, V;	/* Precise virtual times. */
	struct qfq_aggregate	*in_serv_agg;	/* Aggregate being served. */
	u32			wsum;		/* weight sum */
	u32			iwsum;		/* inverse weight sum */

	unsigned long bitmaps[QFQ_MAX_STATE];	    /* Group bitmaps. */
	struct qfq_group groups[QFQ_MAX_INDEX + 1]; /* The groups. */
	u32 min_slot_shift;	/* Index of the group-0 bit in the bitmaps. */

	u32 max_agg_classes;		/* Max number of classes per aggr. */
	struct hlist_head nonfull_aggs; /* Aggs with room for more classes. */
};

/*
 * Possible reasons why the timestamps of an aggregate are updated
 * enqueue: the aggregate switches from idle to active and must be scheduled
 *	    for service
 * requeue: the aggregate finishes its budget, so it stops being served and
 *	    must be rescheduled for service
 */
enum update_reason {enqueue, requeue};

static struct qfq_class *qfq_find_class(struct Qdisc *sch, u32 classid)
{
	struct qfq_sched *q = qdisc_priv(sch);
	struct Qdisc_class_common *clc;

	clc = qdisc_class_find(&q->clhash, classid);
	if (clc == NULL)
		return NULL;
	return container_of(clc, struct qfq_class, common);
}

static void qfq_purge_queue(struct qfq_class *cl)
{
	unsigned int len = cl->qdisc->q.qlen;
	unsigned int backlog = cl->qdisc->qstats.backlog;

	qdisc_reset(cl->qdisc);
	qdisc_tree_reduce_backlog(cl->qdisc, len, backlog);
}

static const struct nla_policy qfq_policy[TCA_QFQ_MAX + 1] = {
	[TCA_QFQ_WEIGHT] = { .type = NLA_U32 },
	[TCA_QFQ_LMAX] = { .type = NLA_U32 },
};

/*
 * Calculate a flow index, given its weight and maximum packet length.
 * index = log_2(maxlen/weight) but we need to apply the scaling.
 * This is used only once at flow creation.
 */
static int qfq_calc_index(u32 inv_w, unsigned int maxlen, u32 min_slot_shift)
{
	u64 slot_size = (u64)maxlen * inv_w;
	unsigned long size_map;
	int index = 0;

	size_map = slot_size >> min_slot_shift;
	if (!size_map)
		goto out;

	index = __fls(size_map) + 1;	/* basically a log_2 */
	index -= !(slot_size - (1ULL << (index + min_slot_shift - 1)));

	if (index < 0)
		index = 0;
out:
	pr_debug("qfq calc_index: W = %lu, L = %u, I = %d\n",
		 (unsigned long) ONE_FP/inv_w, maxlen, index);

	return index;
}
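/*
 * An illustrative trace of the index calculation (values assume
 * min_slot_shift = 25, the value computed in qfq_init_qdisc when
 * max_agg_classes = 8): a single-class aggregate with weight 1
 * (inv_w = 2^30) and maxlen = 1500 gives slot_size = 1500 * 2^30,
 * size_map = slot_size >> 25 = 48000, so index = __fls(48000) + 1 = 16;
 * since slot_size is not an exact power of two, the correction term
 * subtracts nothing and the aggregate lands in group 16.
 */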
static void qfq_deactivate_agg(struct qfq_sched *, struct qfq_aggregate *);
static void qfq_activate_agg(struct qfq_sched *, struct qfq_aggregate *,
			     enum update_reason);

static void qfq_init_agg(struct qfq_sched *q, struct qfq_aggregate *agg,
			 u32 lmax, u32 weight)
{
	INIT_LIST_HEAD(&agg->active);
	hlist_add_head(&agg->nonfull_next, &q->nonfull_aggs);

	agg->lmax = lmax;
	agg->class_weight = weight;
}

static struct qfq_aggregate *qfq_find_agg(struct qfq_sched *q,
					  u32 lmax, u32 weight)
{
	struct qfq_aggregate *agg;

	hlist_for_each_entry(agg, &q->nonfull_aggs, nonfull_next)
		if (agg->lmax == lmax && agg->class_weight == weight)
			return agg;

	return NULL;
}


/* Update aggregate as a function of the new number of classes. */
static void qfq_update_agg(struct qfq_sched *q, struct qfq_aggregate *agg,
			   int new_num_classes)
{
	u32 new_agg_weight;

	if (new_num_classes == q->max_agg_classes)
		hlist_del_init(&agg->nonfull_next);

	if (agg->num_classes > new_num_classes &&
	    new_num_classes == q->max_agg_classes - 1) /* agg no more full */
		hlist_add_head(&agg->nonfull_next, &q->nonfull_aggs);

	/* The next assignment may let
	 * agg->initial_budget > agg->budgetmax
	 * hold, we will take it into account in charge_actual_service().
	 */
	agg->budgetmax = new_num_classes * agg->lmax;
	new_agg_weight = agg->class_weight * new_num_classes;
	agg->inv_w = ONE_FP/new_agg_weight;

	if (agg->grp == NULL) {
		int i = qfq_calc_index(agg->inv_w, agg->budgetmax,
				       q->min_slot_shift);
		agg->grp = &q->groups[i];
	}

	q->wsum +=
		(int) agg->class_weight * (new_num_classes - agg->num_classes);
	q->iwsum = ONE_FP / q->wsum;

	agg->num_classes = new_num_classes;
}
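/*
 * For instance, an aggregate of classes with class_weight = 2 that
 * grows from 2 to 3 classes gets new_agg_weight = 6, so
 * inv_w = ONE_FP/6 and budgetmax = 3 * lmax, while q->wsum grows by 2
 * and q->iwsum is recomputed accordingly.
 */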
/* Add class to aggregate. */
static void qfq_add_to_agg(struct qfq_sched *q,
			   struct qfq_aggregate *agg,
			   struct qfq_class *cl)
{
	cl->agg = agg;

	qfq_update_agg(q, agg, agg->num_classes+1);
	if (cl->qdisc->q.qlen > 0) { /* adding an active class */
		list_add_tail(&cl->alist, &agg->active);
		if (list_first_entry(&agg->active, struct qfq_class, alist) ==
		    cl && q->in_serv_agg != agg) /* agg was inactive */
			qfq_activate_agg(q, agg, enqueue); /* schedule agg */
	}
}

static struct qfq_aggregate *qfq_choose_next_agg(struct qfq_sched *);

static void qfq_destroy_agg(struct qfq_sched *q, struct qfq_aggregate *agg)
{
	hlist_del_init(&agg->nonfull_next);
	q->wsum -= agg->class_weight;
	if (q->wsum != 0)
		q->iwsum = ONE_FP / q->wsum;

	if (q->in_serv_agg == agg)
		q->in_serv_agg = qfq_choose_next_agg(q);
	kfree(agg);
}

/* Deschedule class from within its parent aggregate. */
static void qfq_deactivate_class(struct qfq_sched *q, struct qfq_class *cl)
{
	struct qfq_aggregate *agg = cl->agg;

	list_del(&cl->alist); /* remove from RR queue of the aggregate */
	if (list_empty(&agg->active)) /* agg is now inactive */
		qfq_deactivate_agg(q, agg);
}

/* Remove class from its parent aggregate. */
static void qfq_rm_from_agg(struct qfq_sched *q, struct qfq_class *cl)
{
	struct qfq_aggregate *agg = cl->agg;

	cl->agg = NULL;
	if (agg->num_classes == 1) { /* agg being emptied, destroy it */
		qfq_destroy_agg(q, agg);
		return;
	}
	qfq_update_agg(q, agg, agg->num_classes-1);
}

/* Deschedule class and remove it from its parent aggregate. */
static void qfq_deact_rm_from_agg(struct qfq_sched *q, struct qfq_class *cl)
{
	if (cl->qdisc->q.qlen > 0) /* class is active */
		qfq_deactivate_class(q, cl);

	qfq_rm_from_agg(q, cl);
}

/* Move class to a new aggregate, matching the new class weight and/or lmax */
static int qfq_change_agg(struct Qdisc *sch, struct qfq_class *cl, u32 weight,
			  u32 lmax)
{
	struct qfq_sched *q = qdisc_priv(sch);
	struct qfq_aggregate *new_agg = qfq_find_agg(q, lmax, weight);

	if (new_agg == NULL) { /* create new aggregate */
		new_agg = kzalloc(sizeof(*new_agg), GFP_ATOMIC);
		if (new_agg == NULL)
			return -ENOBUFS;
		qfq_init_agg(q, new_agg, lmax, weight);
	}
	qfq_deact_rm_from_agg(q, cl);
	qfq_add_to_agg(q, new_agg, cl);

	return 0;
}

static int qfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
			    struct nlattr **tca, unsigned long *arg)
{
	struct qfq_sched *q = qdisc_priv(sch);
	struct qfq_class *cl = (struct qfq_class *)*arg;
	bool existing = false;
	struct nlattr *tb[TCA_QFQ_MAX + 1];
	struct qfq_aggregate *new_agg = NULL;
	u32 weight, lmax, inv_w;
	int err;
	int delta_w;

	if (tca[TCA_OPTIONS] == NULL) {
		pr_notice("qfq: no options\n");
		return -EINVAL;
	}

	err = nla_parse_nested(tb, TCA_QFQ_MAX, tca[TCA_OPTIONS], qfq_policy,
			       NULL);
	if (err < 0)
		return err;

	if (tb[TCA_QFQ_WEIGHT]) {
		weight = nla_get_u32(tb[TCA_QFQ_WEIGHT]);
		if (!weight || weight > (1UL << QFQ_MAX_WSHIFT)) {
			pr_notice("qfq: invalid weight %u\n", weight);
			return -EINVAL;
		}
	} else
		weight = 1;

	if (tb[TCA_QFQ_LMAX]) {
		lmax = nla_get_u32(tb[TCA_QFQ_LMAX]);
		if (lmax < QFQ_MIN_LMAX || lmax > (1UL << QFQ_MTU_SHIFT)) {
			pr_notice("qfq: invalid max length %u\n", lmax);
			return -EINVAL;
		}
	} else
		lmax = psched_mtu(qdisc_dev(sch));

	inv_w = ONE_FP / weight;
	weight = ONE_FP / inv_w;

	if (cl != NULL &&
	    lmax == cl->agg->lmax &&
	    weight == cl->agg->class_weight)
		return 0; /* nothing to change */

	delta_w = weight - (cl ? cl->agg->class_weight : 0);

	if (q->wsum + delta_w > QFQ_MAX_WSUM) {
		pr_notice("qfq: total weight out of range (%d + %u)\n",
			  delta_w, q->wsum);
		return -EINVAL;
	}

	if (cl != NULL) { /* modify existing class */
		if (tca[TCA_RATE]) {
			err = gen_replace_estimator(&cl->bstats, NULL,
						    &cl->rate_est,
						    NULL,
						    qdisc_root_sleeping_running(sch),
						    tca[TCA_RATE]);
			if (err)
				return err;
		}
		existing = true;
		goto set_change_agg;
	}

	/* create and init new class */
	cl = kzalloc(sizeof(struct qfq_class), GFP_KERNEL);
	if (cl == NULL)
		return -ENOBUFS;

	cl->common.classid = classid;
	cl->deficit = lmax;

	cl->qdisc = qdisc_create_dflt(sch->dev_queue,
				      &pfifo_qdisc_ops, classid);
	if (cl->qdisc == NULL)
		cl->qdisc = &noop_qdisc;

	if (tca[TCA_RATE]) {
		err = gen_new_estimator(&cl->bstats, NULL,
					&cl->rate_est,
					NULL,
					qdisc_root_sleeping_running(sch),
					tca[TCA_RATE]);
		if (err)
			goto destroy_class;
	}

	if (cl->qdisc != &noop_qdisc)
		qdisc_hash_add(cl->qdisc, true);
	sch_tree_lock(sch);
	qdisc_class_hash_insert(&q->clhash, &cl->common);
	sch_tree_unlock(sch);

	qdisc_class_hash_grow(sch, &q->clhash);

set_change_agg:
	sch_tree_lock(sch);
	new_agg = qfq_find_agg(q, lmax, weight);
	if (new_agg == NULL) { /* create new aggregate */
		sch_tree_unlock(sch);
		new_agg = kzalloc(sizeof(*new_agg), GFP_KERNEL);
		if (new_agg == NULL) {
			err = -ENOBUFS;
			gen_kill_estimator(&cl->rate_est);
			goto destroy_class;
		}
		sch_tree_lock(sch);
		qfq_init_agg(q, new_agg, lmax, weight);
	}
	if (existing)
		qfq_deact_rm_from_agg(q, cl);
	qfq_add_to_agg(q, new_agg, cl);
	sch_tree_unlock(sch);

	*arg = (unsigned long)cl;
	return 0;

destroy_class:
	qdisc_destroy(cl->qdisc);
	kfree(cl);
	return err;
}
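/*
 * For illustration only: the TCA_QFQ_WEIGHT and TCA_QFQ_LMAX attributes
 * parsed above are typically filled in from user space by iproute2's tc
 * (assuming its qfq support, where "weight" and "maxpkt" map to these
 * attributes), e.g.:
 *
 *   tc qdisc add dev eth0 root handle 1: qfq
 *   tc class add dev eth0 parent 1: classid 1:1 qfq weight 10 maxpkt 1514
 */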
static void qfq_destroy_class(struct Qdisc *sch, struct qfq_class *cl)
{
	struct qfq_sched *q = qdisc_priv(sch);

	qfq_rm_from_agg(q, cl);
	gen_kill_estimator(&cl->rate_est);
	qdisc_destroy(cl->qdisc);
	kfree(cl);
}

static int qfq_delete_class(struct Qdisc *sch, unsigned long arg)
{
	struct qfq_sched *q = qdisc_priv(sch);
	struct qfq_class *cl = (struct qfq_class *)arg;

	if (cl->filter_cnt > 0)
		return -EBUSY;

	sch_tree_lock(sch);

	qfq_purge_queue(cl);
	qdisc_class_hash_remove(&q->clhash, &cl->common);

	sch_tree_unlock(sch);

	qfq_destroy_class(sch, cl);
	return 0;
}

static unsigned long qfq_search_class(struct Qdisc *sch, u32 classid)
{
	return (unsigned long)qfq_find_class(sch, classid);
}

static struct tcf_block *qfq_tcf_block(struct Qdisc *sch, unsigned long cl)
{
	struct qfq_sched *q = qdisc_priv(sch);

	if (cl)
		return NULL;

	return q->block;
}

static unsigned long qfq_bind_tcf(struct Qdisc *sch, unsigned long parent,
				  u32 classid)
{
	struct qfq_class *cl = qfq_find_class(sch, classid);

	if (cl != NULL)
		cl->filter_cnt++;

	return (unsigned long)cl;
}

static void qfq_unbind_tcf(struct Qdisc *sch, unsigned long arg)
{
	struct qfq_class *cl = (struct qfq_class *)arg;

	cl->filter_cnt--;
}

static int qfq_graft_class(struct Qdisc *sch, unsigned long arg,
			   struct Qdisc *new, struct Qdisc **old)
{
	struct qfq_class *cl = (struct qfq_class *)arg;

	if (new == NULL) {
		new = qdisc_create_dflt(sch->dev_queue,
					&pfifo_qdisc_ops, cl->common.classid);
		if (new == NULL)
			new = &noop_qdisc;
	}

	*old = qdisc_replace(sch, new, &cl->qdisc);
	return 0;
}

static struct Qdisc *qfq_class_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct qfq_class *cl = (struct qfq_class *)arg;

	return cl->qdisc;
}

static int qfq_dump_class(struct Qdisc *sch, unsigned long arg,
			  struct sk_buff *skb, struct tcmsg *tcm)
{
	struct qfq_class *cl = (struct qfq_class *)arg;
	struct nlattr *nest;

	tcm->tcm_parent	= TC_H_ROOT;
	tcm->tcm_handle	= cl->common.classid;
	tcm->tcm_info	= cl->qdisc->handle;

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;
	if (nla_put_u32(skb, TCA_QFQ_WEIGHT, cl->agg->class_weight) ||
	    nla_put_u32(skb, TCA_QFQ_LMAX, cl->agg->lmax))
		goto nla_put_failure;
	return nla_nest_end(skb, nest);

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -EMSGSIZE;
}

static int qfq_dump_class_stats(struct Qdisc *sch, unsigned long arg,
				struct gnet_dump *d)
{
	struct qfq_class *cl = (struct qfq_class *)arg;
	struct tc_qfq_stats xstats;

	memset(&xstats, 0, sizeof(xstats));

	xstats.weight = cl->agg->class_weight;
	xstats.lmax = cl->agg->lmax;

	if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch),
				  d, NULL, &cl->bstats) < 0 ||
	    gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 ||
	    gnet_stats_copy_queue(d, NULL,
				  &cl->qdisc->qstats, cl->qdisc->q.qlen) < 0)
		return -1;

	return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
}

static void qfq_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct qfq_sched *q = qdisc_priv(sch);
	struct qfq_class *cl;
	unsigned int i;

	if (arg->stop)
		return;

	for (i = 0; i < q->clhash.hashsize; i++) {
		hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) {
			if (arg->count < arg->skip) {
				arg->count++;
				continue;
			}
			if (arg->fn(sch, (unsigned long)cl, arg) < 0) {
				arg->stop = 1;
				return;
			}
			arg->count++;
		}
	}
}

static struct qfq_class *qfq_classify(struct sk_buff *skb, struct Qdisc *sch,
				      int *qerr)
{
	struct qfq_sched *q = qdisc_priv(sch);
	struct qfq_class *cl;
	struct tcf_result res;
	struct tcf_proto *fl;
	int result;

	if (TC_H_MAJ(skb->priority ^ sch->handle) == 0) {
		pr_debug("qfq_classify: found %d\n", skb->priority);
		cl = qfq_find_class(sch, skb->priority);
		if (cl != NULL)
			return cl;
	}

	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	fl = rcu_dereference_bh(q->filter_list);
	result = tcf_classify(skb, fl, &res, false);
	if (result >= 0) {
#ifdef CONFIG_NET_CLS_ACT
		switch (result) {
		case TC_ACT_QUEUED:
		case TC_ACT_STOLEN:
		case TC_ACT_TRAP:
			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
			/* fall through */
		case TC_ACT_SHOT:
			return NULL;
		}
#endif
		cl = (struct qfq_class *)res.class;
		if (cl == NULL)
			cl = qfq_find_class(sch, res.classid);
		return cl;
	}

	return NULL;
}

/* Generic comparison function, handling wraparound. */
static inline int qfq_gt(u64 a, u64 b)
{
	return (s64)(a - b) > 0;
}
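/*
 * The signed subtraction makes the comparison robust to 64-bit
 * wraparound of the virtual times: e.g. with a = 1 and b = ~0ULL
 * (i.e. b just wrapped), a - b == 2, so qfq_gt(a, b) correctly
 * reports that a is later than b.
 */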
/* Round a precise timestamp to its slotted value. */
static inline u64 qfq_round_down(u64 ts, unsigned int shift)
{
	return ts & ~((1ULL << shift) - 1);
}

/* return the pointer to the group with lowest index in the bitmap */
static inline struct qfq_group *qfq_ffs(struct qfq_sched *q,
					unsigned long bitmap)
{
	int index = __ffs(bitmap);
	return &q->groups[index];
}
/* Calculate a mask to mimic what would be ffs_from(). */
static inline unsigned long mask_from(unsigned long bitmap, int from)
{
	return bitmap & ~((1UL << from) - 1);
}

/*
 * The state computation relies on ER=0, IR=1, EB=2, IB=3
 * First compute eligibility comparing grp->S, q->V,
 * then check if someone is blocking us and possibly add EB
 */
static int qfq_calc_state(struct qfq_sched *q, const struct qfq_group *grp)
{
	/* if S > V we are not eligible */
	unsigned int state = qfq_gt(grp->S, q->V);
	unsigned long mask = mask_from(q->bitmaps[ER], grp->index);
	struct qfq_group *next;

	if (mask) {
		next = qfq_ffs(q, mask);
		if (qfq_gt(grp->F, next->F))
			state |= EB;
	}

	return state;
}


/*
 * In principle
 *	q->bitmaps[dst] |= q->bitmaps[src] & mask;
 *	q->bitmaps[src] &= ~mask;
 * but we should make sure that src != dst
 */
static inline void qfq_move_groups(struct qfq_sched *q, unsigned long mask,
				   int src, int dst)
{
	q->bitmaps[dst] |= q->bitmaps[src] & mask;
	q->bitmaps[src] &= ~mask;
}

static void qfq_unblock_groups(struct qfq_sched *q, int index, u64 old_F)
{
	unsigned long mask = mask_from(q->bitmaps[ER], index + 1);
	struct qfq_group *next;

	if (mask) {
		next = qfq_ffs(q, mask);
		if (!qfq_gt(next->F, old_F))
			return;
	}

	mask = (1UL << index) - 1;
	qfq_move_groups(q, mask, EB, ER);
	qfq_move_groups(q, mask, IB, IR);
}

/*
 * perhaps
 *
	old_V ^= q->V;
	old_V >>= q->min_slot_shift;
	if (old_V) {
		...
	}
 *
 */
static void qfq_make_eligible(struct qfq_sched *q)
{
	unsigned long vslot = q->V >> q->min_slot_shift;
	unsigned long old_vslot = q->oldV >> q->min_slot_shift;

	if (vslot != old_vslot) {
		unsigned long mask;
		int last_flip_pos = fls(vslot ^ old_vslot);

		if (last_flip_pos > 31) /* higher than the number of groups */
			mask = ~0UL; /* make all groups eligible */
		else
			mask = (1UL << last_flip_pos) - 1;

		qfq_move_groups(q, mask, IR, ER);
		qfq_move_groups(q, mask, IB, EB);
	}
}
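/*
 * Example of the mask computation above: if V advances from group-0
 * slot 5 (binary 0101) to slot 8 (binary 1000), then
 * vslot ^ old_vslot = 1101 and fls() returns 4, so mask = 0xf and the
 * groups with index 0..3 are re-evaluated, moving from IR/IB to ER/EB.
 */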
/*
 * The index of the slot in which the input aggregate agg is to be
 * inserted must not be higher than QFQ_MAX_SLOTS-2. There is a '-2'
 * and not a '-1' because the start time of the group may be moved
 * backward by one slot after the aggregate has been inserted, and
 * this would cause non-empty slots to be right-shifted by one
 * position.
 *
 * QFQ+ fully satisfies this bound to the slot index if the parameters
 * of the classes are not changed dynamically, and if QFQ+ never
 * happens to postpone the service of agg unjustly, i.e., it never
 * happens that the aggregate becomes backlogged and eligible, or just
 * eligible, while an aggregate with a higher approximated finish time
 * is being served. In particular, in this case QFQ+ guarantees that
 * the timestamps of agg are low enough that the slot index is never
 * higher than 2. Unfortunately, QFQ+ cannot provide the same
 * guarantee if it happens to unjustly postpone the service of agg, or
 * if the parameters of some class are changed.
 *
 * As for the first event, i.e., an out-of-order service, the
 * upper bound to the slot index guaranteed by QFQ+ grows to
 * 2 +
 * QFQ_MAX_AGG_CLASSES * ((1<<QFQ_MTU_SHIFT)/QFQ_MIN_LMAX) *
 * (current_max_weight/current_wsum) <= 2 + 8 * 128 * 1.
 *
 * The following function deals with this problem by backward-shifting
 * the timestamps of agg, if needed, so as to guarantee that the slot
 * index is never higher than QFQ_MAX_SLOTS-2. This backward-shift may
 * cause the service of other aggregates to be postponed, yet the
 * worst-case guarantees of these aggregates are not violated. In
 * fact, in case of no out-of-order service, the timestamps of agg
 * would have been even lower than they are after the backward shift,
 * because QFQ+ would have guaranteed a maximum value equal to 2 for
 * the slot index, and 2 < QFQ_MAX_SLOTS-2. Hence the aggregates whose
 * service is postponed because of the backward-shift would have
 * however waited for the service of agg before being served.
 *
 * The other event that may cause the slot index to be higher than 2
 * for agg is a recent change of the parameters of some class. If the
 * weight of a class is increased or the lmax (max_pkt_size) of the
 * class is decreased, then a new aggregate with smaller slot size
 * than the original parent aggregate of the class may happen to be
 * activated. The activation of this aggregate should be properly
 * delayed to when the service of the class has finished in the ideal
 * system tracked by QFQ+. If the activation of the aggregate is not
 * delayed to this reference time instant, then this aggregate may be
 * unjustly served before other aggregates waiting for service. This
 * may cause the above bound to the slot index to be violated for some
 * of these unlucky aggregates.
 *
 * Instead of delaying the activation of the new aggregate, which is
 * quite complex, the above-discussed capping of the slot index is
 * used to handle also the consequences of a change of the parameters
 * of a class.
 */
static void qfq_slot_insert(struct qfq_group *grp, struct qfq_aggregate *agg,
			    u64 roundedS)
{
	u64 slot = (roundedS - grp->S) >> grp->slot_shift;
	unsigned int i; /* slot index in the bucket list */

	if (unlikely(slot > QFQ_MAX_SLOTS - 2)) {
		u64 deltaS = roundedS - grp->S -
			((u64)(QFQ_MAX_SLOTS - 2)<<grp->slot_shift);
		agg->S -= deltaS;
		agg->F -= deltaS;
		slot = QFQ_MAX_SLOTS - 2;
	}

	i = (grp->front + slot) % QFQ_MAX_SLOTS;

	hlist_add_head(&agg->next, &grp->slots[i]);
	__set_bit(slot, &grp->full_slots);
}
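/*
 * Concretely, with QFQ_MAX_SLOTS = 32, any aggregate whose rounded
 * start time lands more than 30 slots ahead of grp->S has both of its
 * timestamps shifted backward by deltaS so that it is inserted exactly
 * in slot 30, leaving slot 31 free for a possible backward move of
 * grp->S.
 */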
/* Maybe introduce hlist_first_entry?? */
static struct qfq_aggregate *qfq_slot_head(struct qfq_group *grp)
{
	return hlist_entry(grp->slots[grp->front].first,
			   struct qfq_aggregate, next);
}

/*
 * remove the entry from the slot
 */
static void qfq_front_slot_remove(struct qfq_group *grp)
{
	struct qfq_aggregate *agg = qfq_slot_head(grp);

	BUG_ON(!agg);
	hlist_del(&agg->next);
	if (hlist_empty(&grp->slots[grp->front]))
		__clear_bit(0, &grp->full_slots);
}

/*
 * Returns the first aggregate in the first non-empty bucket of the
 * group. As a side effect, adjusts the bucket list so the first
 * non-empty bucket is at position 0 in full_slots.
 */
static struct qfq_aggregate *qfq_slot_scan(struct qfq_group *grp)
{
	unsigned int i;

	pr_debug("qfq slot_scan: grp %u full %#lx\n",
		 grp->index, grp->full_slots);

	if (grp->full_slots == 0)
		return NULL;

	i = __ffs(grp->full_slots);  /* zero based */
	if (i > 0) {
		grp->front = (grp->front + i) % QFQ_MAX_SLOTS;
		grp->full_slots >>= i;
	}

	return qfq_slot_head(grp);
}

/*
 * adjust the bucket list. When the start time of a group decreases,
 * we move the index down (modulo QFQ_MAX_SLOTS) so we don't need to
 * move the objects. The mask of occupied slots must be shifted
 * because we use ffs() to find the first non-empty slot.
 * This covers decreases in the group's start time, but what about
 * increases of the start time ?
 * Here too we should make sure that i is less than 32
 */
static void qfq_slot_rotate(struct qfq_group *grp, u64 roundedS)
{
	unsigned int i = (grp->S - roundedS) >> grp->slot_shift;

	grp->full_slots <<= i;
	grp->front = (grp->front - i) % QFQ_MAX_SLOTS;
}

static void qfq_update_eligible(struct qfq_sched *q)
{
	struct qfq_group *grp;
	unsigned long ineligible;

	ineligible = q->bitmaps[IR] | q->bitmaps[IB];
	if (ineligible) {
		if (!q->bitmaps[ER]) {
			grp = qfq_ffs(q, ineligible);
			if (qfq_gt(grp->S, q->V))
				q->V = grp->S;
		}
		qfq_make_eligible(q);
	}
}

/* Dequeue head packet of the head class in the DRR queue of the aggregate. */
static void agg_dequeue(struct qfq_aggregate *agg,
			struct qfq_class *cl, unsigned int len)
{
	qdisc_dequeue_peeked(cl->qdisc);

	cl->deficit -= (int) len;

	if (cl->qdisc->q.qlen == 0) /* no more packets, remove from list */
		list_del(&cl->alist);
	else if (cl->deficit < qdisc_pkt_len(cl->qdisc->ops->peek(cl->qdisc))) {
		cl->deficit += agg->lmax;
		list_move_tail(&cl->alist, &agg->active);
	}
}
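/*
 * DRR in a nutshell: with agg->lmax = 1514, a class starts its round
 * with deficit = 1514, so it can send e.g. two 700-byte packets
 * (deficit 1514 -> 814 -> 114); when the next packet is larger than
 * the residual deficit, the class is moved to the tail of the active
 * list and its deficit is recharged by lmax for the next round.
 */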
static inline struct sk_buff *qfq_peek_skb(struct qfq_aggregate *agg,
					   struct qfq_class **cl,
					   unsigned int *len)
{
	struct sk_buff *skb;

	*cl = list_first_entry(&agg->active, struct qfq_class, alist);
	skb = (*cl)->qdisc->ops->peek((*cl)->qdisc);
	if (skb == NULL)
		WARN_ONCE(1, "qfq_dequeue: non-workconserving leaf\n");
	else
		*len = qdisc_pkt_len(skb);

	return skb;
}

/* Update F according to the actual service received by the aggregate. */
static inline void charge_actual_service(struct qfq_aggregate *agg)
{
	/* Compute the service received by the aggregate, taking into
	 * account that, after decreasing the number of classes in
	 * agg, it may happen that
	 * agg->initial_budget - agg->budget > agg->budgetmax
	 */
	u32 service_received = min(agg->budgetmax,
				   agg->initial_budget - agg->budget);

	agg->F = agg->S + (u64)service_received * agg->inv_w;
}

/* Assign a reasonable start time for a new aggregate in group i.
 * Admissible values for \hat(F) are multiples of \sigma_i
 * no greater than V+\sigma_i . Larger values mean that
 * we had a wraparound so we consider the timestamp to be stale.
 *
 * If F is not stale and F >= V then we set S = F.
 * Otherwise we should assign S = V, but this may violate
 * the ordering in EB (see [2]). So, if we have groups in ER,
 * set S to the F_j of the first group j which would be blocking us.
 * We are guaranteed not to move S backward because
 * otherwise our group i would still be blocked.
 */
static void qfq_update_start(struct qfq_sched *q, struct qfq_aggregate *agg)
{
	unsigned long mask;
	u64 limit, roundedF;
	int slot_shift = agg->grp->slot_shift;

	roundedF = qfq_round_down(agg->F, slot_shift);
	limit = qfq_round_down(q->V, slot_shift) + (1ULL << slot_shift);

	if (!qfq_gt(agg->F, q->V) || qfq_gt(roundedF, limit)) {
		/* timestamp was stale */
		mask = mask_from(q->bitmaps[ER], agg->grp->index);
		if (mask) {
			struct qfq_group *next = qfq_ffs(q, mask);
			if (qfq_gt(roundedF, next->F)) {
				if (qfq_gt(limit, next->F))
					agg->S = next->F;
				else /* preserve timestamp correctness */
					agg->S = limit;
				return;
			}
		}
		agg->S = q->V;
	} else  /* timestamp is not stale */
		agg->S = agg->F;
}

/* Update the timestamps of agg before scheduling/rescheduling it for
 * service. In particular, assign to agg->F its maximum possible
 * value, i.e., the virtual finish time with which the aggregate
 * should be labeled if it used all its budget once in service.
 */
static inline void
qfq_update_agg_ts(struct qfq_sched *q,
		  struct qfq_aggregate *agg, enum update_reason reason)
{
	if (reason != requeue)
		qfq_update_start(q, agg);
	else /* just charge agg for the service received */
		agg->S = agg->F;

	agg->F = agg->S + (u64)agg->budgetmax * agg->inv_w;
}

static void qfq_schedule_agg(struct qfq_sched *q, struct qfq_aggregate *agg);

static struct sk_buff *qfq_dequeue(struct Qdisc *sch)
{
	struct qfq_sched *q = qdisc_priv(sch);
	struct qfq_aggregate *in_serv_agg = q->in_serv_agg;
	struct qfq_class *cl;
	struct sk_buff *skb = NULL;
	/* next-packet len, 0 means no more active classes in in-service agg */
	unsigned int len = 0;

	if (in_serv_agg == NULL)
		return NULL;

	if (!list_empty(&in_serv_agg->active))
		skb = qfq_peek_skb(in_serv_agg, &cl, &len);

	/*
	 * If there are no active classes in the in-service aggregate,
	 * or if the aggregate has not enough budget to serve its next
	 * class, then choose the next aggregate to serve.
	 */
	if (len == 0 || in_serv_agg->budget < len) {
		charge_actual_service(in_serv_agg);

		/* recharge the budget of the aggregate */
		in_serv_agg->initial_budget = in_serv_agg->budget =
			in_serv_agg->budgetmax;

		if (!list_empty(&in_serv_agg->active)) {
			/*
			 * Still active: reschedule for
			 * service. Possible optimization: if no other
			 * aggregate is active, then there is no point
			 * in rescheduling this aggregate, and we can
			 * just keep it as the in-service one. This
			 * should be however a corner case, and to
			 * handle it, we would need to maintain an
			 * extra num_active_aggs field.
			 */
			qfq_update_agg_ts(q, in_serv_agg, requeue);
			qfq_schedule_agg(q, in_serv_agg);
		} else if (sch->q.qlen == 0) { /* no aggregate to serve */
			q->in_serv_agg = NULL;
			return NULL;
		}

		/*
		 * If we get here, there are other aggregates queued:
		 * choose the new aggregate to serve.
		 */
		in_serv_agg = q->in_serv_agg = qfq_choose_next_agg(q);
		skb = qfq_peek_skb(in_serv_agg, &cl, &len);
	}
	if (!skb)
		return NULL;

	qdisc_qstats_backlog_dec(sch, skb);
	sch->q.qlen--;
	qdisc_bstats_update(sch, skb);

	agg_dequeue(in_serv_agg, cl, len);
	/* If lmax is lowered, through qfq_change_class, for a class
	 * owning pending packets with larger size than the new value
	 * of lmax, then the following condition may hold.
	 */
	if (unlikely(in_serv_agg->budget < len))
		in_serv_agg->budget = 0;
	else
		in_serv_agg->budget -= len;

	q->V += (u64)len * q->iwsum;
	pr_debug("qfq dequeue: len %u F %lld now %lld\n",
		 len, (unsigned long long) in_serv_agg->F,
		 (unsigned long long) q->V);

	return skb;
}

static struct qfq_aggregate *qfq_choose_next_agg(struct qfq_sched *q)
{
	struct qfq_group *grp;
	struct qfq_aggregate *agg, *new_front_agg;
	u64 old_F;

	qfq_update_eligible(q);
	q->oldV = q->V;

	if (!q->bitmaps[ER])
		return NULL;

	grp = qfq_ffs(q, q->bitmaps[ER]);
	old_F = grp->F;

	agg = qfq_slot_head(grp);

	/* agg starts to be served, remove it from schedule */
	qfq_front_slot_remove(grp);

	new_front_agg = qfq_slot_scan(grp);

	if (new_front_agg == NULL) /* group is now inactive, remove from ER */
		__clear_bit(grp->index, &q->bitmaps[ER]);
	else {
		u64 roundedS = qfq_round_down(new_front_agg->S,
					      grp->slot_shift);
		unsigned int s;

		if (grp->S == roundedS)
			return agg;
		grp->S = roundedS;
		grp->F = roundedS + (2ULL << grp->slot_shift);
		__clear_bit(grp->index, &q->bitmaps[ER]);
		s = qfq_calc_state(q, grp);
		__set_bit(grp->index, &q->bitmaps[s]);
	}

	qfq_unblock_groups(q, grp->index, old_F);

	return agg;
}

static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch,
		       struct sk_buff **to_free)
{
	struct qfq_sched *q = qdisc_priv(sch);
	struct qfq_class *cl;
	struct qfq_aggregate *agg;
	int err = 0;

	cl = qfq_classify(skb, sch, &err);
	if (cl == NULL) {
		if (err & __NET_XMIT_BYPASS)
			qdisc_qstats_drop(sch);
		__qdisc_drop(skb, to_free);
		return err;
	}
	pr_debug("qfq_enqueue: cl = %x\n", cl->common.classid);

	if (unlikely(cl->agg->lmax < qdisc_pkt_len(skb))) {
		pr_debug("qfq: increasing maxpkt from %u to %u for class %u",
			 cl->agg->lmax, qdisc_pkt_len(skb), cl->common.classid);
		err = qfq_change_agg(sch, cl, cl->agg->class_weight,
				     qdisc_pkt_len(skb));
		if (err) {
			cl->qstats.drops++;
			return qdisc_drop(skb, sch, to_free);
		}
	}

	err = qdisc_enqueue(skb, cl->qdisc, to_free);
	if (unlikely(err != NET_XMIT_SUCCESS)) {
		pr_debug("qfq_enqueue: enqueue failed %d\n", err);
		if (net_xmit_drop_count(err)) {
			cl->qstats.drops++;
			qdisc_qstats_drop(sch);
		}
		return err;
	}

	bstats_update(&cl->bstats, skb);
	qdisc_qstats_backlog_inc(sch, skb);
	++sch->q.qlen;

	agg = cl->agg;
	/* if the queue was not empty, then done here */
	if (cl->qdisc->q.qlen != 1) {
		if (unlikely(skb == cl->qdisc->ops->peek(cl->qdisc)) &&
		    list_first_entry(&agg->active, struct qfq_class, alist)
		    == cl && cl->deficit < qdisc_pkt_len(skb))
			list_move_tail(&cl->alist, &agg->active);

		return err;
	}

	/* schedule class for service within the aggregate */
	cl->deficit = agg->lmax;
	list_add_tail(&cl->alist, &agg->active);

	if (list_first_entry(&agg->active, struct qfq_class, alist) != cl ||
	    q->in_serv_agg == agg)
		return err; /* non-empty or in service, nothing else to do */

	qfq_activate_agg(q, agg, enqueue);

	return err;
}

/*
 * Schedule aggregate according to its timestamps.
 */
static void qfq_schedule_agg(struct qfq_sched *q, struct qfq_aggregate *agg)
{
	struct qfq_group *grp = agg->grp;
	u64 roundedS;
	int s;

	roundedS = qfq_round_down(agg->S, grp->slot_shift);

	/*
	 * Insert agg in the correct bucket.
	 * If agg->S >= grp->S we don't need to adjust the
	 * bucket list and simply go to the insertion phase.
	 * Otherwise grp->S is decreasing, we must make room
	 * in the bucket list, and also recompute the group state.
	 * Finally, if there were no flows in this group and nobody
	 * was in ER make sure to adjust V.
	 */
	if (grp->full_slots) {
		if (!qfq_gt(grp->S, agg->S))
			goto skip_update;

		/* create a slot for this agg->S */
		qfq_slot_rotate(grp, roundedS);
		/* group was surely ineligible, remove */
		__clear_bit(grp->index, &q->bitmaps[IR]);
		__clear_bit(grp->index, &q->bitmaps[IB]);
	} else if (!q->bitmaps[ER] && qfq_gt(roundedS, q->V) &&
		   q->in_serv_agg == NULL)
		q->V = roundedS;

	grp->S = roundedS;
	grp->F = roundedS + (2ULL << grp->slot_shift);
	s = qfq_calc_state(q, grp);
	__set_bit(grp->index, &q->bitmaps[s]);

	pr_debug("qfq enqueue: new state %d %#lx S %lld F %lld V %lld\n",
		 s, q->bitmaps[s],
		 (unsigned long long) agg->S,
		 (unsigned long long) agg->F,
		 (unsigned long long) q->V);

skip_update:
	qfq_slot_insert(grp, agg, roundedS);
}


/* Update agg ts and schedule agg for service */
static void qfq_activate_agg(struct qfq_sched *q, struct qfq_aggregate *agg,
			     enum update_reason reason)
{
	agg->initial_budget = agg->budget = agg->budgetmax; /* recharge budg. */

	qfq_update_agg_ts(q, agg, reason);
	if (q->in_serv_agg == NULL) { /* no aggr. in service or scheduled */
		q->in_serv_agg = agg; /* start serving this aggregate */
		/* update V: to be in service, agg must be eligible */
		q->oldV = q->V = agg->S;
	} else if (agg != q->in_serv_agg)
		qfq_schedule_agg(q, agg);
}

static void qfq_slot_remove(struct qfq_sched *q, struct qfq_group *grp,
			    struct qfq_aggregate *agg)
{
	unsigned int i, offset;
	u64 roundedS;

	roundedS = qfq_round_down(agg->S, grp->slot_shift);
	offset = (roundedS - grp->S) >> grp->slot_shift;

	i = (grp->front + offset) % QFQ_MAX_SLOTS;

	hlist_del(&agg->next);
	if (hlist_empty(&grp->slots[i]))
		__clear_bit(offset, &grp->full_slots);
}

/*
 * Called to forcibly deschedule an aggregate. If the aggregate is
 * not in the front bucket, or if the latter has other aggregates in
 * the front bucket, we can simply remove the aggregate with no other
 * side effects.
 * Otherwise we must propagate the event up.
 */
static void qfq_deactivate_agg(struct qfq_sched *q, struct qfq_aggregate *agg)
{
	struct qfq_group *grp = agg->grp;
	unsigned long mask;
	u64 roundedS;
	int s;

	if (agg == q->in_serv_agg) {
		charge_actual_service(agg);
		q->in_serv_agg = qfq_choose_next_agg(q);
		return;
	}

	agg->F = agg->S;
	qfq_slot_remove(q, grp, agg);

	if (!grp->full_slots) {
		__clear_bit(grp->index, &q->bitmaps[IR]);
		__clear_bit(grp->index, &q->bitmaps[EB]);
		__clear_bit(grp->index, &q->bitmaps[IB]);

		if (test_bit(grp->index, &q->bitmaps[ER]) &&
		    !(q->bitmaps[ER] & ~((1UL << grp->index) - 1))) {
			mask = q->bitmaps[ER] & ((1UL << grp->index) - 1);
			if (mask)
				mask = ~((1UL << __fls(mask)) - 1);
			else
				mask = ~0UL;
			qfq_move_groups(q, mask, EB, ER);
			qfq_move_groups(q, mask, IB, IR);
		}
		__clear_bit(grp->index, &q->bitmaps[ER]);
	} else if (hlist_empty(&grp->slots[grp->front])) {
		agg = qfq_slot_scan(grp);
		roundedS = qfq_round_down(agg->S, grp->slot_shift);
		if (grp->S != roundedS) {
			__clear_bit(grp->index, &q->bitmaps[ER]);
			__clear_bit(grp->index, &q->bitmaps[IR]);
			__clear_bit(grp->index, &q->bitmaps[EB]);
			__clear_bit(grp->index, &q->bitmaps[IB]);
			grp->S = roundedS;
			grp->F = roundedS + (2ULL << grp->slot_shift);
			s = qfq_calc_state(q, grp);
			__set_bit(grp->index, &q->bitmaps[s]);
		}
	}
}

static void qfq_qlen_notify(struct Qdisc *sch, unsigned long arg)
{
	struct qfq_sched *q = qdisc_priv(sch);
	struct qfq_class *cl = (struct qfq_class *)arg;

	qfq_deactivate_class(q, cl);
}

static int qfq_init_qdisc(struct Qdisc *sch, struct nlattr *opt)
{
	struct qfq_sched *q = qdisc_priv(sch);
	struct qfq_group *grp;
	int i, j, err;
	u32 max_cl_shift, maxbudg_shift, max_classes;

	err = tcf_block_get(&q->block, &q->filter_list);
	if (err)
		return err;

	err = qdisc_class_hash_init(&q->clhash);
	if (err < 0)
		return err;

	if (qdisc_dev(sch)->tx_queue_len + 1 > QFQ_MAX_AGG_CLASSES)
		max_classes = QFQ_MAX_AGG_CLASSES;
	else
		max_classes = qdisc_dev(sch)->tx_queue_len + 1;
	/* max_cl_shift = floor(log_2(max_classes)) */
	max_cl_shift = __fls(max_classes);
	q->max_agg_classes = 1<<max_cl_shift;

	/* maxbudg_shift = log2(max_len * max_classes_per_agg) */
	maxbudg_shift = QFQ_MTU_SHIFT + max_cl_shift;
	q->min_slot_shift = FRAC_BITS + maxbudg_shift - QFQ_MAX_INDEX;

	for (i = 0; i <= QFQ_MAX_INDEX; i++) {
		grp = &q->groups[i];
		grp->index = i;
		grp->slot_shift = q->min_slot_shift + i;
		for (j = 0; j < QFQ_MAX_SLOTS; j++)
			INIT_HLIST_HEAD(&grp->slots[j]);
	}

	INIT_HLIST_HEAD(&q->nonfull_aggs);

	return 0;
}
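/*
 * With the constants defined above, a device whose tx_queue_len is at
 * least 7 gets max_agg_classes = 8 (max_cl_shift = 3), so
 * maxbudg_shift = 16 + 3 = 19 and min_slot_shift = 30 + 19 - 24 = 25:
 * group i then uses slot_shift = 25 + i, for i in [0, 24].
 */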
static void qfq_reset_qdisc(struct Qdisc *sch)
{
	struct qfq_sched *q = qdisc_priv(sch);
	struct qfq_class *cl;
	unsigned int i;

	for (i = 0; i < q->clhash.hashsize; i++) {
		hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) {
			if (cl->qdisc->q.qlen > 0)
				qfq_deactivate_class(q, cl);

			qdisc_reset(cl->qdisc);
		}
	}
	sch->qstats.backlog = 0;
	sch->q.qlen = 0;
}

static void qfq_destroy_qdisc(struct Qdisc *sch)
{
	struct qfq_sched *q = qdisc_priv(sch);
	struct qfq_class *cl;
	struct hlist_node *next;
	unsigned int i;

	tcf_block_put(q->block);

	for (i = 0; i < q->clhash.hashsize; i++) {
		hlist_for_each_entry_safe(cl, next, &q->clhash.hash[i],
					  common.hnode) {
			qfq_destroy_class(sch, cl);
		}
	}
	qdisc_class_hash_destroy(&q->clhash);
}

static const struct Qdisc_class_ops qfq_class_ops = {
	.change		= qfq_change_class,
	.delete		= qfq_delete_class,
	.find		= qfq_search_class,
	.tcf_block	= qfq_tcf_block,
	.bind_tcf	= qfq_bind_tcf,
	.unbind_tcf	= qfq_unbind_tcf,
	.graft		= qfq_graft_class,
	.leaf		= qfq_class_leaf,
	.qlen_notify	= qfq_qlen_notify,
	.dump		= qfq_dump_class,
	.dump_stats	= qfq_dump_class_stats,
	.walk		= qfq_walk,
};

static struct Qdisc_ops qfq_qdisc_ops __read_mostly = {
	.cl_ops		= &qfq_class_ops,
	.id		= "qfq",
	.priv_size	= sizeof(struct qfq_sched),
	.enqueue	= qfq_enqueue,
	.dequeue	= qfq_dequeue,
	.peek		= qdisc_peek_dequeued,
	.init		= qfq_init_qdisc,
	.reset		= qfq_reset_qdisc,
	.destroy	= qfq_destroy_qdisc,
	.owner		= THIS_MODULE,
};

static int __init qfq_init(void)
{
	return register_qdisc(&qfq_qdisc_ops);
}

static void __exit qfq_exit(void)
{
	unregister_qdisc(&qfq_qdisc_ops);
}

module_init(qfq_init);
module_exit(qfq_exit);
MODULE_LICENSE("GPL");