bfq-cgroup.c

/*
 * cgroups support for the BFQ I/O scheduler.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/cgroup.h>
#include <linux/elevator.h>
#include <linux/ktime.h>
#include <linux/rbtree.h>
#include <linux/ioprio.h>
#include <linux/sbitmap.h>
#include <linux/delay.h>

#include "bfq-iosched.h"

#if defined(CONFIG_BFQ_GROUP_IOSCHED) && defined(CONFIG_DEBUG_BLK_CGROUP)

/* bfqg stats flags */
enum bfqg_stats_flags {
        BFQG_stats_waiting = 0,
        BFQG_stats_idling,
        BFQG_stats_empty,
};

#define BFQG_FLAG_FNS(name)                                             \
static void bfqg_stats_mark_##name(struct bfqg_stats *stats)            \
{                                                                       \
        stats->flags |= (1 << BFQG_stats_##name);                       \
}                                                                       \
static void bfqg_stats_clear_##name(struct bfqg_stats *stats)           \
{                                                                       \
        stats->flags &= ~(1 << BFQG_stats_##name);                      \
}                                                                       \
static int bfqg_stats_##name(struct bfqg_stats *stats)                  \
{                                                                       \
        return (stats->flags & (1 << BFQG_stats_##name)) != 0;          \
}                                                                       \

BFQG_FLAG_FNS(waiting)
BFQG_FLAG_FNS(idling)
BFQG_FLAG_FNS(empty)
#undef BFQG_FLAG_FNS
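
/*
 * For reference, each BFQG_FLAG_FNS(name) invocation above expands to
 * three small helpers operating on stats->flags; e.g.
 * BFQG_FLAG_FNS(waiting) generates:
 *
 *      static void bfqg_stats_mark_waiting(struct bfqg_stats *stats)
 *      { stats->flags |= (1 << BFQG_stats_waiting); }
 *      static void bfqg_stats_clear_waiting(struct bfqg_stats *stats)
 *      { stats->flags &= ~(1 << BFQG_stats_waiting); }
 *      static int bfqg_stats_waiting(struct bfqg_stats *stats)
 *      { return (stats->flags & (1 << BFQG_stats_waiting)) != 0; }
 *
 * These markers/predicates are used by the stat-update helpers below.
 */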

/* This should be called with the scheduler lock held. */
static void bfqg_stats_update_group_wait_time(struct bfqg_stats *stats)
{
        unsigned long long now;

        if (!bfqg_stats_waiting(stats))
                return;

        now = sched_clock();
        if (time_after64(now, stats->start_group_wait_time))
                blkg_stat_add(&stats->group_wait_time,
                              now - stats->start_group_wait_time);
        bfqg_stats_clear_waiting(stats);
}

/* This should be called with the scheduler lock held. */
static void bfqg_stats_set_start_group_wait_time(struct bfq_group *bfqg,
                                                 struct bfq_group *curr_bfqg)
{
        struct bfqg_stats *stats = &bfqg->stats;

        if (bfqg_stats_waiting(stats))
                return;
        if (bfqg == curr_bfqg)
                return;
        stats->start_group_wait_time = sched_clock();
        bfqg_stats_mark_waiting(stats);
}

/* This should be called with the scheduler lock held. */
static void bfqg_stats_end_empty_time(struct bfqg_stats *stats)
{
        unsigned long long now;

        if (!bfqg_stats_empty(stats))
                return;

        now = sched_clock();
        if (time_after64(now, stats->start_empty_time))
                blkg_stat_add(&stats->empty_time,
                              now - stats->start_empty_time);
        bfqg_stats_clear_empty(stats);
}

void bfqg_stats_update_dequeue(struct bfq_group *bfqg)
{
        blkg_stat_add(&bfqg->stats.dequeue, 1);
}

void bfqg_stats_set_start_empty_time(struct bfq_group *bfqg)
{
        struct bfqg_stats *stats = &bfqg->stats;

        if (blkg_rwstat_total(&stats->queued))
                return;

        /*
         * group is already marked empty. This can happen if bfqq got new
         * request in parent group and moved to this group while being added
         * to service tree. Just ignore the event and move on.
         */
        if (bfqg_stats_empty(stats))
                return;

        stats->start_empty_time = sched_clock();
        bfqg_stats_mark_empty(stats);
}

void bfqg_stats_update_idle_time(struct bfq_group *bfqg)
{
        struct bfqg_stats *stats = &bfqg->stats;

        if (bfqg_stats_idling(stats)) {
                unsigned long long now = sched_clock();

                if (time_after64(now, stats->start_idle_time))
                        blkg_stat_add(&stats->idle_time,
                                      now - stats->start_idle_time);
                bfqg_stats_clear_idling(stats);
        }
}

void bfqg_stats_set_start_idle_time(struct bfq_group *bfqg)
{
        struct bfqg_stats *stats = &bfqg->stats;

        stats->start_idle_time = sched_clock();
        bfqg_stats_mark_idling(stats);
}

void bfqg_stats_update_avg_queue_size(struct bfq_group *bfqg)
{
        struct bfqg_stats *stats = &bfqg->stats;

        blkg_stat_add(&stats->avg_queue_size_sum,
                      blkg_rwstat_total(&stats->queued));
        blkg_stat_add(&stats->avg_queue_size_samples, 1);
        bfqg_stats_update_group_wait_time(stats);
}

void bfqg_stats_update_io_add(struct bfq_group *bfqg, struct bfq_queue *bfqq,
                              unsigned int op)
{
        blkg_rwstat_add(&bfqg->stats.queued, op, 1);
        bfqg_stats_end_empty_time(&bfqg->stats);
        if (!(bfqq == ((struct bfq_data *)bfqg->bfqd)->in_service_queue))
                bfqg_stats_set_start_group_wait_time(bfqg, bfqq_group(bfqq));
}

void bfqg_stats_update_io_remove(struct bfq_group *bfqg, unsigned int op)
{
        blkg_rwstat_add(&bfqg->stats.queued, op, -1);
}

void bfqg_stats_update_io_merged(struct bfq_group *bfqg, unsigned int op)
{
        blkg_rwstat_add(&bfqg->stats.merged, op, 1);
}

void bfqg_stats_update_completion(struct bfq_group *bfqg, uint64_t start_time,
                                  uint64_t io_start_time, unsigned int op)
{
        struct bfqg_stats *stats = &bfqg->stats;
        unsigned long long now = sched_clock();

        if (time_after64(now, io_start_time))
                blkg_rwstat_add(&stats->service_time, op,
                                now - io_start_time);
        if (time_after64(io_start_time, start_time))
                blkg_rwstat_add(&stats->wait_time, op,
                                io_start_time - start_time);
}

#else /* CONFIG_BFQ_GROUP_IOSCHED && CONFIG_DEBUG_BLK_CGROUP */

void bfqg_stats_update_io_add(struct bfq_group *bfqg, struct bfq_queue *bfqq,
                              unsigned int op) { }
void bfqg_stats_update_io_remove(struct bfq_group *bfqg, unsigned int op) { }
void bfqg_stats_update_io_merged(struct bfq_group *bfqg, unsigned int op) { }
void bfqg_stats_update_completion(struct bfq_group *bfqg, uint64_t start_time,
                                  uint64_t io_start_time, unsigned int op) { }
void bfqg_stats_update_dequeue(struct bfq_group *bfqg) { }
void bfqg_stats_set_start_empty_time(struct bfq_group *bfqg) { }
void bfqg_stats_update_idle_time(struct bfq_group *bfqg) { }
void bfqg_stats_set_start_idle_time(struct bfq_group *bfqg) { }
void bfqg_stats_update_avg_queue_size(struct bfq_group *bfqg) { }

#endif /* CONFIG_BFQ_GROUP_IOSCHED && CONFIG_DEBUG_BLK_CGROUP */

#ifdef CONFIG_BFQ_GROUP_IOSCHED

/*
 * blk-cgroup policy-related handlers
 * The following functions help in converting between blk-cgroup
 * internal structures and BFQ-specific structures.
 */
static struct bfq_group *pd_to_bfqg(struct blkg_policy_data *pd)
{
        return pd ? container_of(pd, struct bfq_group, pd) : NULL;
}

struct blkcg_gq *bfqg_to_blkg(struct bfq_group *bfqg)
{
        return pd_to_blkg(&bfqg->pd);
}

static struct bfq_group *blkg_to_bfqg(struct blkcg_gq *blkg)
{
        return pd_to_bfqg(blkg_to_pd(blkg, &blkcg_policy_bfq));
}

/*
 * bfq_group handlers
 * The following functions help in navigating the bfq_group hierarchy
 * by allowing to find the parent of a bfq_group or the bfq_group
 * associated to a bfq_queue.
 */
static struct bfq_group *bfqg_parent(struct bfq_group *bfqg)
{
        struct blkcg_gq *pblkg = bfqg_to_blkg(bfqg)->parent;

        return pblkg ? blkg_to_bfqg(pblkg) : NULL;
}

struct bfq_group *bfqq_group(struct bfq_queue *bfqq)
{
        struct bfq_entity *group_entity = bfqq->entity.parent;

        return group_entity ? container_of(group_entity, struct bfq_group,
                                           entity) :
                              bfqq->bfqd->root_group;
}

/*
 * The following two functions handle get and put of a bfq_group by
 * wrapping the related blk-cgroup hooks.
 */
static void bfqg_get(struct bfq_group *bfqg)
{
        bfqg->ref++;
}

static void bfqg_put(struct bfq_group *bfqg)
{
        bfqg->ref--;
        if (bfqg->ref == 0)
                kfree(bfqg);
}

static void bfqg_and_blkg_get(struct bfq_group *bfqg)
{
        /* see comments in bfq_bic_update_cgroup for why refcounting bfqg */
        bfqg_get(bfqg);

        blkg_get(bfqg_to_blkg(bfqg));
}

void bfqg_and_blkg_put(struct bfq_group *bfqg)
{
        bfqg_put(bfqg);

        blkg_put(bfqg_to_blkg(bfqg));
}
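
/*
 * Two reference counts keep a bfq_group alive: the private bfqg->ref
 * handled by bfqg_get()/bfqg_put() above, and the reference on the
 * owning blkg taken by bfqg_and_blkg_get(). The private counter lets
 * the bfqg survive the blkg_free of the original blkg (which invokes
 * bfq_pd_free) until no bfq_queue or entity points to it any longer;
 * see the long comment in bfq_bic_update_cgroup below for the full
 * rationale.
 */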

/* @stats = 0 */
static void bfqg_stats_reset(struct bfqg_stats *stats)
{
#ifdef CONFIG_DEBUG_BLK_CGROUP
        /* queued stats shouldn't be cleared */
        blkg_rwstat_reset(&stats->merged);
        blkg_rwstat_reset(&stats->service_time);
        blkg_rwstat_reset(&stats->wait_time);
        blkg_stat_reset(&stats->time);
        blkg_stat_reset(&stats->avg_queue_size_sum);
        blkg_stat_reset(&stats->avg_queue_size_samples);
        blkg_stat_reset(&stats->dequeue);
        blkg_stat_reset(&stats->group_wait_time);
        blkg_stat_reset(&stats->idle_time);
        blkg_stat_reset(&stats->empty_time);
#endif
}

/* @to += @from */
static void bfqg_stats_add_aux(struct bfqg_stats *to, struct bfqg_stats *from)
{
        if (!to || !from)
                return;

#ifdef CONFIG_DEBUG_BLK_CGROUP
        /* queued stats shouldn't be cleared */
        blkg_rwstat_add_aux(&to->merged, &from->merged);
        blkg_rwstat_add_aux(&to->service_time, &from->service_time);
        blkg_rwstat_add_aux(&to->wait_time, &from->wait_time);
        blkg_stat_add_aux(&from->time, &from->time);
        blkg_stat_add_aux(&to->avg_queue_size_sum, &from->avg_queue_size_sum);
        blkg_stat_add_aux(&to->avg_queue_size_samples,
                          &from->avg_queue_size_samples);
        blkg_stat_add_aux(&to->dequeue, &from->dequeue);
        blkg_stat_add_aux(&to->group_wait_time, &from->group_wait_time);
        blkg_stat_add_aux(&to->idle_time, &from->idle_time);
        blkg_stat_add_aux(&to->empty_time, &from->empty_time);
#endif
}

/*
 * Transfer @bfqg's stats to its parent's aux counts so that the ancestors'
 * recursive stats can still account for the amount used by this bfqg after
 * it's gone.
 */
static void bfqg_stats_xfer_dead(struct bfq_group *bfqg)
{
        struct bfq_group *parent;

        if (!bfqg) /* root_group */
                return;

        parent = bfqg_parent(bfqg);

        lockdep_assert_held(bfqg_to_blkg(bfqg)->q->queue_lock);

        if (unlikely(!parent))
                return;

        bfqg_stats_add_aux(&parent->stats, &bfqg->stats);
        bfqg_stats_reset(&bfqg->stats);
}

void bfq_init_entity(struct bfq_entity *entity, struct bfq_group *bfqg)
{
        struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);

        entity->weight = entity->new_weight;
        entity->orig_weight = entity->new_weight;
        if (bfqq) {
                bfqq->ioprio = bfqq->new_ioprio;
                bfqq->ioprio_class = bfqq->new_ioprio_class;
                /*
                 * Make sure that bfqg and its associated blkg do not
                 * disappear before entity.
                 */
                bfqg_and_blkg_get(bfqg);
        }
        entity->parent = bfqg->my_entity; /* NULL for root group */
        entity->sched_data = &bfqg->sched_data;
}

static void bfqg_stats_exit(struct bfqg_stats *stats)
{
#ifdef CONFIG_DEBUG_BLK_CGROUP
        blkg_rwstat_exit(&stats->merged);
        blkg_rwstat_exit(&stats->service_time);
        blkg_rwstat_exit(&stats->wait_time);
        blkg_rwstat_exit(&stats->queued);
        blkg_stat_exit(&stats->time);
        blkg_stat_exit(&stats->avg_queue_size_sum);
        blkg_stat_exit(&stats->avg_queue_size_samples);
        blkg_stat_exit(&stats->dequeue);
        blkg_stat_exit(&stats->group_wait_time);
        blkg_stat_exit(&stats->idle_time);
        blkg_stat_exit(&stats->empty_time);
#endif
}

static int bfqg_stats_init(struct bfqg_stats *stats, gfp_t gfp)
{
#ifdef CONFIG_DEBUG_BLK_CGROUP
        if (blkg_rwstat_init(&stats->merged, gfp) ||
            blkg_rwstat_init(&stats->service_time, gfp) ||
            blkg_rwstat_init(&stats->wait_time, gfp) ||
            blkg_rwstat_init(&stats->queued, gfp) ||
            blkg_stat_init(&stats->time, gfp) ||
            blkg_stat_init(&stats->avg_queue_size_sum, gfp) ||
            blkg_stat_init(&stats->avg_queue_size_samples, gfp) ||
            blkg_stat_init(&stats->dequeue, gfp) ||
            blkg_stat_init(&stats->group_wait_time, gfp) ||
            blkg_stat_init(&stats->idle_time, gfp) ||
            blkg_stat_init(&stats->empty_time, gfp)) {
                bfqg_stats_exit(stats);
                return -ENOMEM;
        }
#endif

        return 0;
}

static struct bfq_group_data *cpd_to_bfqgd(struct blkcg_policy_data *cpd)
{
        return cpd ? container_of(cpd, struct bfq_group_data, pd) : NULL;
}

static struct bfq_group_data *blkcg_to_bfqgd(struct blkcg *blkcg)
{
        return cpd_to_bfqgd(blkcg_to_cpd(blkcg, &blkcg_policy_bfq));
}

static struct blkcg_policy_data *bfq_cpd_alloc(gfp_t gfp)
{
        struct bfq_group_data *bgd;

        bgd = kzalloc(sizeof(*bgd), gfp);
        if (!bgd)
                return NULL;
        return &bgd->pd;
}

static void bfq_cpd_init(struct blkcg_policy_data *cpd)
{
        struct bfq_group_data *d = cpd_to_bfqgd(cpd);

        d->weight = cgroup_subsys_on_dfl(io_cgrp_subsys) ?
                CGROUP_WEIGHT_DFL : BFQ_WEIGHT_LEGACY_DFL;
}

static void bfq_cpd_free(struct blkcg_policy_data *cpd)
{
        kfree(cpd_to_bfqgd(cpd));
}

static struct blkg_policy_data *bfq_pd_alloc(gfp_t gfp, int node)
{
        struct bfq_group *bfqg;

        bfqg = kzalloc_node(sizeof(*bfqg), gfp, node);
        if (!bfqg)
                return NULL;

        if (bfqg_stats_init(&bfqg->stats, gfp)) {
                kfree(bfqg);
                return NULL;
        }

        /* see comments in bfq_bic_update_cgroup for why refcounting */
        bfqg_get(bfqg);
        return &bfqg->pd;
}

static void bfq_pd_init(struct blkg_policy_data *pd)
{
        struct blkcg_gq *blkg = pd_to_blkg(pd);
        struct bfq_group *bfqg = blkg_to_bfqg(blkg);
        struct bfq_data *bfqd = blkg->q->elevator->elevator_data;
        struct bfq_entity *entity = &bfqg->entity;
        struct bfq_group_data *d = blkcg_to_bfqgd(blkg->blkcg);

        entity->orig_weight = entity->weight = entity->new_weight = d->weight;
        entity->my_sched_data = &bfqg->sched_data;
        bfqg->my_entity = entity; /*
                                   * the root_group's will be set to NULL
                                   * in bfq_init_queue()
                                   */
        bfqg->bfqd = bfqd;
        bfqg->active_entities = 0;
        bfqg->rq_pos_tree = RB_ROOT;
}

static void bfq_pd_free(struct blkg_policy_data *pd)
{
        struct bfq_group *bfqg = pd_to_bfqg(pd);

        bfqg_stats_exit(&bfqg->stats);
        bfqg_put(bfqg);
}

static void bfq_pd_reset_stats(struct blkg_policy_data *pd)
{
        struct bfq_group *bfqg = pd_to_bfqg(pd);

        bfqg_stats_reset(&bfqg->stats);
}
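
/*
 * The bfq_cpd_* and bfq_pd_* callbacks above (plus bfq_pd_offline below)
 * are wired into blkcg_policy_bfq near the end of this file: the cpd
 * hooks manage the per-blkcg bfq_group_data, which only carries the
 * default weight, while the pd hooks manage one bfq_group per
 * (blkcg, request_queue) pair.
 */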

static void bfq_group_set_parent(struct bfq_group *bfqg,
                                 struct bfq_group *parent)
{
        struct bfq_entity *entity;

        entity = &bfqg->entity;
        entity->parent = parent->my_entity;
        entity->sched_data = &parent->sched_data;
}

static struct bfq_group *bfq_lookup_bfqg(struct bfq_data *bfqd,
                                         struct blkcg *blkcg)
{
        struct blkcg_gq *blkg;

        blkg = blkg_lookup(blkcg, bfqd->queue);
        if (likely(blkg))
                return blkg_to_bfqg(blkg);
        return NULL;
}

struct bfq_group *bfq_find_set_group(struct bfq_data *bfqd,
                                     struct blkcg *blkcg)
{
        struct bfq_group *bfqg, *parent;
        struct bfq_entity *entity;

        bfqg = bfq_lookup_bfqg(bfqd, blkcg);
        if (unlikely(!bfqg))
                return NULL;

        /*
         * Update chain of bfq_groups as we might be handling a leaf group
         * which, along with some of its relatives, has not been hooked yet
         * to the private hierarchy of BFQ.
         */
        entity = &bfqg->entity;
        for_each_entity(entity) {
                bfqg = container_of(entity, struct bfq_group, entity);
                if (bfqg != bfqd->root_group) {
                        parent = bfqg_parent(bfqg);
                        if (!parent)
                                parent = bfqd->root_group;
                        bfq_group_set_parent(bfqg, parent);
                }
        }

        return bfqg;
}

/**
 * bfq_bfqq_move - migrate @bfqq to @bfqg.
 * @bfqd: queue descriptor.
 * @bfqq: the queue to move.
 * @bfqg: the group to move to.
 *
 * Move @bfqq to @bfqg, deactivating it from its old group and reactivating
 * it on the new one. Avoid putting the entity on the old group idle tree.
 *
 * Must be called under the scheduler lock, to make sure that the blkg
 * owning @bfqg does not disappear (see comments in
 * bfq_bic_update_cgroup on guaranteeing the consistency of blkg
 * objects).
 */
void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq,
                   struct bfq_group *bfqg)
{
        struct bfq_entity *entity = &bfqq->entity;

        /* If bfqq is empty, then bfq_bfqq_expire also invokes
         * bfq_del_bfqq_busy, thereby removing bfqq and its entity
         * from data structures related to current group. Otherwise we
         * need to remove bfqq explicitly with bfq_deactivate_bfqq, as
         * we do below.
         */
        if (bfqq == bfqd->in_service_queue)
                bfq_bfqq_expire(bfqd, bfqd->in_service_queue,
                                false, BFQQE_PREEMPTED);

        if (bfq_bfqq_busy(bfqq))
                bfq_deactivate_bfqq(bfqd, bfqq, false, false);
        else if (entity->on_st)
                bfq_put_idle_entity(bfq_entity_service_tree(entity), entity);
        bfqg_and_blkg_put(bfqq_group(bfqq));

        entity->parent = bfqg->my_entity;
        entity->sched_data = &bfqg->sched_data;
        /* pin down bfqg and its associated blkg */
        bfqg_and_blkg_get(bfqg);

        if (bfq_bfqq_busy(bfqq)) {
                bfq_pos_tree_add_move(bfqd, bfqq);
                bfq_activate_bfqq(bfqd, bfqq);
        }

        if (!bfqd->in_service_queue && !bfqd->rq_in_driver)
                bfq_schedule_dispatch(bfqd);
}

/**
 * __bfq_bic_change_cgroup - move @bic to @cgroup.
 * @bfqd: the queue descriptor.
 * @bic: the bic to move.
 * @blkcg: the blk-cgroup to move to.
 *
 * Move bic to blkcg, assuming that bfqd->lock is held; which makes
 * sure that the reference to cgroup is valid across the call (see
 * comments in bfq_bic_update_cgroup on this issue)
 *
 * NOTE: an alternative approach might have been to store the current
 * cgroup in bfqq and getting a reference to it, reducing the lookup
 * time here, at the price of slightly more complex code.
 */
static struct bfq_group *__bfq_bic_change_cgroup(struct bfq_data *bfqd,
                                                 struct bfq_io_cq *bic,
                                                 struct blkcg *blkcg)
{
        struct bfq_queue *async_bfqq = bic_to_bfqq(bic, 0);
        struct bfq_queue *sync_bfqq = bic_to_bfqq(bic, 1);
        struct bfq_group *bfqg;
        struct bfq_entity *entity;

        bfqg = bfq_find_set_group(bfqd, blkcg);

        if (unlikely(!bfqg))
                bfqg = bfqd->root_group;

        if (async_bfqq) {
                entity = &async_bfqq->entity;

                if (entity->sched_data != &bfqg->sched_data) {
                        bic_set_bfqq(bic, NULL, 0);
                        bfq_log_bfqq(bfqd, async_bfqq,
                                     "bic_change_group: %p %d",
                                     async_bfqq, async_bfqq->ref);
                        bfq_put_queue(async_bfqq);
                }
        }

        if (sync_bfqq) {
                entity = &sync_bfqq->entity;
                if (entity->sched_data != &bfqg->sched_data)
                        bfq_bfqq_move(bfqd, sync_bfqq, bfqg);
        }

        return bfqg;
}

void bfq_bic_update_cgroup(struct bfq_io_cq *bic, struct bio *bio)
{
        struct bfq_data *bfqd = bic_to_bfqd(bic);
        struct bfq_group *bfqg = NULL;
        uint64_t serial_nr;

        rcu_read_lock();
        serial_nr = bio_blkcg(bio)->css.serial_nr;

        /*
         * Check whether blkcg has changed. The condition may trigger
         * spuriously on a newly created cic but there's no harm.
         */
        if (unlikely(!bfqd) || likely(bic->blkcg_serial_nr == serial_nr))
                goto out;

        bfqg = __bfq_bic_change_cgroup(bfqd, bic, bio_blkcg(bio));
        /*
         * Update blkg_path for bfq_log_* functions. We cache this
         * path, and update it here, for the following
         * reasons. Operations on blkg objects in blk-cgroup are
         * protected with the request_queue lock, and not with the
         * lock that protects the instances of this scheduler
         * (bfqd->lock). This exposes BFQ to the following sort of
         * race.
         *
         * The blkg_lookup performed in bfq_get_queue, protected
         * through rcu, may happen to return the address of a copy of
         * the original blkg. If this is the case, then the
         * bfqg_and_blkg_get performed in bfq_get_queue, to pin down
         * the blkg, is useless: it does not prevent blk-cgroup code
         * from destroying both the original blkg and all objects
         * directly or indirectly referred by the copy of the
         * blkg.
         *
         * On the bright side, destroy operations on a blkg invoke, as
         * a first step, hooks of the scheduler associated with the
         * blkg. And these hooks are executed with bfqd->lock held for
         * BFQ. As a consequence, for any blkg associated with the
         * request queue this instance of the scheduler is attached
         * to, we are guaranteed that such a blkg is not destroyed, and
         * that all the pointers it contains are consistent, while we
         * are holding bfqd->lock. A blkg_lookup performed with
         * bfqd->lock held then returns a fully consistent blkg, which
         * remains consistent until this lock is held.
         *
         * Thanks to the last fact, and to the fact that: (1) bfqg has
         * been obtained through a blkg_lookup in the above
         * assignment, and (2) bfqd->lock is being held, here we can
         * safely use the policy data for the involved blkg (i.e., the
         * field bfqg->pd) to get to the blkg associated with bfqg,
         * and then we can safely use any field of blkg. After we
         * release bfqd->lock, even just getting blkg through this
         * bfqg may cause dangling references to be traversed, as
         * bfqg->pd may not exist any more.
         *
         * In view of the above facts, here we cache, in the bfqg, any
         * blkg data we may need for this bic, and for its associated
         * bfq_queue. As of now, we need to cache only the path of the
         * blkg, which is used in the bfq_log_* functions.
         *
         * Finally, note that bfqg itself needs to be protected from
         * destruction on the blkg_free of the original blkg (which
         * invokes bfq_pd_free). We use an additional private
         * refcounter for bfqg, to let it disappear only after no
         * bfq_queue refers to it any longer.
         */
        blkg_path(bfqg_to_blkg(bfqg), bfqg->blkg_path, sizeof(bfqg->blkg_path));
        bic->blkcg_serial_nr = serial_nr;
out:
        rcu_read_unlock();
}

/**
 * bfq_flush_idle_tree - deactivate any entity on the idle tree of @st.
 * @st: the service tree being flushed.
 */
static void bfq_flush_idle_tree(struct bfq_service_tree *st)
{
        struct bfq_entity *entity = st->first_idle;

        for (; entity ; entity = st->first_idle)
                __bfq_deactivate_entity(entity, false);
}

/**
 * bfq_reparent_leaf_entity - move leaf entity to the root_group.
 * @bfqd: the device data structure with the root group.
 * @entity: the entity to move.
 */
static void bfq_reparent_leaf_entity(struct bfq_data *bfqd,
                                     struct bfq_entity *entity)
{
        struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);

        bfq_bfqq_move(bfqd, bfqq, bfqd->root_group);
}

/**
 * bfq_reparent_active_entities - move to the root group all active
 *                                entities.
 * @bfqd: the device data structure with the root group.
 * @bfqg: the group to move from.
 * @st: the service tree with the entities.
 */
static void bfq_reparent_active_entities(struct bfq_data *bfqd,
                                         struct bfq_group *bfqg,
                                         struct bfq_service_tree *st)
{
        struct rb_root *active = &st->active;
        struct bfq_entity *entity = NULL;

        if (!RB_EMPTY_ROOT(&st->active))
                entity = bfq_entity_of(rb_first(active));

        for (; entity ; entity = bfq_entity_of(rb_first(active)))
                bfq_reparent_leaf_entity(bfqd, entity);

        if (bfqg->sched_data.in_service_entity)
                bfq_reparent_leaf_entity(bfqd,
                                         bfqg->sched_data.in_service_entity);
}
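
/*
 * Note that the loop above re-evaluates rb_first(active) on every
 * iteration: each bfq_reparent_leaf_entity() call moves the entity to
 * the root_group and thereby removes it from this service tree, so the
 * active tree shrinks until the loop terminates.
 */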

/**
 * bfq_pd_offline - deactivate the entity associated with @pd,
 *                  and reparent its children entities.
 * @pd: descriptor of the policy going offline.
 *
 * blkio already grabs the queue_lock for us, so no need to use
 * RCU-based magic
 */
static void bfq_pd_offline(struct blkg_policy_data *pd)
{
        struct bfq_service_tree *st;
        struct bfq_group *bfqg = pd_to_bfqg(pd);
        struct bfq_data *bfqd = bfqg->bfqd;
        struct bfq_entity *entity = bfqg->my_entity;
        unsigned long flags;
        int i;

        spin_lock_irqsave(&bfqd->lock, flags);

        if (!entity) /* root group */
                goto put_async_queues;

        /*
         * Empty all service_trees belonging to this group before
         * deactivating the group itself.
         */
        for (i = 0; i < BFQ_IOPRIO_CLASSES; i++) {
                st = bfqg->sched_data.service_tree + i;

                /*
                 * The idle tree may still contain bfq_queues belonging
                 * to exited tasks because they never migrated to a different
                 * cgroup from the one being destroyed now.
                 */
                bfq_flush_idle_tree(st);

                /*
                 * It may happen that some queues are still active
                 * (busy) upon group destruction (if the corresponding
                 * processes have been forced to terminate). We move
                 * all the leaf entities corresponding to these queues
                 * to the root_group.
                 * Also, it may happen that the group has an entity
                 * in service, which is disconnected from the active
                 * tree: it must be moved, too.
                 * There is no need to put the sync queues, as the
                 * scheduler has taken no reference.
                 */
                bfq_reparent_active_entities(bfqd, bfqg, st);
        }

        __bfq_deactivate_entity(entity, false);

put_async_queues:
        bfq_put_async_queues(bfqd, bfqg);

        spin_unlock_irqrestore(&bfqd->lock, flags);
        /*
         * @blkg is going offline and will be ignored by
         * blkg_[rw]stat_recursive_sum(). Transfer stats to the parent so
         * that they don't get lost. If IOs complete after this point, the
         * stats for them will be lost. Oh well...
         */
        bfqg_stats_xfer_dead(bfqg);
}

void bfq_end_wr_async(struct bfq_data *bfqd)
{
        struct blkcg_gq *blkg;

        list_for_each_entry(blkg, &bfqd->queue->blkg_list, q_node) {
                struct bfq_group *bfqg = blkg_to_bfqg(blkg);

                bfq_end_wr_async_queues(bfqd, bfqg);
        }
        bfq_end_wr_async_queues(bfqd, bfqd->root_group);
}

static int bfq_io_show_weight(struct seq_file *sf, void *v)
{
        struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
        struct bfq_group_data *bfqgd = blkcg_to_bfqgd(blkcg);
        unsigned int val = 0;

        if (bfqgd)
                val = bfqgd->weight;

        seq_printf(sf, "%u\n", val);

        return 0;
}

static int bfq_io_set_weight_legacy(struct cgroup_subsys_state *css,
                                    struct cftype *cftype,
                                    u64 val)
{
        struct blkcg *blkcg = css_to_blkcg(css);
        struct bfq_group_data *bfqgd = blkcg_to_bfqgd(blkcg);
        struct blkcg_gq *blkg;
        int ret = -ERANGE;

        if (val < BFQ_MIN_WEIGHT || val > BFQ_MAX_WEIGHT)
                return ret;

        ret = 0;
        spin_lock_irq(&blkcg->lock);
        bfqgd->weight = (unsigned short)val;
        hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) {
                struct bfq_group *bfqg = blkg_to_bfqg(blkg);

                if (!bfqg)
                        continue;
                /*
                 * Setting the prio_changed flag of the entity
                 * to 1 with new_weight == weight would re-set
                 * the value of the weight to its ioprio mapping.
                 * Set the flag only if necessary.
                 */
                if ((unsigned short)val != bfqg->entity.new_weight) {
                        bfqg->entity.new_weight = (unsigned short)val;
                        /*
                         * Make sure that the above new value has been
                         * stored in bfqg->entity.new_weight before
                         * setting the prio_changed flag. In fact,
                         * this flag may be read asynchronously (in
                         * critical sections protected by a different
                         * lock than that held here), and finding this
                         * flag set may cause the execution of the code
                         * for updating parameters whose value may
                         * depend also on bfqg->entity.new_weight (in
                         * __bfq_entity_update_weight_prio).
                         * This barrier makes sure that the new value
                         * of bfqg->entity.new_weight is correctly
                         * seen in that code.
                         */
                        smp_wmb();
                        bfqg->entity.prio_changed = 1;
                }
        }
        spin_unlock_irq(&blkcg->lock);

        return ret;
}

static ssize_t bfq_io_set_weight(struct kernfs_open_file *of,
                                 char *buf, size_t nbytes,
                                 loff_t off)
{
        u64 weight;
        /* First unsigned long found in the file is used */
        int ret = kstrtoull(strim(buf), 0, &weight);

        if (ret)
                return ret;

        return bfq_io_set_weight_legacy(of_css(of), NULL, weight);
}
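
/*
 * From user space, the weight handled by the two functions above is
 * typically exposed as blkio.bfq.weight on the legacy cgroup hierarchy
 * and io.bfq.weight on the unified hierarchy (the setting only takes
 * effect on devices actually using the bfq scheduler), e.g.:
 *
 *      echo 300 > /sys/fs/cgroup/<group>/io.bfq.weight
 *
 * Values outside [BFQ_MIN_WEIGHT, BFQ_MAX_WEIGHT] are rejected with
 * -ERANGE by bfq_io_set_weight_legacy(), which both entry points end
 * up calling.
 */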

#ifdef CONFIG_DEBUG_BLK_CGROUP
static int bfqg_print_stat(struct seq_file *sf, void *v)
{
        blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), blkg_prfill_stat,
                          &blkcg_policy_bfq, seq_cft(sf)->private, false);
        return 0;
}

static int bfqg_print_rwstat(struct seq_file *sf, void *v)
{
        blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), blkg_prfill_rwstat,
                          &blkcg_policy_bfq, seq_cft(sf)->private, true);
        return 0;
}

static u64 bfqg_prfill_stat_recursive(struct seq_file *sf,
                                      struct blkg_policy_data *pd, int off)
{
        u64 sum = blkg_stat_recursive_sum(pd_to_blkg(pd),
                                          &blkcg_policy_bfq, off);
        return __blkg_prfill_u64(sf, pd, sum);
}

static u64 bfqg_prfill_rwstat_recursive(struct seq_file *sf,
                                        struct blkg_policy_data *pd, int off)
{
        struct blkg_rwstat sum = blkg_rwstat_recursive_sum(pd_to_blkg(pd),
                                                           &blkcg_policy_bfq,
                                                           off);
        return __blkg_prfill_rwstat(sf, pd, &sum);
}

static int bfqg_print_stat_recursive(struct seq_file *sf, void *v)
{
        blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
                          bfqg_prfill_stat_recursive, &blkcg_policy_bfq,
                          seq_cft(sf)->private, false);
        return 0;
}

static int bfqg_print_rwstat_recursive(struct seq_file *sf, void *v)
{
        blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
                          bfqg_prfill_rwstat_recursive, &blkcg_policy_bfq,
                          seq_cft(sf)->private, true);
        return 0;
}

static u64 bfqg_prfill_sectors(struct seq_file *sf, struct blkg_policy_data *pd,
                               int off)
{
        u64 sum = blkg_rwstat_total(&pd->blkg->stat_bytes);

        return __blkg_prfill_u64(sf, pd, sum >> 9);
}

static int bfqg_print_stat_sectors(struct seq_file *sf, void *v)
{
        blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
                          bfqg_prfill_sectors, &blkcg_policy_bfq, 0, false);
        return 0;
}

static u64 bfqg_prfill_sectors_recursive(struct seq_file *sf,
                                         struct blkg_policy_data *pd, int off)
{
        struct blkg_rwstat tmp = blkg_rwstat_recursive_sum(pd->blkg, NULL,
                                        offsetof(struct blkcg_gq, stat_bytes));
        u64 sum = atomic64_read(&tmp.aux_cnt[BLKG_RWSTAT_READ]) +
                atomic64_read(&tmp.aux_cnt[BLKG_RWSTAT_WRITE]);

        return __blkg_prfill_u64(sf, pd, sum >> 9);
}

static int bfqg_print_stat_sectors_recursive(struct seq_file *sf, void *v)
{
        blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
                          bfqg_prfill_sectors_recursive, &blkcg_policy_bfq, 0,
                          false);
        return 0;
}

static u64 bfqg_prfill_avg_queue_size(struct seq_file *sf,
                                      struct blkg_policy_data *pd, int off)
{
        struct bfq_group *bfqg = pd_to_bfqg(pd);
        u64 samples = blkg_stat_read(&bfqg->stats.avg_queue_size_samples);
        u64 v = 0;

        if (samples) {
                v = blkg_stat_read(&bfqg->stats.avg_queue_size_sum);
                v = div64_u64(v, samples);
        }
        __blkg_prfill_u64(sf, pd, v);
        return 0;
}

/* print avg_queue_size */
static int bfqg_print_avg_queue_size(struct seq_file *sf, void *v)
{
        blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
                          bfqg_prfill_avg_queue_size, &blkcg_policy_bfq,
                          0, false);
        return 0;
}
#endif /* CONFIG_DEBUG_BLK_CGROUP */

struct bfq_group *bfq_create_group_hierarchy(struct bfq_data *bfqd, int node)
{
        int ret;

        ret = blkcg_activate_policy(bfqd->queue, &blkcg_policy_bfq);
        if (ret)
                return NULL;

        return blkg_to_bfqg(bfqd->queue->root_blkg);
}

struct blkcg_policy blkcg_policy_bfq = {
        .dfl_cftypes            = bfq_blkg_files,
        .legacy_cftypes         = bfq_blkcg_legacy_files,

        .cpd_alloc_fn           = bfq_cpd_alloc,
        .cpd_init_fn            = bfq_cpd_init,
        .cpd_bind_fn            = bfq_cpd_init,
        .cpd_free_fn            = bfq_cpd_free,

        .pd_alloc_fn            = bfq_pd_alloc,
        .pd_init_fn             = bfq_pd_init,
        .pd_offline_fn          = bfq_pd_offline,
        .pd_free_fn             = bfq_pd_free,
        .pd_reset_stats_fn      = bfq_pd_reset_stats,
};

struct cftype bfq_blkcg_legacy_files[] = {
        {
                .name = "bfq.weight",
                .flags = CFTYPE_NOT_ON_ROOT,
                .seq_show = bfq_io_show_weight,
                .write_u64 = bfq_io_set_weight_legacy,
        },

        /* statistics, covers only the tasks in the bfqg */
        {
                .name = "bfq.io_service_bytes",
                .private = (unsigned long)&blkcg_policy_bfq,
                .seq_show = blkg_print_stat_bytes,
        },
        {
                .name = "bfq.io_serviced",
                .private = (unsigned long)&blkcg_policy_bfq,
                .seq_show = blkg_print_stat_ios,
        },
#ifdef CONFIG_DEBUG_BLK_CGROUP
        {
                .name = "bfq.time",
                .private = offsetof(struct bfq_group, stats.time),
                .seq_show = bfqg_print_stat,
        },
        {
                .name = "bfq.sectors",
                .seq_show = bfqg_print_stat_sectors,
        },
        {
                .name = "bfq.io_service_time",
                .private = offsetof(struct bfq_group, stats.service_time),
                .seq_show = bfqg_print_rwstat,
        },
        {
                .name = "bfq.io_wait_time",
                .private = offsetof(struct bfq_group, stats.wait_time),
                .seq_show = bfqg_print_rwstat,
        },
        {
                .name = "bfq.io_merged",
                .private = offsetof(struct bfq_group, stats.merged),
                .seq_show = bfqg_print_rwstat,
        },
        {
                .name = "bfq.io_queued",
                .private = offsetof(struct bfq_group, stats.queued),
                .seq_show = bfqg_print_rwstat,
        },
#endif /* CONFIG_DEBUG_BLK_CGROUP */

        /* the same statistics which cover the bfqg and its descendants */
        {
                .name = "bfq.io_service_bytes_recursive",
                .private = (unsigned long)&blkcg_policy_bfq,
                .seq_show = blkg_print_stat_bytes_recursive,
        },
        {
                .name = "bfq.io_serviced_recursive",
                .private = (unsigned long)&blkcg_policy_bfq,
                .seq_show = blkg_print_stat_ios_recursive,
        },
#ifdef CONFIG_DEBUG_BLK_CGROUP
        {
                .name = "bfq.time_recursive",
                .private = offsetof(struct bfq_group, stats.time),
                .seq_show = bfqg_print_stat_recursive,
        },
        {
                .name = "bfq.sectors_recursive",
                .seq_show = bfqg_print_stat_sectors_recursive,
        },
        {
                .name = "bfq.io_service_time_recursive",
                .private = offsetof(struct bfq_group, stats.service_time),
                .seq_show = bfqg_print_rwstat_recursive,
        },
        {
                .name = "bfq.io_wait_time_recursive",
                .private = offsetof(struct bfq_group, stats.wait_time),
                .seq_show = bfqg_print_rwstat_recursive,
        },
        {
                .name = "bfq.io_merged_recursive",
                .private = offsetof(struct bfq_group, stats.merged),
                .seq_show = bfqg_print_rwstat_recursive,
        },
        {
                .name = "bfq.io_queued_recursive",
                .private = offsetof(struct bfq_group, stats.queued),
                .seq_show = bfqg_print_rwstat_recursive,
        },
        {
                .name = "bfq.avg_queue_size",
                .seq_show = bfqg_print_avg_queue_size,
        },
        {
                .name = "bfq.group_wait_time",
                .private = offsetof(struct bfq_group, stats.group_wait_time),
                .seq_show = bfqg_print_stat,
        },
        {
                .name = "bfq.idle_time",
                .private = offsetof(struct bfq_group, stats.idle_time),
                .seq_show = bfqg_print_stat,
        },
        {
                .name = "bfq.empty_time",
                .private = offsetof(struct bfq_group, stats.empty_time),
                .seq_show = bfqg_print_stat,
        },
        {
                .name = "bfq.dequeue",
                .private = offsetof(struct bfq_group, stats.dequeue),
                .seq_show = bfqg_print_stat,
        },
#endif /* CONFIG_DEBUG_BLK_CGROUP */
        { } /* terminate */
};

struct cftype bfq_blkg_files[] = {
        {
                .name = "bfq.weight",
                .flags = CFTYPE_NOT_ON_ROOT,
                .seq_show = bfq_io_show_weight,
                .write = bfq_io_set_weight,
        },
        {} /* terminate */
};

#else /* CONFIG_BFQ_GROUP_IOSCHED */

void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq,
                   struct bfq_group *bfqg) {}

void bfq_init_entity(struct bfq_entity *entity, struct bfq_group *bfqg)
{
        struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);

        entity->weight = entity->new_weight;
        entity->orig_weight = entity->new_weight;
        if (bfqq) {
                bfqq->ioprio = bfqq->new_ioprio;
                bfqq->ioprio_class = bfqq->new_ioprio_class;
        }
        entity->sched_data = &bfqg->sched_data;
}

void bfq_bic_update_cgroup(struct bfq_io_cq *bic, struct bio *bio) {}

void bfq_end_wr_async(struct bfq_data *bfqd)
{
        bfq_end_wr_async_queues(bfqd, bfqd->root_group);
}

struct bfq_group *bfq_find_set_group(struct bfq_data *bfqd, struct blkcg *blkcg)
{
        return bfqd->root_group;
}

struct bfq_group *bfqq_group(struct bfq_queue *bfqq)
{
        return bfqq->bfqd->root_group;
}

struct bfq_group *bfq_create_group_hierarchy(struct bfq_data *bfqd, int node)
{
        struct bfq_group *bfqg;
        int i;

        bfqg = kmalloc_node(sizeof(*bfqg), GFP_KERNEL | __GFP_ZERO, node);
        if (!bfqg)
                return NULL;

        for (i = 0; i < BFQ_IOPRIO_CLASSES; i++)
                bfqg->sched_data.service_tree[i] = BFQ_SERVICE_TREE_INIT;

        return bfqg;
}
#endif /* CONFIG_BFQ_GROUP_IOSCHED */