blk-iolatency.c

/*
 * Block rq-qos base io controller
 *
 * This works similar to wbt with a few exceptions
 *
 * - It's bio based, so the latency covers the whole block layer in addition to
 *   the actual io.
 * - We will throttle all IO that comes in here if we need to.
 * - We use the mean latency over the 100ms window. This is because writes can
 *   be particularly fast, which could give us a false sense of the impact of
 *   other workloads on our protected workload.
 * - By default there's no throttling, we set the queue_depth to UINT_MAX so
 *   that we can have as many outstanding bio's as we're allowed to. Only at
 *   throttle time do we pay attention to the actual queue depth.
 *
 * The hierarchy works like the cpu controller does, we track the latency at
 * every configured node, and each configured node has its own independent
 * queue depth. This means that we only care about our latency targets at the
 * peer level. Some group at the bottom of the hierarchy isn't going to affect
 * a group at the end of some other path if we're only configured at leaf level.
 *
 * Consider the following
 *
 *                   root blkg
 *             /                     \
 *        fast (target=5ms)     slow (target=10ms)
 *         /     \                  /        \
 *       a        b          normal(15ms)   unloved
 *
 * "a" and "b" have no target, but their combined io under "fast" cannot exceed
 * an average latency of 5ms. If it does then we will throttle the "slow"
 * group. In the case of "normal", if it exceeds its 15ms target, we will
 * throttle "unloved", but nobody else.
 *
 * In this example "fast", "slow", and "normal" will be the only groups actually
 * accounting their io latencies. We have to walk up the hierarchy to the root
 * on every submit and complete so we can do the appropriate stat recording and
 * adjust the queue depth of ourselves if needed.
 *
 * There are 2 ways we throttle IO.
 *
 * 1) Queue depth throttling. As we throttle down we will adjust the maximum
 * number of IO's we're allowed to have in flight. This starts at (u64)-1 down
 * to 1. If the group is only ever submitting IO for itself then this is the
 * only way we throttle.
 *
 * 2) Induced delay throttling. This is for the case that a group is generating
 * IO that has to be issued by the root cg to avoid priority inversion. So think
 * REQ_META or REQ_SWAP. If we are already at qd == 1 and we're getting a lot
 * of work done for us on behalf of the root cg and are being asked to scale
 * down more then we induce a latency at userspace return. We accumulate the
 * total amount of time we need to be punished by doing
 *
 *   total_time += min_lat_nsec - actual_io_completion
 *
 * and then at throttle time will do
 *
 *   throttle_time = min(total_time, NSEC_PER_SEC)
 *
 * This induced delay will throttle back the activity that is generating the
 * root cg issued io's, whether that's some metadata intensive operation or the
 * group is using so much memory that it is pushing us into swap.
 *
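 * As a rough worked example (numbers illustrative, not from the code): if
 * min_lat_nsec is 5ms and a root-issued IO done on our behalf completes in
 * 2ms while the group is throttled, 3ms (5ms - 2ms) is added to the group's
 * accumulated delay; at throttle time the task then sleeps for that
 * accumulated delay, capped at one second.
 *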
 * Copyright (C) 2018 Josef Bacik
 */
#include <linux/kernel.h>
#include <linux/blk_types.h>
#include <linux/backing-dev.h>
#include <linux/module.h>
#include <linux/timer.h>
#include <linux/memcontrol.h>
#include <linux/sched/loadavg.h>
#include <linux/sched/signal.h>
#include <trace/events/block.h>
#include "blk-rq-qos.h"
#include "blk-stat.h"

#define DEFAULT_SCALE_COOKIE 1000000U

static struct blkcg_policy blkcg_policy_iolatency;
struct iolatency_grp;

struct blk_iolatency {
	struct rq_qos rqos;
	struct timer_list timer;
	atomic_t enabled;
};

static inline struct blk_iolatency *BLKIOLATENCY(struct rq_qos *rqos)
{
	return container_of(rqos, struct blk_iolatency, rqos);
}

static inline bool blk_iolatency_enabled(struct blk_iolatency *blkiolat)
{
	return atomic_read(&blkiolat->enabled) > 0;
}

struct child_latency_info {
	spinlock_t lock;

	/* Last time we adjusted the scale of everybody. */
	u64 last_scale_event;

	/* The latency that we missed. */
	u64 scale_lat;

	/* Total io's from all of our children for the last summation. */
	u64 nr_samples;

	/* The guy who actually changed the latency numbers. */
	struct iolatency_grp *scale_grp;

	/* Cookie to tell if we need to scale up or down. */
	atomic_t scale_cookie;
};

struct percentile_stats {
	u64 total;
	u64 missed;
};

struct latency_stat {
	union {
		struct percentile_stats ps;
		struct blk_rq_stat rqs;
	};
};

struct iolatency_grp {
	struct blkg_policy_data pd;
	struct latency_stat __percpu *stats;
	struct latency_stat cur_stat;
	struct blk_iolatency *blkiolat;
	struct rq_depth rq_depth;
	struct rq_wait rq_wait;
	atomic64_t window_start;
	atomic_t scale_cookie;
	u64 min_lat_nsec;
	u64 cur_win_nsec;

	/* total running average of our io latency. */
	u64 lat_avg;

	/* Our current number of IO's for the last summation. */
	u64 nr_samples;

	bool ssd;
	struct child_latency_info child_lat;
};

#define BLKIOLATENCY_MIN_WIN_SIZE (100 * NSEC_PER_MSEC)
#define BLKIOLATENCY_MAX_WIN_SIZE NSEC_PER_SEC

/*
 * These are the constants used to fake the fixed-point moving average
 * calculation just like load average. The call to calc_load() folds
 * (FIXED_1 (2048) - exp_factor) * new_sample into lat_avg. The sampling
 * window size is bucketed to try to approximately calculate average
 * latency such that 1/exp (decay rate) is [1 min, 2.5 min) when windows
 * elapse immediately. Note, windows only elapse with IO activity. Idle
 * periods extend the most recent window.
 */
#define BLKIOLATENCY_NR_EXP_FACTORS 5
#define BLKIOLATENCY_EXP_BUCKET_SIZE (BLKIOLATENCY_MAX_WIN_SIZE / \
				      (BLKIOLATENCY_NR_EXP_FACTORS - 1))
static const u64 iolatency_exp_factors[BLKIOLATENCY_NR_EXP_FACTORS] = {
	2045, // exp(1/600) - 600 samples
	2039, // exp(1/240) - 240 samples
	2031, // exp(1/120) - 120 samples
	2023, // exp(1/80) - 80 samples
	2014, // exp(1/60) - 60 samples
};
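/*
 * For example (approximate, derived from the table above): with the default
 * 100ms window, cur_win_nsec / BLKIOLATENCY_EXP_BUCKET_SIZE selects bucket 0
 * (factor 2045 ~= 2048 / exp(1/600)), so it takes on the order of 600
 * back-to-back windows, i.e. roughly a minute of continuous IO, for an old
 * sample's weight in lat_avg to decay by a factor of e.
 */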
static inline struct iolatency_grp *pd_to_lat(struct blkg_policy_data *pd)
{
	return pd ? container_of(pd, struct iolatency_grp, pd) : NULL;
}

static inline struct iolatency_grp *blkg_to_lat(struct blkcg_gq *blkg)
{
	return pd_to_lat(blkg_to_pd(blkg, &blkcg_policy_iolatency));
}

static inline struct blkcg_gq *lat_to_blkg(struct iolatency_grp *iolat)
{
	return pd_to_blkg(&iolat->pd);
}

static inline void latency_stat_init(struct iolatency_grp *iolat,
				     struct latency_stat *stat)
{
	if (iolat->ssd) {
		stat->ps.total = 0;
		stat->ps.missed = 0;
	} else
		blk_rq_stat_init(&stat->rqs);
}

static inline void latency_stat_sum(struct iolatency_grp *iolat,
				    struct latency_stat *sum,
				    struct latency_stat *stat)
{
	if (iolat->ssd) {
		sum->ps.total += stat->ps.total;
		sum->ps.missed += stat->ps.missed;
	} else
		blk_rq_stat_sum(&sum->rqs, &stat->rqs);
}

static inline void latency_stat_record_time(struct iolatency_grp *iolat,
					    u64 req_time)
{
	struct latency_stat *stat = get_cpu_ptr(iolat->stats);

	if (iolat->ssd) {
		if (req_time >= iolat->min_lat_nsec)
			stat->ps.missed++;
		stat->ps.total++;
	} else
		blk_rq_stat_add(&stat->rqs, req_time);
	put_cpu_ptr(stat);
}

static inline bool latency_sum_ok(struct iolatency_grp *iolat,
				  struct latency_stat *stat)
{
	if (iolat->ssd) {
		u64 thresh = div64_u64(stat->ps.total, 10);

		thresh = max(thresh, 1ULL);
		return stat->ps.missed < thresh;
	}
	return stat->rqs.mean <= iolat->min_lat_nsec;
}

static inline u64 latency_stat_samples(struct iolatency_grp *iolat,
				       struct latency_stat *stat)
{
	if (iolat->ssd)
		return stat->ps.total;
	return stat->rqs.nr_samples;
}

static inline void iolat_update_total_lat_avg(struct iolatency_grp *iolat,
					      struct latency_stat *stat)
{
	int exp_idx;

	if (iolat->ssd)
		return;

	/*
	 * calc_load() takes in a number stored in fixed point representation.
	 * Because we are using this for IO time in ns, the values stored
	 * are significantly larger than the FIXED_1 denominator (2048).
	 * Therefore, rounding errors in the calculation are negligible and
	 * can be ignored.
	 */
	exp_idx = min_t(int, BLKIOLATENCY_NR_EXP_FACTORS - 1,
			div64_u64(iolat->cur_win_nsec,
				  BLKIOLATENCY_EXP_BUCKET_SIZE));
	iolat->lat_avg = calc_load(iolat->lat_avg,
				   iolatency_exp_factors[exp_idx],
				   stat->rqs.mean);
}

static inline bool iolatency_may_queue(struct iolatency_grp *iolat,
				       wait_queue_entry_t *wait,
				       bool first_block)
{
	struct rq_wait *rqw = &iolat->rq_wait;

	if (first_block && waitqueue_active(&rqw->wait) &&
	    rqw->wait.head.next != &wait->entry)
		return false;
	return rq_wait_inc_below(rqw, iolat->rq_depth.max_depth);
}
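/*
 * Take one slot of the group's queue depth, sleeping on the group's waitqueue
 * until a slot frees up if we are already at max_depth. Root-issued bios and
 * tasks with a fatal signal pending always get a slot so they can make
 * progress.
 */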
static void __blkcg_iolatency_throttle(struct rq_qos *rqos,
				       struct iolatency_grp *iolat,
				       spinlock_t *lock, bool issue_as_root,
				       bool use_memdelay)
	__releases(lock)
	__acquires(lock)
{
	struct rq_wait *rqw = &iolat->rq_wait;
	unsigned use_delay = atomic_read(&lat_to_blkg(iolat)->use_delay);
	DEFINE_WAIT(wait);
	bool first_block = true;

	if (use_delay)
		blkcg_schedule_throttle(rqos->q, use_memdelay);

	/*
	 * To avoid priority inversions we want to just take a slot if we are
	 * issuing as root. If we're being killed off there's no point in
	 * delaying things, we may have been killed by OOM so throttling may
	 * make recovery take even longer, so just let the IO's through so the
	 * task can go away.
	 */
	if (issue_as_root || fatal_signal_pending(current)) {
		atomic_inc(&rqw->inflight);
		return;
	}

	if (iolatency_may_queue(iolat, &wait, first_block))
		return;

	do {
		prepare_to_wait_exclusive(&rqw->wait, &wait,
					  TASK_UNINTERRUPTIBLE);

		if (iolatency_may_queue(iolat, &wait, first_block))
			break;
		first_block = false;

		if (lock) {
			spin_unlock_irq(lock);
			io_schedule();
			spin_lock_irq(lock);
		} else {
			io_schedule();
		}
	} while (1);

	finish_wait(&rqw->wait, &wait);
}

#define SCALE_DOWN_FACTOR 2
#define SCALE_UP_FACTOR 4
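/*
 * Step size for a queue depth/cookie adjustment: 1/16th of the queue depth
 * when scaling up, 1/4th when scaling down, and never less than 1.
 */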
static inline unsigned long scale_amount(unsigned long qd, bool up)
{
	return max(up ? qd >> SCALE_UP_FACTOR : qd >> SCALE_DOWN_FACTOR, 1UL);
}

/*
 * We scale the qd down faster than we scale up, so we need to use this helper
 * to adjust the scale_cookie accordingly so we don't prematurely get
 * scale_cookie at DEFAULT_SCALE_COOKIE and unthrottle too much.
 *
 * Each group has their own local copy of the last scale cookie they saw, so if
 * the global scale cookie goes up or down they know which way they need to go
 * based on their last knowledge of it.
 */
static void scale_cookie_change(struct blk_iolatency *blkiolat,
				struct child_latency_info *lat_info,
				bool up)
{
	unsigned long qd = blkiolat->rqos.q->nr_requests;
	unsigned long scale = scale_amount(qd, up);
	unsigned long old = atomic_read(&lat_info->scale_cookie);
	unsigned long max_scale = qd << 1;
	unsigned long diff = 0;

	if (old < DEFAULT_SCALE_COOKIE)
		diff = DEFAULT_SCALE_COOKIE - old;

	if (up) {
		if (scale + old > DEFAULT_SCALE_COOKIE)
			atomic_set(&lat_info->scale_cookie,
				   DEFAULT_SCALE_COOKIE);
		else if (diff > qd)
			atomic_inc(&lat_info->scale_cookie);
		else
			atomic_add(scale, &lat_info->scale_cookie);
	} else {
		/*
		 * We don't want to dig a hole so deep that it takes us hours to
		 * dig out of it. Just enough that we don't throttle/unthrottle
		 * with jagged workloads but can still unthrottle once pressure
		 * has sufficiently dissipated.
		 */
		if (diff > qd) {
			if (diff < max_scale)
				atomic_dec(&lat_info->scale_cookie);
		} else {
			atomic_sub(scale, &lat_info->scale_cookie);
		}
	}
}

/*
 * Change the queue depth of the iolatency_grp. We add/subtract 1/16th of the
 * queue depth at a time so we don't get wild swings and hopefully dial in to
 * fairer distribution of the overall queue depth.
 */
static void scale_change(struct iolatency_grp *iolat, bool up)
{
	unsigned long qd = iolat->blkiolat->rqos.q->nr_requests;
	unsigned long scale = scale_amount(qd, up);
	unsigned long old = iolat->rq_depth.max_depth;

	if (old > qd)
		old = qd;

	if (up) {
		if (old == 1 && blkcg_unuse_delay(lat_to_blkg(iolat)))
			return;

		if (old < qd) {
			old += scale;
			old = min(old, qd);
			iolat->rq_depth.max_depth = old;
			wake_up_all(&iolat->rq_wait.wait);
		}
	} else {
		old >>= 1;
		iolat->rq_depth.max_depth = max(old, 1UL);
	}
}

/* Check our parent and see if the scale cookie has changed. */
static void check_scale_change(struct iolatency_grp *iolat)
{
	struct iolatency_grp *parent;
	struct child_latency_info *lat_info;
	unsigned int cur_cookie;
	unsigned int our_cookie = atomic_read(&iolat->scale_cookie);
	u64 scale_lat;
	unsigned int old;
	int direction = 0;

	if (lat_to_blkg(iolat)->parent == NULL)
		return;

	parent = blkg_to_lat(lat_to_blkg(iolat)->parent);
	if (!parent)
		return;

	lat_info = &parent->child_lat;
	cur_cookie = atomic_read(&lat_info->scale_cookie);
	scale_lat = READ_ONCE(lat_info->scale_lat);

	if (cur_cookie < our_cookie)
		direction = -1;
	else if (cur_cookie > our_cookie)
		direction = 1;
	else
		return;

	old = atomic_cmpxchg(&iolat->scale_cookie, our_cookie, cur_cookie);

	/* Somebody beat us to the punch, just bail. */
	if (old != our_cookie)
		return;

	if (direction < 0 && iolat->min_lat_nsec) {
		u64 samples_thresh;

		if (!scale_lat || iolat->min_lat_nsec <= scale_lat)
			return;

		/*
		 * Sometimes high priority groups are their own worst enemy, so
		 * instead of taking it out on some poor other group that did 5%
		 * or less of the IO's for the last summation just skip this
		 * scale down event.
		 */
		samples_thresh = lat_info->nr_samples * 5;
		samples_thresh = max(1ULL, div64_u64(samples_thresh, 100));
		if (iolat->nr_samples <= samples_thresh)
			return;
	}

	/* We're as low as we can go. */
	if (iolat->rq_depth.max_depth == 1 && direction < 0) {
		blkcg_use_delay(lat_to_blkg(iolat));
		return;
	}

	/* We're back to the default cookie, unthrottle all the things. */
	if (cur_cookie == DEFAULT_SCALE_COOKIE) {
		blkcg_clear_delay(lat_to_blkg(iolat));
		iolat->rq_depth.max_depth = UINT_MAX;
		wake_up_all(&iolat->rq_wait.wait);
		return;
	}

	scale_change(iolat, direction > 0);
}
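/*
 * rq_qos throttle hook: associate the bio with a blkg if needed, then walk
 * from the bio's group up to the root, applying any pending scale change and
 * taking a queue depth slot at every level that has iolatency configured.
 */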
static void blkcg_iolatency_throttle(struct rq_qos *rqos, struct bio *bio,
				     spinlock_t *lock)
{
	struct blk_iolatency *blkiolat = BLKIOLATENCY(rqos);
	struct blkcg *blkcg;
	struct blkcg_gq *blkg;
	struct request_queue *q = rqos->q;
	bool issue_as_root = bio_issue_as_root_blkg(bio);

	if (!blk_iolatency_enabled(blkiolat))
		return;

	rcu_read_lock();
	blkcg = bio_blkcg(bio);
	bio_associate_blkcg(bio, &blkcg->css);
	blkg = blkg_lookup(blkcg, q);
	if (unlikely(!blkg)) {
		if (!lock)
			spin_lock_irq(q->queue_lock);
		blkg = blkg_lookup_create(blkcg, q);
		if (IS_ERR(blkg))
			blkg = NULL;
		if (!lock)
			spin_unlock_irq(q->queue_lock);
	}
	if (!blkg)
		goto out;

	bio_issue_init(&bio->bi_issue, bio_sectors(bio));
	bio_associate_blkg(bio, blkg);
out:
	rcu_read_unlock();

	while (blkg && blkg->parent) {
		struct iolatency_grp *iolat = blkg_to_lat(blkg);

		if (!iolat) {
			blkg = blkg->parent;
			continue;
		}

		check_scale_change(iolat);
		__blkcg_iolatency_throttle(rqos, iolat, lock, issue_as_root,
					   (bio->bi_opf & REQ_SWAP) == REQ_SWAP);
		blkg = blkg->parent;
	}

	if (!timer_pending(&blkiolat->timer))
		mod_timer(&blkiolat->timer, jiffies + HZ);
}
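/*
 * Record one completed bio's latency into the group's per-cpu stats. When the
 * group is currently throttled, root-issued bios are not counted; if such a
 * bio completed faster than the latency target, the difference accrues as
 * induced delay instead.
 */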
static void iolatency_record_time(struct iolatency_grp *iolat,
				  struct bio_issue *issue, u64 now,
				  bool issue_as_root)
{
	u64 start = bio_issue_time(issue);
	u64 req_time;

	/*
	 * Have to do this so we are truncated to the correct time that our
	 * issue is truncated to.
	 */
	now = __bio_issue_time(now);

	if (now <= start)
		return;

	req_time = now - start;

	/*
	 * We don't want to count issue_as_root bio's in the cgroups latency
	 * statistics as it could skew the numbers downwards.
	 */
	if (unlikely(issue_as_root && iolat->rq_depth.max_depth != UINT_MAX)) {
		u64 sub = iolat->min_lat_nsec;

		if (req_time < sub)
			blkcg_add_delay(lat_to_blkg(iolat), now, sub - req_time);
		return;
	}

	latency_stat_record_time(iolat, req_time);
}

#define BLKIOLATENCY_MIN_ADJUST_TIME (500 * NSEC_PER_MSEC)
#define BLKIOLATENCY_MIN_GOOD_SAMPLES 5
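/*
 * Called at most once per window from the completion path: fold the per-cpu
 * stats into a single sum, update the running average, and then decide under
 * the parent's lock whether the parent's scale cookie should move up (we met
 * our target with enough samples) or down (we missed it).
 */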
static void iolatency_check_latencies(struct iolatency_grp *iolat, u64 now)
{
	struct blkcg_gq *blkg = lat_to_blkg(iolat);
	struct iolatency_grp *parent;
	struct child_latency_info *lat_info;
	struct latency_stat stat;
	unsigned long flags;
	int cpu;

	latency_stat_init(iolat, &stat);
	preempt_disable();
	for_each_online_cpu(cpu) {
		struct latency_stat *s;

		s = per_cpu_ptr(iolat->stats, cpu);
		latency_stat_sum(iolat, &stat, s);
		latency_stat_init(iolat, s);
	}
	preempt_enable();

	parent = blkg_to_lat(blkg->parent);
	if (!parent)
		return;

	lat_info = &parent->child_lat;

	iolat_update_total_lat_avg(iolat, &stat);

	/* Everything is ok and we don't need to adjust the scale. */
	if (latency_sum_ok(iolat, &stat) &&
	    atomic_read(&lat_info->scale_cookie) == DEFAULT_SCALE_COOKIE)
		return;

	/* Somebody beat us to the punch, just bail. */
	spin_lock_irqsave(&lat_info->lock, flags);

	latency_stat_sum(iolat, &iolat->cur_stat, &stat);
	lat_info->nr_samples -= iolat->nr_samples;
	lat_info->nr_samples += latency_stat_samples(iolat, &iolat->cur_stat);
	iolat->nr_samples = latency_stat_samples(iolat, &iolat->cur_stat);

	if ((lat_info->last_scale_event >= now ||
	     now - lat_info->last_scale_event < BLKIOLATENCY_MIN_ADJUST_TIME))
		goto out;

	if (latency_sum_ok(iolat, &iolat->cur_stat) &&
	    latency_sum_ok(iolat, &stat)) {
		if (latency_stat_samples(iolat, &iolat->cur_stat) <
		    BLKIOLATENCY_MIN_GOOD_SAMPLES)
			goto out;
		if (lat_info->scale_grp == iolat) {
			lat_info->last_scale_event = now;
			scale_cookie_change(iolat->blkiolat, lat_info, true);
		}
	} else if (lat_info->scale_lat == 0 ||
		   lat_info->scale_lat >= iolat->min_lat_nsec) {
		lat_info->last_scale_event = now;
		if (!lat_info->scale_grp ||
		    lat_info->scale_lat > iolat->min_lat_nsec) {
			WRITE_ONCE(lat_info->scale_lat, iolat->min_lat_nsec);
			lat_info->scale_grp = iolat;
		}
		scale_cookie_change(iolat->blkiolat, lat_info, false);
	}
	latency_stat_init(iolat, &iolat->cur_stat);
out:
	spin_unlock_irqrestore(&lat_info->lock, flags);
}
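/*
 * Completion hook: walk from the bio's blkg up to the root, dropping the
 * inflight count taken at throttle time, recording the latency, and starting
 * a latency check whenever a group's current window has elapsed.
 */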
static void blkcg_iolatency_done_bio(struct rq_qos *rqos, struct bio *bio)
{
	struct blkcg_gq *blkg;
	struct rq_wait *rqw;
	struct iolatency_grp *iolat;
	u64 window_start;
	u64 now = ktime_to_ns(ktime_get());
	bool issue_as_root = bio_issue_as_root_blkg(bio);
	bool enabled = false;

	blkg = bio->bi_blkg;
	if (!blkg)
		return;

	iolat = blkg_to_lat(bio->bi_blkg);
	if (!iolat)
		return;

	enabled = blk_iolatency_enabled(iolat->blkiolat);
	while (blkg && blkg->parent) {
		iolat = blkg_to_lat(blkg);
		if (!iolat) {
			blkg = blkg->parent;
			continue;
		}
		rqw = &iolat->rq_wait;

		atomic_dec(&rqw->inflight);
		if (!enabled || iolat->min_lat_nsec == 0)
			goto next;
		iolatency_record_time(iolat, &bio->bi_issue, now,
				      issue_as_root);
		window_start = atomic64_read(&iolat->window_start);
		if (now > window_start &&
		    (now - window_start) >= iolat->cur_win_nsec) {
			if (atomic64_cmpxchg(&iolat->window_start,
					     window_start, now) == window_start)
				iolatency_check_latencies(iolat, now);
		}
next:
		wake_up(&rqw->wait);
		blkg = blkg->parent;
	}
}

static void blkcg_iolatency_cleanup(struct rq_qos *rqos, struct bio *bio)
{
	struct blkcg_gq *blkg;

	blkg = bio->bi_blkg;
	while (blkg && blkg->parent) {
		struct rq_wait *rqw;
		struct iolatency_grp *iolat;

		iolat = blkg_to_lat(blkg);
		if (!iolat)
			goto next;

		rqw = &iolat->rq_wait;
		atomic_dec(&rqw->inflight);
		wake_up(&rqw->wait);
next:
		blkg = blkg->parent;
	}
}

static void blkcg_iolatency_exit(struct rq_qos *rqos)
{
	struct blk_iolatency *blkiolat = BLKIOLATENCY(rqos);

	del_timer_sync(&blkiolat->timer);
	blkcg_deactivate_policy(rqos->q, &blkcg_policy_iolatency);
	kfree(blkiolat);
}

static struct rq_qos_ops blkcg_iolatency_ops = {
	.throttle = blkcg_iolatency_throttle,
	.cleanup = blkcg_iolatency_cleanup,
	.done_bio = blkcg_iolatency_done_bio,
	.exit = blkcg_iolatency_exit,
};
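/*
 * Once-a-second timer (re-armed from the throttle path): for each group that
 * is currently scaled down, either scale the cookie back up if no group is
 * recorded as responsible, or drop a stale scale_grp after 5 seconds with no
 * scale events so an idle group can't keep its siblings throttled.
 */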
static void blkiolatency_timer_fn(struct timer_list *t)
{
	struct blk_iolatency *blkiolat = from_timer(blkiolat, t, timer);
	struct blkcg_gq *blkg;
	struct cgroup_subsys_state *pos_css;
	u64 now = ktime_to_ns(ktime_get());

	rcu_read_lock();
	blkg_for_each_descendant_pre(blkg, pos_css,
				     blkiolat->rqos.q->root_blkg) {
		struct iolatency_grp *iolat;
		struct child_latency_info *lat_info;
		unsigned long flags;
		u64 cookie;

		/*
		 * We could be exiting, don't access the pd unless we have a
		 * ref on the blkg.
		 */
		if (!blkg_try_get(blkg))
			continue;

		iolat = blkg_to_lat(blkg);
		if (!iolat)
			goto next;

		lat_info = &iolat->child_lat;
		cookie = atomic_read(&lat_info->scale_cookie);

		if (cookie >= DEFAULT_SCALE_COOKIE)
			goto next;

		spin_lock_irqsave(&lat_info->lock, flags);
		if (lat_info->last_scale_event >= now)
			goto next_lock;

		/*
		 * We scaled down but don't have a scale_grp, scale up and carry
		 * on.
		 */
		if (lat_info->scale_grp == NULL) {
			scale_cookie_change(iolat->blkiolat, lat_info, true);
			goto next_lock;
		}

		/*
		 * It's been 5 seconds since our last scale event, clear the
		 * scale grp in case the group that needed the scale down isn't
		 * doing any IO currently.
		 */
		if (now - lat_info->last_scale_event >=
		    ((u64)NSEC_PER_SEC * 5))
			lat_info->scale_grp = NULL;
next_lock:
		spin_unlock_irqrestore(&lat_info->lock, flags);
next:
		blkg_put(blkg);
	}
	rcu_read_unlock();
}
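/*
 * Set up the per-queue blk_iolatency instance: register it as an rq_qos
 * policy on the queue, activate the blkcg policy, and prepare (but don't arm)
 * the periodic timer.
 */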
int blk_iolatency_init(struct request_queue *q)
{
	struct blk_iolatency *blkiolat;
	struct rq_qos *rqos;
	int ret;

	blkiolat = kzalloc(sizeof(*blkiolat), GFP_KERNEL);
	if (!blkiolat)
		return -ENOMEM;

	rqos = &blkiolat->rqos;
	rqos->id = RQ_QOS_CGROUP;
	rqos->ops = &blkcg_iolatency_ops;
	rqos->q = q;

	rq_qos_add(q, rqos);

	ret = blkcg_activate_policy(q, &blkcg_policy_iolatency);
	if (ret) {
		rq_qos_del(q, rqos);
		kfree(blkiolat);
		return ret;
	}

	timer_setup(&blkiolat->timer, blkiolatency_timer_fn, 0);
	return 0;
}
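/*
 * Update a group's latency target and derive its sampling window from it
 * (16 * target, clamped to [100ms, 1s]). Setting a target for the first time
 * or clearing it adjusts the queue-wide enabled count.
 */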
static void iolatency_set_min_lat_nsec(struct blkcg_gq *blkg, u64 val)
{
	struct iolatency_grp *iolat = blkg_to_lat(blkg);
	struct blk_iolatency *blkiolat = iolat->blkiolat;
	u64 oldval = iolat->min_lat_nsec;

	iolat->min_lat_nsec = val;
	iolat->cur_win_nsec = max_t(u64, val << 4, BLKIOLATENCY_MIN_WIN_SIZE);
	iolat->cur_win_nsec = min_t(u64, iolat->cur_win_nsec,
				    BLKIOLATENCY_MAX_WIN_SIZE);

	if (!oldval && val)
		atomic_inc(&blkiolat->enabled);
	if (oldval && !val)
		atomic_dec(&blkiolat->enabled);
}

static void iolatency_clear_scaling(struct blkcg_gq *blkg)
{
	if (blkg->parent) {
		struct iolatency_grp *iolat = blkg_to_lat(blkg->parent);
		struct child_latency_info *lat_info;

		if (!iolat)
			return;

		lat_info = &iolat->child_lat;
		spin_lock(&lat_info->lock);
		atomic_set(&lat_info->scale_cookie, DEFAULT_SCALE_COOKIE);
		lat_info->last_scale_event = 0;
		lat_info->scale_grp = NULL;
		lat_info->scale_lat = 0;
		spin_unlock(&lat_info->lock);
	}
}
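/*
 * Parse "MAJ:MIN target=<usec>|max" written to the io.latency file and apply
 * the new target to the group, resetting the parent's scaling state if the
 * target actually changed.
 */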
static ssize_t iolatency_set_limit(struct kernfs_open_file *of, char *buf,
				   size_t nbytes, loff_t off)
{
	struct blkcg *blkcg = css_to_blkcg(of_css(of));
	struct blkcg_gq *blkg;
	struct blkg_conf_ctx ctx;
	struct iolatency_grp *iolat;
	char *p, *tok;
	u64 lat_val = 0;
	u64 oldval;
	int ret;

	ret = blkg_conf_prep(blkcg, &blkcg_policy_iolatency, buf, &ctx);
	if (ret)
		return ret;

	iolat = blkg_to_lat(ctx.blkg);
	p = ctx.body;

	ret = -EINVAL;
	while ((tok = strsep(&p, " "))) {
		char key[16];
		char val[21];	/* 18446744073709551616 */

		if (sscanf(tok, "%15[^=]=%20s", key, val) != 2)
			goto out;

		if (!strcmp(key, "target")) {
			u64 v;

			if (!strcmp(val, "max"))
				lat_val = 0;
			else if (sscanf(val, "%llu", &v) == 1)
				lat_val = v * NSEC_PER_USEC;
			else
				goto out;
		} else {
			goto out;
		}
	}

	/* Walk up the tree to see if our new val is lower than it should be. */
	blkg = ctx.blkg;
	oldval = iolat->min_lat_nsec;

	iolatency_set_min_lat_nsec(blkg, lat_val);
	if (oldval != iolat->min_lat_nsec) {
		iolatency_clear_scaling(blkg);
	}

	ret = 0;
out:
	blkg_conf_finish(&ctx);
	return ret ?: nbytes;
}

static u64 iolatency_prfill_limit(struct seq_file *sf,
				  struct blkg_policy_data *pd, int off)
{
	struct iolatency_grp *iolat = pd_to_lat(pd);
	const char *dname = blkg_dev_name(pd->blkg);

	if (!dname || !iolat->min_lat_nsec)
		return 0;
	seq_printf(sf, "%s target=%llu\n",
		   dname, div_u64(iolat->min_lat_nsec, NSEC_PER_USEC));
	return 0;
}

static int iolatency_print_limit(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
			  iolatency_prfill_limit,
			  &blkcg_policy_iolatency, seq_cft(sf)->private, false);
	return 0;
}

static size_t iolatency_ssd_stat(struct iolatency_grp *iolat, char *buf,
				 size_t size)
{
	struct latency_stat stat;
	int cpu;

	latency_stat_init(iolat, &stat);
	preempt_disable();
	for_each_online_cpu(cpu) {
		struct latency_stat *s;

		s = per_cpu_ptr(iolat->stats, cpu);
		latency_stat_sum(iolat, &stat, s);
	}
	preempt_enable();

	if (iolat->rq_depth.max_depth == UINT_MAX)
		return scnprintf(buf, size, " missed=%llu total=%llu depth=max",
				 (unsigned long long)stat.ps.missed,
				 (unsigned long long)stat.ps.total);
	return scnprintf(buf, size, " missed=%llu total=%llu depth=%u",
			 (unsigned long long)stat.ps.missed,
			 (unsigned long long)stat.ps.total,
			 iolat->rq_depth.max_depth);
}

static size_t iolatency_pd_stat(struct blkg_policy_data *pd, char *buf,
				size_t size)
{
	struct iolatency_grp *iolat = pd_to_lat(pd);
	unsigned long long avg_lat;
	unsigned long long cur_win;

	if (iolat->ssd)
		return iolatency_ssd_stat(iolat, buf, size);

	avg_lat = div64_u64(iolat->lat_avg, NSEC_PER_USEC);
	cur_win = div64_u64(iolat->cur_win_nsec, NSEC_PER_MSEC);
	if (iolat->rq_depth.max_depth == UINT_MAX)
		return scnprintf(buf, size, " depth=max avg_lat=%llu win=%llu",
				 avg_lat, cur_win);

	return scnprintf(buf, size, " depth=%u avg_lat=%llu win=%llu",
			 iolat->rq_depth.max_depth, avg_lat, cur_win);
}

static struct blkg_policy_data *iolatency_pd_alloc(gfp_t gfp, int node)
{
	struct iolatency_grp *iolat;

	iolat = kzalloc_node(sizeof(*iolat), gfp, node);
	if (!iolat)
		return NULL;
	iolat->stats = __alloc_percpu_gfp(sizeof(struct latency_stat),
					  __alignof__(struct latency_stat), gfp);
	if (!iolat->stats) {
		kfree(iolat);
		return NULL;
	}
	return &iolat->pd;
}

static void iolatency_pd_init(struct blkg_policy_data *pd)
{
	struct iolatency_grp *iolat = pd_to_lat(pd);
	struct blkcg_gq *blkg = lat_to_blkg(iolat);
	struct rq_qos *rqos = blkcg_rq_qos(blkg->q);
	struct blk_iolatency *blkiolat = BLKIOLATENCY(rqos);
	u64 now = ktime_to_ns(ktime_get());
	int cpu;

	if (blk_queue_nonrot(blkg->q))
		iolat->ssd = true;
	else
		iolat->ssd = false;

	for_each_possible_cpu(cpu) {
		struct latency_stat *stat;

		stat = per_cpu_ptr(iolat->stats, cpu);
		latency_stat_init(iolat, stat);
	}

	latency_stat_init(iolat, &iolat->cur_stat);
	rq_wait_init(&iolat->rq_wait);
	spin_lock_init(&iolat->child_lat.lock);
	iolat->rq_depth.queue_depth = blkg->q->nr_requests;
	iolat->rq_depth.max_depth = UINT_MAX;
	iolat->rq_depth.default_depth = iolat->rq_depth.queue_depth;
	iolat->blkiolat = blkiolat;
	iolat->cur_win_nsec = 100 * NSEC_PER_MSEC;
	atomic64_set(&iolat->window_start, now);

	/*
	 * We init things in list order, so the pd for the parent may not be
	 * init'ed yet for whatever reason.
	 */
	if (blkg->parent && blkg_to_pd(blkg->parent, &blkcg_policy_iolatency)) {
		struct iolatency_grp *parent = blkg_to_lat(blkg->parent);

		atomic_set(&iolat->scale_cookie,
			   atomic_read(&parent->child_lat.scale_cookie));
	} else {
		atomic_set(&iolat->scale_cookie, DEFAULT_SCALE_COOKIE);
	}

	atomic_set(&iolat->child_lat.scale_cookie, DEFAULT_SCALE_COOKIE);
}

static void iolatency_pd_offline(struct blkg_policy_data *pd)
{
	struct iolatency_grp *iolat = pd_to_lat(pd);
	struct blkcg_gq *blkg = lat_to_blkg(iolat);

	iolatency_set_min_lat_nsec(blkg, 0);
	iolatency_clear_scaling(blkg);
}

static void iolatency_pd_free(struct blkg_policy_data *pd)
{
	struct iolatency_grp *iolat = pd_to_lat(pd);

	free_percpu(iolat->stats);
	kfree(iolat);
}

static struct cftype iolatency_files[] = {
	{
		.name = "latency",
		.flags = CFTYPE_NOT_ON_ROOT,
		.seq_show = iolatency_print_limit,
		.write = iolatency_set_limit,
	},
	{}
};

static struct blkcg_policy blkcg_policy_iolatency = {
	.dfl_cftypes = iolatency_files,
	.pd_alloc_fn = iolatency_pd_alloc,
	.pd_init_fn = iolatency_pd_init,
	.pd_offline_fn = iolatency_pd_offline,
	.pd_free_fn = iolatency_pd_free,
	.pd_stat_fn = iolatency_pd_stat,
};

static int __init iolatency_init(void)
{
	return blkcg_policy_register(&blkcg_policy_iolatency);
}

static void __exit iolatency_exit(void)
{
	return blkcg_policy_unregister(&blkcg_policy_iolatency);
}

module_init(iolatency_init);
module_exit(iolatency_exit);