blk-cgroup.h

#ifndef _BLK_CGROUP_H
#define _BLK_CGROUP_H
/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *		      Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 *		      Nauman Rafique <nauman@google.com>
 */

#include <linux/cgroup.h>
#include <linux/percpu_counter.h>
#include <linux/seq_file.h>
#include <linux/radix-tree.h>
#include <linux/blkdev.h>
#include <linux/atomic.h>

/* percpu_counter batch for blkg_[rw]stats, per-cpu drift doesn't matter */
#define BLKG_STAT_CPU_BATCH	(INT_MAX / 2)

/* Max limits for throttle policy */
#define THROTL_IOPS_MAX		UINT_MAX

#ifdef CONFIG_BLK_CGROUP

enum blkg_rwstat_type {
	BLKG_RWSTAT_READ,
	BLKG_RWSTAT_WRITE,
	BLKG_RWSTAT_SYNC,
	BLKG_RWSTAT_ASYNC,

	BLKG_RWSTAT_NR,
	BLKG_RWSTAT_TOTAL = BLKG_RWSTAT_NR,
};

struct blkcg_gq;

struct blkcg {
	struct cgroup_subsys_state	css;
	spinlock_t			lock;

	struct radix_tree_root		blkg_tree;
	struct blkcg_gq			*blkg_hint;
	struct hlist_head		blkg_list;

	struct blkcg_policy_data	*cpd[BLKCG_MAX_POLS];

	struct list_head		all_blkcgs_node;
#ifdef CONFIG_CGROUP_WRITEBACK
	struct list_head		cgwb_list;
#endif
};

/*
 * blkg_[rw]stat->aux_cnt is excluded for local stats but included for
 * recursive.  Used to carry stats of dead children, and, for blkg_rwstat,
 * to carry result values from read and sum operations.
 */
struct blkg_stat {
	struct percpu_counter		cpu_cnt;
	atomic64_t			aux_cnt;
};

struct blkg_rwstat {
	struct percpu_counter		cpu_cnt[BLKG_RWSTAT_NR];
	atomic64_t			aux_cnt[BLKG_RWSTAT_NR];
};
/*
 * A blkcg_gq (blkg) is an association between a block cgroup (blkcg) and a
 * request_queue (q).  This is used by blkcg policies which need to track
 * information per blkcg - q pair.
 *
 * There can be multiple active blkcg policies and each blkg:policy pair is
 * represented by a blkg_policy_data which is allocated and freed by each
 * policy's pd_alloc/free_fn() methods.  A policy can allocate a private
 * data area by allocating a larger data structure which embeds
 * blkg_policy_data at the beginning.
 */
struct blkg_policy_data {
	/* the blkg and policy id this per-policy data belongs to */
	struct blkcg_gq			*blkg;
	int				plid;
};
/*
 * Policies that need to keep per-blkcg data which is independent from any
 * request_queue associated with it should implement cpd_alloc/free_fn()
 * methods.  A policy can allocate a private data area by allocating a
 * larger data structure which embeds blkcg_policy_data at the beginning.
 * cpd_init_fn() is invoked to let each policy handle per-blkcg data.
 */
struct blkcg_policy_data {
	/* the blkcg and policy id this per-policy data belongs to */
	struct blkcg			*blkcg;
	int				plid;
};

/* association between a blk cgroup and a request queue */
struct blkcg_gq {
	/* Pointer to the associated request_queue */
	struct request_queue		*q;
	struct list_head		q_node;
	struct hlist_node		blkcg_node;
	struct blkcg			*blkcg;

	/*
	 * Each blkg gets congested separately and the congestion state is
	 * propagated to the matching bdi_writeback_congested.
	 */
	struct bdi_writeback_congested	*wb_congested;

	/* all non-root blkcg_gq's are guaranteed to have access to parent */
	struct blkcg_gq			*parent;

	/* request allocation list for this blkcg-q pair */
	struct request_list		rl;

	/* reference count */
	atomic_t			refcnt;

	/* is this blkg online? protected by both blkcg and q locks */
	bool				online;

	struct blkg_rwstat		stat_bytes;
	struct blkg_rwstat		stat_ios;

	struct blkg_policy_data		*pd[BLKCG_MAX_POLS];

	struct rcu_head			rcu_head;
};

typedef struct blkcg_policy_data *(blkcg_pol_alloc_cpd_fn)(gfp_t gfp);
typedef void (blkcg_pol_init_cpd_fn)(struct blkcg_policy_data *cpd);
typedef void (blkcg_pol_free_cpd_fn)(struct blkcg_policy_data *cpd);
typedef struct blkg_policy_data *(blkcg_pol_alloc_pd_fn)(gfp_t gfp, int node);
typedef void (blkcg_pol_init_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_online_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_offline_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_free_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_reset_pd_stats_fn)(struct blkg_policy_data *pd);

struct blkcg_policy {
	int				plid;
	/* cgroup files for the policy */
	struct cftype			*dfl_cftypes;
	struct cftype			*legacy_cftypes;

	/* operations */
	blkcg_pol_alloc_cpd_fn		*cpd_alloc_fn;
	blkcg_pol_init_cpd_fn		*cpd_init_fn;
	blkcg_pol_free_cpd_fn		*cpd_free_fn;
	blkcg_pol_alloc_pd_fn		*pd_alloc_fn;
	blkcg_pol_init_pd_fn		*pd_init_fn;
	blkcg_pol_online_pd_fn		*pd_online_fn;
	blkcg_pol_offline_pd_fn		*pd_offline_fn;
	blkcg_pol_free_pd_fn		*pd_free_fn;
	blkcg_pol_reset_pd_stats_fn	*pd_reset_stats_fn;
};

extern struct blkcg blkcg_root;
extern struct cgroup_subsys_state * const blkcg_root_css;

struct blkcg_gq *blkg_lookup_slowpath(struct blkcg *blkcg,
				      struct request_queue *q, bool update_hint);
struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
				    struct request_queue *q);
int blkcg_init_queue(struct request_queue *q);
void blkcg_drain_queue(struct request_queue *q);
void blkcg_exit_queue(struct request_queue *q);

/* Blkio controller policy registration */
int blkcg_policy_register(struct blkcg_policy *pol);
void blkcg_policy_unregister(struct blkcg_policy *pol);
int blkcg_activate_policy(struct request_queue *q,
			  const struct blkcg_policy *pol);
void blkcg_deactivate_policy(struct request_queue *q,
			     const struct blkcg_policy *pol);
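
/*
 * Usage sketch (illustrative, not part of this header): a minimal policy
 * wires its pd_alloc/free_fn() methods into a blkcg_policy, registers it,
 * and then activates it on each queue it wants to manage.  foo_policy,
 * foo_pd_alloc() and foo_pd_free() are hypothetical names.
 *
 *	static struct blkcg_policy foo_policy = {
 *		.pd_alloc_fn	= foo_pd_alloc,
 *		.pd_free_fn	= foo_pd_free,
 *	};
 *
 *	static int __init foo_init(void)
 *	{
 *		return blkcg_policy_register(&foo_policy);
 *	}
 *
 * blkcg_activate_policy(q, &foo_policy) then allocates a blkg_policy_data
 * for every blkg on @q via pd_alloc_fn().
 */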
const char *blkg_dev_name(struct blkcg_gq *blkg);
void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
		       u64 (*prfill)(struct seq_file *,
				     struct blkg_policy_data *, int),
		       const struct blkcg_policy *pol, int data,
		       bool show_total);
u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v);
u64 __blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
			 const struct blkg_rwstat *rwstat);
u64 blkg_prfill_stat(struct seq_file *sf, struct blkg_policy_data *pd, int off);
u64 blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
		       int off);
int blkg_print_stat_bytes(struct seq_file *sf, void *v);
int blkg_print_stat_ios(struct seq_file *sf, void *v);
int blkg_print_stat_bytes_recursive(struct seq_file *sf, void *v);
int blkg_print_stat_ios_recursive(struct seq_file *sf, void *v);

u64 blkg_stat_recursive_sum(struct blkcg_gq *blkg,
			    struct blkcg_policy *pol, int off);
struct blkg_rwstat blkg_rwstat_recursive_sum(struct blkcg_gq *blkg,
					     struct blkcg_policy *pol, int off);

struct blkg_conf_ctx {
	struct gendisk			*disk;
	struct blkcg_gq			*blkg;
	char				*body;
};

int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
		   char *input, struct blkg_conf_ctx *ctx);
void blkg_conf_finish(struct blkg_conf_ctx *ctx);
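
/*
 * Usage sketch (illustrative): a cftype write handler typically parses a
 * "MAJ:MIN body" string with blkg_conf_prep(), updates its policy data for
 * ctx.blkg under the locks blkg_conf_prep() took, and releases them with
 * blkg_conf_finish().  foo_policy and foo_parse_and_apply() are
 * hypothetical.
 *
 *	struct blkg_conf_ctx ctx;
 *	int ret;
 *
 *	ret = blkg_conf_prep(blkcg, &foo_policy, buf, &ctx);
 *	if (ret)
 *		return ret;
 *	ret = foo_parse_and_apply(ctx.blkg, ctx.body);
 *	blkg_conf_finish(&ctx);
 *	return ret;
 */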
static inline struct blkcg *css_to_blkcg(struct cgroup_subsys_state *css)
{
	return css ? container_of(css, struct blkcg, css) : NULL;
}

static inline struct blkcg *task_blkcg(struct task_struct *tsk)
{
	return css_to_blkcg(task_css(tsk, io_cgrp_id));
}

static inline struct blkcg *bio_blkcg(struct bio *bio)
{
	if (bio && bio->bi_css)
		return css_to_blkcg(bio->bi_css);
	return task_blkcg(current);
}

static inline struct cgroup_subsys_state *
task_get_blkcg_css(struct task_struct *task)
{
	return task_get_css(task, io_cgrp_id);
}

/**
 * blkcg_parent - get the parent of a blkcg
 * @blkcg: blkcg of interest
 *
 * Return the parent blkcg of @blkcg.  Can be called anytime.
 */
static inline struct blkcg *blkcg_parent(struct blkcg *blkcg)
{
	return css_to_blkcg(blkcg->css.parent);
}
/**
 * __blkg_lookup - internal version of blkg_lookup()
 * @blkcg: blkcg of interest
 * @q: request_queue of interest
 * @update_hint: whether to update lookup hint with the result or not
 *
 * This is the internal version and shouldn't be used by policy
 * implementations.  Looks up blkgs for the @blkcg - @q pair regardless of
 * @q's bypass state.  If @update_hint is %true, the caller should be
 * holding @q->queue_lock and the lookup hint is updated on success.
 */
static inline struct blkcg_gq *__blkg_lookup(struct blkcg *blkcg,
					     struct request_queue *q,
					     bool update_hint)
{
	struct blkcg_gq *blkg;

	if (blkcg == &blkcg_root)
		return q->root_blkg;

	blkg = rcu_dereference(blkcg->blkg_hint);
	if (blkg && blkg->q == q)
		return blkg;

	return blkg_lookup_slowpath(blkcg, q, update_hint);
}
/**
 * blkg_lookup - lookup blkg for the specified blkcg - q pair
 * @blkcg: blkcg of interest
 * @q: request_queue of interest
 *
 * Look up the blkg for the @blkcg - @q pair.  This function should be
 * called under the RCU read lock and is guaranteed to return %NULL if @q
 * is bypassing - see blk_queue_bypass_start() for details.
 */
static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg,
					   struct request_queue *q)
{
	WARN_ON_ONCE(!rcu_read_lock_held());

	if (unlikely(blk_queue_bypass(q)))
		return NULL;
	return __blkg_lookup(blkcg, q, false);
}
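
/*
 * Usage sketch (illustrative): a lookup must be wrapped in the RCU read
 * lock, and the result pinned with blkg_get() before the lock is dropped
 * if the blkg is used afterwards.
 *
 *	rcu_read_lock();
 *	blkg = blkg_lookup(bio_blkcg(bio), q);
 *	if (blkg)
 *		blkg_get(blkg);
 *	rcu_read_unlock();
 */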
/**
 * blkg_to_pd - get policy private data
 * @blkg: blkg of interest
 * @pol: policy of interest
 *
 * Return pointer to private data associated with the @blkg-@pol pair.
 */
static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
						  struct blkcg_policy *pol)
{
	return blkg ? blkg->pd[pol->plid] : NULL;
}
static inline struct blkcg_policy_data *blkcg_to_cpd(struct blkcg *blkcg,
						     struct blkcg_policy *pol)
{
	return blkcg ? blkcg->cpd[pol->plid] : NULL;
}
/**
 * pd_to_blkg - get blkg associated with policy private data
 * @pd: policy private data of interest
 *
 * @pd is policy private data.  Determine the blkg it's associated with.
 */
static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd)
{
	return pd ? pd->blkg : NULL;
}

static inline struct blkcg *cpd_to_blkcg(struct blkcg_policy_data *cpd)
{
	return cpd ? cpd->blkcg : NULL;
}
/**
 * blkg_path - format cgroup path of blkg
 * @blkg: blkg of interest
 * @buf: target buffer
 * @buflen: target buffer length
 *
 * Format the path of the cgroup of @blkg into @buf.
 */
static inline int blkg_path(struct blkcg_gq *blkg, char *buf, int buflen)
{
	char *p;

	p = cgroup_path(blkg->blkcg->css.cgroup, buf, buflen);
	if (!p) {
		strncpy(buf, "<unavailable>", buflen);
		return -ENAMETOOLONG;
	}

	memmove(buf, p, buf + buflen - p);
	return 0;
}
/**
 * blkg_get - get a blkg reference
 * @blkg: blkg to get
 *
 * The caller should be holding an existing reference.
 */
static inline void blkg_get(struct blkcg_gq *blkg)
{
	WARN_ON_ONCE(atomic_read(&blkg->refcnt) <= 0);
	atomic_inc(&blkg->refcnt);
}

void __blkg_release_rcu(struct rcu_head *rcu);

/**
 * blkg_put - put a blkg reference
 * @blkg: blkg to put
 */
static inline void blkg_put(struct blkcg_gq *blkg)
{
	WARN_ON_ONCE(atomic_read(&blkg->refcnt) <= 0);
	if (atomic_dec_and_test(&blkg->refcnt))
		call_rcu(&blkg->rcu_head, __blkg_release_rcu);
}
/**
 * blkg_for_each_descendant_pre - pre-order walk of a blkg's descendants
 * @d_blkg: loop cursor pointing to the current descendant
 * @pos_css: used for iteration
 * @p_blkg: target blkg to walk descendants of
 *
 * Walk @d_blkg through the descendants of @p_blkg.  Must be used with RCU
 * read locked.  If called under either blkcg or queue lock, the iteration
 * is guaranteed to include all and only online blkgs.  The caller may
 * update @pos_css by calling css_rightmost_descendant() to skip a subtree.
 * @p_blkg is included in the iteration and the first node to be visited.
 */
#define blkg_for_each_descendant_pre(d_blkg, pos_css, p_blkg)		\
	css_for_each_descendant_pre((pos_css), &(p_blkg)->blkcg->css)	\
		if (((d_blkg) = __blkg_lookup(css_to_blkcg(pos_css),	\
					      (p_blkg)->q, false)))
/**
 * blkg_for_each_descendant_post - post-order walk of a blkg's descendants
 * @d_blkg: loop cursor pointing to the current descendant
 * @pos_css: used for iteration
 * @p_blkg: target blkg to walk descendants of
 *
 * Similar to blkg_for_each_descendant_pre() but performs post-order
 * traversal instead.  Synchronization rules are the same.  @p_blkg is
 * included in the iteration and the last node to be visited.
 */
#define blkg_for_each_descendant_post(d_blkg, pos_css, p_blkg)		\
	css_for_each_descendant_post((pos_css), &(p_blkg)->blkcg->css)	\
		if (((d_blkg) = __blkg_lookup(css_to_blkcg(pos_css),	\
					      (p_blkg)->q, false)))
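
/*
 * Usage sketch (illustrative): propagating a state change down the
 * hierarchy under RCU, starting from @p_blkg itself.  foo_update() is a
 * hypothetical per-blkg helper.
 *
 *	struct blkcg_gq *d_blkg;
 *	struct cgroup_subsys_state *pos_css;
 *
 *	rcu_read_lock();
 *	blkg_for_each_descendant_pre(d_blkg, pos_css, p_blkg)
 *		foo_update(d_blkg);
 *	rcu_read_unlock();
 */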
/**
 * blk_get_rl - get request_list to use
 * @q: request_queue of interest
 * @bio: bio which will be attached to the allocated request (may be %NULL)
 *
 * The caller wants to allocate a request from @q to use for @bio.  Find
 * the request_list to use and obtain a reference on it.  Should be called
 * under queue_lock.  This function is guaranteed to return non-%NULL
 * request_list.
 */
static inline struct request_list *blk_get_rl(struct request_queue *q,
					      struct bio *bio)
{
	struct blkcg *blkcg;
	struct blkcg_gq *blkg;

	rcu_read_lock();

	blkcg = bio_blkcg(bio);

	/* bypass blkg lookup and use @q->root_rl directly for root */
	if (blkcg == &blkcg_root)
		goto root_rl;

	/*
	 * Try to use blkg->rl.  blkg lookup may fail under memory pressure
	 * or if either the blkcg or queue is going away.  Fall back to
	 * root_rl in such cases.
	 */
	blkg = blkg_lookup(blkcg, q);
	if (unlikely(!blkg))
		goto root_rl;

	blkg_get(blkg);
	rcu_read_unlock();
	return &blkg->rl;
root_rl:
	rcu_read_unlock();
	return &q->root_rl;
}

/**
 * blk_put_rl - put request_list
 * @rl: request_list to put
 *
 * Put the reference acquired by blk_get_rl().  Should be called under
 * queue_lock.
 */
static inline void blk_put_rl(struct request_list *rl)
{
	if (rl->blkg->blkcg != &blkcg_root)
		blkg_put(rl->blkg);
}

/**
 * blk_rq_set_rl - associate a request with a request_list
 * @rq: request of interest
 * @rl: target request_list
 *
 * Associate @rq with @rl so that accounting and freeing can know the
 * request_list @rq came from.
 */
static inline void blk_rq_set_rl(struct request *rq, struct request_list *rl)
{
	rq->rl = rl;
}

/**
 * blk_rq_rl - return the request_list a request came from
 * @rq: request of interest
 *
 * Return the request_list @rq is allocated from.
 */
static inline struct request_list *blk_rq_rl(struct request *rq)
{
	return rq->rl;
}
struct request_list *__blk_queue_next_rl(struct request_list *rl,
					 struct request_queue *q);
/**
 * blk_queue_for_each_rl - iterate through all request_lists of a request_queue
 *
 * Should be used under queue_lock.
 */
#define blk_queue_for_each_rl(rl, q)	\
	for ((rl) = &(q)->root_rl; (rl); (rl) = __blk_queue_next_rl((rl), (q)))
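
/*
 * Usage sketch (illustrative): the get/set/put calls pair up around the
 * lifetime of a request, all under queue_lock.  On allocation:
 *
 *	rl = blk_get_rl(q, bio);
 *	blk_rq_set_rl(rq, rl);
 *
 * and when the request is freed:
 *
 *	blk_put_rl(blk_rq_rl(rq));
 */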
static inline int blkg_stat_init(struct blkg_stat *stat, gfp_t gfp)
{
	int ret;

	ret = percpu_counter_init(&stat->cpu_cnt, 0, gfp);
	if (ret)
		return ret;
	atomic64_set(&stat->aux_cnt, 0);
	return 0;
}

static inline void blkg_stat_exit(struct blkg_stat *stat)
{
	percpu_counter_destroy(&stat->cpu_cnt);
}
/**
 * blkg_stat_add - add a value to a blkg_stat
 * @stat: target blkg_stat
 * @val: value to add
 *
 * Add @val to @stat.  The caller must ensure that IRQs on the same CPU
 * don't re-enter this function for the same counter.
 */
static inline void blkg_stat_add(struct blkg_stat *stat, uint64_t val)
{
	__percpu_counter_add(&stat->cpu_cnt, val, BLKG_STAT_CPU_BATCH);
}
/**
 * blkg_stat_read - read the current value of a blkg_stat
 * @stat: blkg_stat to read
 */
static inline uint64_t blkg_stat_read(struct blkg_stat *stat)
{
	return percpu_counter_sum_positive(&stat->cpu_cnt);
}

/**
 * blkg_stat_reset - reset a blkg_stat
 * @stat: blkg_stat to reset
 */
static inline void blkg_stat_reset(struct blkg_stat *stat)
{
	percpu_counter_set(&stat->cpu_cnt, 0);
	atomic64_set(&stat->aux_cnt, 0);
}

/**
 * blkg_stat_add_aux - add a blkg_stat into another's aux count
 * @to: the destination blkg_stat
 * @from: the source
 *
 * Add @from's count including the aux one to @to's aux count.
 */
static inline void blkg_stat_add_aux(struct blkg_stat *to,
				     struct blkg_stat *from)
{
	atomic64_add(blkg_stat_read(from) + atomic64_read(&from->aux_cnt),
		     &to->aux_cnt);
}
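
/*
 * Usage sketch (illustrative): a policy keeps a blkg_stat in its pd and
 * drives it through init/add/read/exit.  The pd field foo_stat is
 * hypothetical.
 *
 *	if (blkg_stat_init(&pd->foo_stat, GFP_KERNEL))
 *		goto err;
 *	blkg_stat_add(&pd->foo_stat, 1);
 *	total = blkg_stat_read(&pd->foo_stat);
 *	blkg_stat_exit(&pd->foo_stat);
 */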
static inline int blkg_rwstat_init(struct blkg_rwstat *rwstat, gfp_t gfp)
{
	int i, ret;

	for (i = 0; i < BLKG_RWSTAT_NR; i++) {
		ret = percpu_counter_init(&rwstat->cpu_cnt[i], 0, gfp);
		if (ret) {
			while (--i >= 0)
				percpu_counter_destroy(&rwstat->cpu_cnt[i]);
			return ret;
		}
		atomic64_set(&rwstat->aux_cnt[i], 0);
	}
	return 0;
}

static inline void blkg_rwstat_exit(struct blkg_rwstat *rwstat)
{
	int i;

	for (i = 0; i < BLKG_RWSTAT_NR; i++)
		percpu_counter_destroy(&rwstat->cpu_cnt[i]);
}
/**
 * blkg_rwstat_add - add a value to a blkg_rwstat
 * @rwstat: target blkg_rwstat
 * @rw: mask of REQ_{WRITE|SYNC}
 * @val: value to add
 *
 * Add @val to @rwstat.  The counters are chosen according to @rw.  The
 * caller is responsible for synchronizing calls to this function.
 */
static inline void blkg_rwstat_add(struct blkg_rwstat *rwstat,
				   int rw, uint64_t val)
{
	struct percpu_counter *cnt;

	if (rw & REQ_WRITE)
		cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_WRITE];
	else
		cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_READ];

	__percpu_counter_add(cnt, val, BLKG_STAT_CPU_BATCH);

	if (rw & REQ_SYNC)
		cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_SYNC];
	else
		cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_ASYNC];

	__percpu_counter_add(cnt, val, BLKG_STAT_CPU_BATCH);
}
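
/*
 * Usage sketch (illustrative): accounting a completed 4K synchronous
 * write bumps both the WRITE and SYNC counters with a single call.
 * pd->foo_bytes is a hypothetical blkg_rwstat in a policy's pd.
 *
 *	blkg_rwstat_add(&pd->foo_bytes, REQ_WRITE | REQ_SYNC, 4096);
 */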
/**
 * blkg_rwstat_read - read the current values of a blkg_rwstat
 * @rwstat: blkg_rwstat to read
 *
 * Read the current snapshot of @rwstat and return it in the aux counts.
 */
static inline struct blkg_rwstat blkg_rwstat_read(struct blkg_rwstat *rwstat)
{
	struct blkg_rwstat result;
	int i;

	for (i = 0; i < BLKG_RWSTAT_NR; i++)
		atomic64_set(&result.aux_cnt[i],
			     percpu_counter_sum_positive(&rwstat->cpu_cnt[i]));
	return result;
}

/**
 * blkg_rwstat_total - read the total count of a blkg_rwstat
 * @rwstat: blkg_rwstat to read
 *
 * Return the total count of @rwstat regardless of the IO direction.  This
 * function can be called without synchronization and takes care of u64
 * atomicity.
 */
static inline uint64_t blkg_rwstat_total(struct blkg_rwstat *rwstat)
{
	struct blkg_rwstat tmp = blkg_rwstat_read(rwstat);

	return atomic64_read(&tmp.aux_cnt[BLKG_RWSTAT_READ]) +
		atomic64_read(&tmp.aux_cnt[BLKG_RWSTAT_WRITE]);
}
/**
 * blkg_rwstat_reset - reset a blkg_rwstat
 * @rwstat: blkg_rwstat to reset
 */
static inline void blkg_rwstat_reset(struct blkg_rwstat *rwstat)
{
	int i;

	for (i = 0; i < BLKG_RWSTAT_NR; i++) {
		percpu_counter_set(&rwstat->cpu_cnt[i], 0);
		atomic64_set(&rwstat->aux_cnt[i], 0);
	}
}

/**
 * blkg_rwstat_add_aux - add a blkg_rwstat into another's aux count
 * @to: the destination blkg_rwstat
 * @from: the source
 *
 * Add @from's count including the aux one to @to's aux count.
 */
static inline void blkg_rwstat_add_aux(struct blkg_rwstat *to,
				       struct blkg_rwstat *from)
{
	struct blkg_rwstat v = blkg_rwstat_read(from);
	int i;

	for (i = 0; i < BLKG_RWSTAT_NR; i++)
		atomic64_add(atomic64_read(&v.aux_cnt[i]) +
			     atomic64_read(&from->aux_cnt[i]),
			     &to->aux_cnt[i]);
}
#ifdef CONFIG_BLK_DEV_THROTTLING
extern bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg,
			   struct bio *bio);
#else
static inline bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg,
				  struct bio *bio) { return false; }
#endif
static inline bool blkcg_bio_issue_check(struct request_queue *q,
					 struct bio *bio)
{
	struct blkcg *blkcg;
	struct blkcg_gq *blkg;
	bool throtl = false;

	rcu_read_lock();
	blkcg = bio_blkcg(bio);

	blkg = blkg_lookup(blkcg, q);
	if (unlikely(!blkg)) {
		spin_lock_irq(q->queue_lock);
		blkg = blkg_lookup_create(blkcg, q);
		if (IS_ERR(blkg))
			blkg = NULL;
		spin_unlock_irq(q->queue_lock);
	}

	throtl = blk_throtl_bio(q, blkg, bio);

	if (!throtl) {
		blkg = blkg ?: q->root_blkg;
		blkg_rwstat_add(&blkg->stat_bytes, bio->bi_rw,
				bio->bi_iter.bi_size);
		blkg_rwstat_add(&blkg->stat_ios, bio->bi_rw, 1);
	}

	rcu_read_unlock();
	return !throtl;
}
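
/*
 * Usage sketch (illustrative): the submission path calls this once per
 * bio before handing it to the queue, and only proceeds when the bio
 * isn't being throttled.
 *
 *	if (!blkcg_bio_issue_check(q, bio))
 *		return;
 *	(continue issuing the bio)
 */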
#else	/* CONFIG_BLK_CGROUP */

struct blkcg {
};

struct blkg_policy_data {
};

struct blkcg_policy_data {
};

struct blkcg_gq {
};

struct blkcg_policy {
};

#define blkcg_root_css	((struct cgroup_subsys_state *)ERR_PTR(-EINVAL))

static inline struct cgroup_subsys_state *
task_get_blkcg_css(struct task_struct *task)
{
	return NULL;
}

#ifdef CONFIG_BLOCK

static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, void *key) { return NULL; }
static inline int blkcg_init_queue(struct request_queue *q) { return 0; }
static inline void blkcg_drain_queue(struct request_queue *q) { }
static inline void blkcg_exit_queue(struct request_queue *q) { }
static inline int blkcg_policy_register(struct blkcg_policy *pol) { return 0; }
static inline void blkcg_policy_unregister(struct blkcg_policy *pol) { }
static inline int blkcg_activate_policy(struct request_queue *q,
					const struct blkcg_policy *pol) { return 0; }
static inline void blkcg_deactivate_policy(struct request_queue *q,
					   const struct blkcg_policy *pol) { }

static inline struct blkcg *bio_blkcg(struct bio *bio) { return NULL; }

static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
						  struct blkcg_policy *pol) { return NULL; }
static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd) { return NULL; }
static inline char *blkg_path(struct blkcg_gq *blkg) { return NULL; }
static inline void blkg_get(struct blkcg_gq *blkg) { }
static inline void blkg_put(struct blkcg_gq *blkg) { }

static inline struct request_list *blk_get_rl(struct request_queue *q,
					      struct bio *bio) { return &q->root_rl; }
static inline void blk_put_rl(struct request_list *rl) { }
static inline void blk_rq_set_rl(struct request *rq, struct request_list *rl) { }
static inline struct request_list *blk_rq_rl(struct request *rq) { return &rq->q->root_rl; }

static inline bool blkcg_bio_issue_check(struct request_queue *q,
					 struct bio *bio) { return true; }

#define blk_queue_for_each_rl(rl, q)	\
	for ((rl) = &(q)->root_rl; (rl); (rl) = NULL)

#endif	/* CONFIG_BLOCK */
#endif	/* CONFIG_BLK_CGROUP */
#endif	/* _BLK_CGROUP_H */