#ifndef _BLK_CGROUP_H
#define _BLK_CGROUP_H
/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *                    Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 *                    Nauman Rafique <nauman@google.com>
 */

#include <linux/cgroup.h>
#include <linux/percpu_counter.h>
#include <linux/seq_file.h>
#include <linux/radix-tree.h>
#include <linux/blkdev.h>
#include <linux/atomic.h>

/* percpu_counter batch for blkg_[rw]stats, per-cpu drift doesn't matter */
#define BLKG_STAT_CPU_BATCH     (INT_MAX / 2)

/* Max limits for throttle policy */
#define THROTL_IOPS_MAX         UINT_MAX
#ifdef CONFIG_BLK_CGROUP

enum blkg_rwstat_type {
        BLKG_RWSTAT_READ,
        BLKG_RWSTAT_WRITE,
        BLKG_RWSTAT_SYNC,
        BLKG_RWSTAT_ASYNC,

        BLKG_RWSTAT_NR,
        BLKG_RWSTAT_TOTAL = BLKG_RWSTAT_NR,
};

struct blkcg_gq;

struct blkcg {
        struct cgroup_subsys_state css;
        spinlock_t lock;

        struct radix_tree_root blkg_tree;
        struct blkcg_gq *blkg_hint;
        struct hlist_head blkg_list;

        struct blkcg_policy_data *cpd[BLKCG_MAX_POLS];

        struct list_head all_blkcgs_node;
#ifdef CONFIG_CGROUP_WRITEBACK
        struct list_head cgwb_list;
#endif
};
/*
 * blkg_[rw]stat->aux_cnt is excluded for local stats but included for
 * recursive.  Used to carry stats of dead children, and, for blkg_rwstat,
 * to carry result values from read and sum operations.
 */
struct blkg_stat {
        struct percpu_counter cpu_cnt;
        atomic64_t aux_cnt;
};

struct blkg_rwstat {
        struct percpu_counter cpu_cnt[BLKG_RWSTAT_NR];
        atomic64_t aux_cnt[BLKG_RWSTAT_NR];
};
/*
 * A blkcg_gq (blkg) is an association between a block cgroup (blkcg) and a
 * request_queue (q).  This is used by blkcg policies which need to track
 * information per blkcg - q pair.
 *
 * There can be multiple active blkcg policies and each blkg:policy pair is
 * represented by a blkg_policy_data which is allocated and freed by each
 * policy's pd_alloc/free_fn() methods.  A policy can allocate a private
 * data area by allocating a larger data structure which embeds
 * blkg_policy_data at the beginning.
 */
struct blkg_policy_data {
        /* the blkg and policy id this per-policy data belongs to */
        struct blkcg_gq *blkg;
        int plid;
};
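
/*
 * For illustration only, a policy's per-blkg data can embed the struct
 * above at offset zero (a sketch; "example_blkg_data" and its fields are
 * hypothetical names, not part of this interface):
 *
 *      struct example_blkg_data {
 *              struct blkg_policy_data pd;     // must be first
 *              struct blkg_stat stat;          // policy-private state
 *      };
 *
 * pd_alloc_fn() then returns &example_blkg_data->pd, and the policy
 * recovers its data with container_of(pd, struct example_blkg_data, pd).
 */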
/*
 * Policies that need to keep per-blkcg data which is independent of any
 * request_queue associated with it should implement cpd_alloc/free_fn()
 * methods.  A policy can allocate a private data area by allocating a
 * larger data structure which embeds blkcg_policy_data at the beginning.
 * cpd_init() is invoked to let each policy handle per-blkcg data.
 */
struct blkcg_policy_data {
        /* the blkcg and policy id this per-policy data belongs to */
        struct blkcg *blkcg;
        int plid;
};
/* association between a blk cgroup and a request queue */
struct blkcg_gq {
        /* Pointer to the associated request_queue */
        struct request_queue *q;
        struct list_head q_node;
        struct hlist_node blkcg_node;
        struct blkcg *blkcg;

        /*
         * Each blkg gets congested separately and the congestion state is
         * propagated to the matching bdi_writeback_congested.
         */
        struct bdi_writeback_congested *wb_congested;

        /* all non-root blkcg_gq's are guaranteed to have access to parent */
        struct blkcg_gq *parent;

        /* request allocation list for this blkcg-q pair */
        struct request_list rl;

        /* reference count */
        atomic_t refcnt;

        /* is this blkg online? protected by both blkcg and q locks */
        bool online;

        struct blkg_rwstat stat_bytes;
        struct blkg_rwstat stat_ios;

        struct blkg_policy_data *pd[BLKCG_MAX_POLS];

        struct rcu_head rcu_head;
};
typedef struct blkcg_policy_data *(blkcg_pol_alloc_cpd_fn)(gfp_t gfp);
typedef void (blkcg_pol_init_cpd_fn)(struct blkcg_policy_data *cpd);
typedef void (blkcg_pol_free_cpd_fn)(struct blkcg_policy_data *cpd);
typedef void (blkcg_pol_bind_cpd_fn)(struct blkcg_policy_data *cpd);
typedef struct blkg_policy_data *(blkcg_pol_alloc_pd_fn)(gfp_t gfp, int node);
typedef void (blkcg_pol_init_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_online_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_offline_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_free_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_reset_pd_stats_fn)(struct blkg_policy_data *pd);

struct blkcg_policy {
        int plid;
        /* cgroup files for the policy */
        struct cftype *dfl_cftypes;
        struct cftype *legacy_cftypes;

        /* operations */
        blkcg_pol_alloc_cpd_fn *cpd_alloc_fn;
        blkcg_pol_init_cpd_fn *cpd_init_fn;
        blkcg_pol_free_cpd_fn *cpd_free_fn;
        blkcg_pol_bind_cpd_fn *cpd_bind_fn;

        blkcg_pol_alloc_pd_fn *pd_alloc_fn;
        blkcg_pol_init_pd_fn *pd_init_fn;
        blkcg_pol_online_pd_fn *pd_online_fn;
        blkcg_pol_offline_pd_fn *pd_offline_fn;
        blkcg_pol_free_pd_fn *pd_free_fn;
        blkcg_pol_reset_pd_stats_fn *pd_reset_stats_fn;
};
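
/*
 * A minimal policy skeleton, as a sketch (every "example_*" name is
 * hypothetical; plid is assigned by blkcg_policy_register()):
 *
 *      static struct blkg_policy_data *example_pd_alloc(gfp_t gfp, int node)
 *      {
 *              struct example_blkg_data *ed;
 *
 *              ed = kzalloc_node(sizeof(*ed), gfp, node);
 *              return ed ? &ed->pd : NULL;
 *      }
 *
 *      static void example_pd_free(struct blkg_policy_data *pd)
 *      {
 *              kfree(container_of(pd, struct example_blkg_data, pd));
 *      }
 *
 *      static struct blkcg_policy example_policy = {
 *              .pd_alloc_fn    = example_pd_alloc,
 *              .pd_free_fn     = example_pd_free,
 *      };
 *
 * Registration would then be blkcg_policy_register(&example_policy) from
 * module init, paired with blkcg_policy_unregister() on exit.
 */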
extern struct blkcg blkcg_root;
extern struct cgroup_subsys_state * const blkcg_root_css;

struct blkcg_gq *blkg_lookup_slowpath(struct blkcg *blkcg,
                                      struct request_queue *q, bool update_hint);
struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
                                    struct request_queue *q);
int blkcg_init_queue(struct request_queue *q);
void blkcg_drain_queue(struct request_queue *q);
void blkcg_exit_queue(struct request_queue *q);

/* Blkio controller policy registration */
int blkcg_policy_register(struct blkcg_policy *pol);
void blkcg_policy_unregister(struct blkcg_policy *pol);
int blkcg_activate_policy(struct request_queue *q,
                          const struct blkcg_policy *pol);
void blkcg_deactivate_policy(struct request_queue *q,
                             const struct blkcg_policy *pol);

const char *blkg_dev_name(struct blkcg_gq *blkg);
void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
                       u64 (*prfill)(struct seq_file *,
                                     struct blkg_policy_data *, int),
                       const struct blkcg_policy *pol, int data,
                       bool show_total);
u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v);
u64 __blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
                         const struct blkg_rwstat *rwstat);
u64 blkg_prfill_stat(struct seq_file *sf, struct blkg_policy_data *pd, int off);
u64 blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
                       int off);

int blkg_print_stat_bytes(struct seq_file *sf, void *v);
int blkg_print_stat_ios(struct seq_file *sf, void *v);
int blkg_print_stat_bytes_recursive(struct seq_file *sf, void *v);
int blkg_print_stat_ios_recursive(struct seq_file *sf, void *v);

u64 blkg_stat_recursive_sum(struct blkcg_gq *blkg,
                            struct blkcg_policy *pol, int off);
struct blkg_rwstat blkg_rwstat_recursive_sum(struct blkcg_gq *blkg,
                                             struct blkcg_policy *pol, int off);
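
/*
 * Dumping per-blkg numbers from a cftype seq_show goes through
 * blkcg_print_blkgs() with a prfill callback.  A sketch (example_policy
 * and the "stat" field are hypothetical, as above; the int argument is
 * the byte offset of a struct blkg_stat inside the policy's pd, which is
 * what blkg_prfill_stat() expects):
 *
 *      static int example_print_stat(struct seq_file *sf, void *v)
 *      {
 *              blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
 *                                blkg_prfill_stat, &example_policy,
 *                                offsetof(struct example_blkg_data, stat),
 *                                true);
 *              return 0;
 *      }
 */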
struct blkg_conf_ctx {
        struct gendisk *disk;
        struct blkcg_gq *blkg;
        char *body;
};

int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
                   char *input, struct blkg_conf_ctx *ctx);
void blkg_conf_finish(struct blkg_conf_ctx *ctx);
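
/*
 * Typical config-write pattern in a policy's cftype write handler, as a
 * sketch (parsing of ctx.body is policy-specific):
 *
 *      struct blkg_conf_ctx ctx;
 *      int ret;
 *
 *      ret = blkg_conf_prep(blkcg, &example_policy, buf, &ctx);
 *      if (ret)
 *              return ret;
 *      // parse ctx.body, apply to blkg_to_pd(ctx.blkg, &example_policy)
 *      blkg_conf_finish(&ctx);
 *
 * blkg_conf_prep() resolves the "MAJ:MIN" prefix of @input to a blkg and
 * blkg_conf_finish() drops the references it acquired.
 */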
static inline struct blkcg *css_to_blkcg(struct cgroup_subsys_state *css)
{
        return css ? container_of(css, struct blkcg, css) : NULL;
}

static inline struct blkcg *task_blkcg(struct task_struct *tsk)
{
        return css_to_blkcg(task_css(tsk, io_cgrp_id));
}

static inline struct blkcg *bio_blkcg(struct bio *bio)
{
        if (bio && bio->bi_css)
                return css_to_blkcg(bio->bi_css);
        return task_blkcg(current);
}

static inline struct cgroup_subsys_state *
task_get_blkcg_css(struct task_struct *task)
{
        return task_get_css(task, io_cgrp_id);
}

/**
 * blkcg_parent - get the parent of a blkcg
 * @blkcg: blkcg of interest
 *
 * Return the parent blkcg of @blkcg.  Can be called anytime.
 */
static inline struct blkcg *blkcg_parent(struct blkcg *blkcg)
{
        return css_to_blkcg(blkcg->css.parent);
}

/**
 * __blkg_lookup - internal version of blkg_lookup()
 * @blkcg: blkcg of interest
 * @q: request_queue of interest
 * @update_hint: whether to update lookup hint with the result or not
 *
 * This is the internal version and shouldn't be used by policy
 * implementations.  Looks up blkgs for the @blkcg - @q pair regardless of
 * @q's bypass state.  If @update_hint is %true, the caller should be
 * holding @q->queue_lock and the lookup hint is updated on success.
 */
static inline struct blkcg_gq *__blkg_lookup(struct blkcg *blkcg,
                                             struct request_queue *q,
                                             bool update_hint)
{
        struct blkcg_gq *blkg;

        if (blkcg == &blkcg_root)
                return q->root_blkg;

        blkg = rcu_dereference(blkcg->blkg_hint);
        if (blkg && blkg->q == q)
                return blkg;

        return blkg_lookup_slowpath(blkcg, q, update_hint);
}

/**
 * blkg_lookup - lookup blkg for the specified blkcg - q pair
 * @blkcg: blkcg of interest
 * @q: request_queue of interest
 *
 * Lookup blkg for the @blkcg - @q pair.  This function should be called
 * under RCU read lock and is guaranteed to return %NULL if @q is bypassing
 * - see blk_queue_bypass_start() for details.
 */
static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg,
                                           struct request_queue *q)
{
        WARN_ON_ONCE(!rcu_read_lock_held());

        if (unlikely(blk_queue_bypass(q)))
                return NULL;
        return __blkg_lookup(blkcg, q, false);
}
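
/*
 * Lookup usage sketch: resolve the blkg for a bio's cgroup under RCU and
 * pin it before dropping the RCU read lock (this mirrors what
 * blk_get_rl() below does internally):
 *
 *      rcu_read_lock();
 *      blkg = blkg_lookup(bio_blkcg(bio), q);
 *      if (blkg)
 *              blkg_get(blkg);
 *      rcu_read_unlock();
 */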
/**
 * blkg_to_pd - get policy private data
 * @blkg: blkg of interest
 * @pol: policy of interest
 *
 * Return pointer to private data associated with the @blkg-@pol pair.
 */
static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
                                                  struct blkcg_policy *pol)
{
        return blkg ? blkg->pd[pol->plid] : NULL;
}

static inline struct blkcg_policy_data *blkcg_to_cpd(struct blkcg *blkcg,
                                                     struct blkcg_policy *pol)
{
        return blkcg ? blkcg->cpd[pol->plid] : NULL;
}

/**
 * pd_to_blkg - get blkg associated with policy private data
 * @pd: policy private data of interest
 *
 * @pd is policy private data.  Determine the blkg it's associated with.
 */
static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd)
{
        return pd ? pd->blkg : NULL;
}

static inline struct blkcg *cpd_to_blkcg(struct blkcg_policy_data *cpd)
{
        return cpd ? cpd->blkcg : NULL;
}
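
/*
 * A policy typically wraps these helpers with its own accessor, combining
 * blkg_to_pd() with container_of() to recover its embedding structure (a
 * sketch using the hypothetical example names from above):
 *
 *      static struct example_blkg_data *blkg_to_ed(struct blkcg_gq *blkg)
 *      {
 *              struct blkg_policy_data *pd = blkg_to_pd(blkg, &example_policy);
 *
 *              return pd ? container_of(pd, struct example_blkg_data, pd) : NULL;
 *      }
 */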
/**
 * blkg_path - format cgroup path of blkg
 * @blkg: blkg of interest
 * @buf: target buffer
 * @buflen: target buffer length
 *
 * Format the path of the cgroup of @blkg into @buf.
 */
static inline int blkg_path(struct blkcg_gq *blkg, char *buf, int buflen)
{
        char *p;

        p = cgroup_path(blkg->blkcg->css.cgroup, buf, buflen);
        if (!p) {
                strncpy(buf, "<unavailable>", buflen);
                return -ENAMETOOLONG;
        }

        memmove(buf, p, buf + buflen - p);
        return 0;
}

/**
 * blkg_get - get a blkg reference
 * @blkg: blkg to get
 *
 * The caller should be holding an existing reference.
 */
static inline void blkg_get(struct blkcg_gq *blkg)
{
        WARN_ON_ONCE(atomic_read(&blkg->refcnt) <= 0);
        atomic_inc(&blkg->refcnt);
}

void __blkg_release_rcu(struct rcu_head *rcu);

/**
 * blkg_put - put a blkg reference
 * @blkg: blkg to put
 */
static inline void blkg_put(struct blkcg_gq *blkg)
{
        WARN_ON_ONCE(atomic_read(&blkg->refcnt) <= 0);
        if (atomic_dec_and_test(&blkg->refcnt))
                call_rcu(&blkg->rcu_head, __blkg_release_rcu);
}
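
/*
 * Note the asymmetry above: blkg_get() is a plain atomic_inc() because
 * the caller must already hold a reference, while the final blkg_put()
 * defers the actual free through call_rcu() so that lockless readers
 * which looked the blkg up under rcu_read_lock() can finish safely.
 */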
/**
 * blkg_for_each_descendant_pre - pre-order walk of a blkg's descendants
 * @d_blkg: loop cursor pointing to the current descendant
 * @pos_css: used for iteration
 * @p_blkg: target blkg to walk descendants of
 *
 * Walk @d_blkg through the descendants of @p_blkg.  Must be used with RCU
 * read locked.  If called under either blkcg or queue lock, the iteration
 * is guaranteed to include all and only online blkgs.  The caller may
 * update @pos_css by calling css_rightmost_descendant() to skip a subtree.
 * @p_blkg is included in the iteration and the first node to be visited.
 */
#define blkg_for_each_descendant_pre(d_blkg, pos_css, p_blkg)          \
        css_for_each_descendant_pre((pos_css), &(p_blkg)->blkcg->css)  \
                if (((d_blkg) = __blkg_lookup(css_to_blkcg(pos_css),   \
                                              (p_blkg)->q, false)))

/**
 * blkg_for_each_descendant_post - post-order walk of a blkg's descendants
 * @d_blkg: loop cursor pointing to the current descendant
 * @pos_css: used for iteration
 * @p_blkg: target blkg to walk descendants of
 *
 * Similar to blkg_for_each_descendant_pre() but performs post-order
 * traversal instead.  Synchronization rules are the same.  @p_blkg is
 * included in the iteration and the last node to be visited.
 */
#define blkg_for_each_descendant_post(d_blkg, pos_css, p_blkg)         \
        css_for_each_descendant_post((pos_css), &(p_blkg)->blkcg->css) \
                if (((d_blkg) = __blkg_lookup(css_to_blkcg(pos_css),   \
                                              (p_blkg)->q, false)))
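
/*
 * Iteration sketch: visit every online descendant blkg of @blkg on the
 * same queue (requires rcu_read_lock(); cgroups with no blkg on this
 * queue are skipped because __blkg_lookup() returns NULL for them):
 *
 *      struct cgroup_subsys_state *pos_css;
 *      struct blkcg_gq *d_blkg;
 *
 *      rcu_read_lock();
 *      blkg_for_each_descendant_pre(d_blkg, pos_css, blkg) {
 *              // e.g. reset per-blkg policy state
 *      }
 *      rcu_read_unlock();
 */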
/**
 * blk_get_rl - get request_list to use
 * @q: request_queue of interest
 * @bio: bio which will be attached to the allocated request (may be %NULL)
 *
 * The caller wants to allocate a request from @q to use for @bio.  Find
 * the request_list to use and obtain a reference on it.  Should be called
 * under queue_lock.  This function is guaranteed to return non-%NULL
 * request_list.
 */
static inline struct request_list *blk_get_rl(struct request_queue *q,
                                              struct bio *bio)
{
        struct blkcg *blkcg;
        struct blkcg_gq *blkg;

        rcu_read_lock();

        blkcg = bio_blkcg(bio);

        /* bypass blkg lookup and use @q->root_rl directly for root */
        if (blkcg == &blkcg_root)
                goto root_rl;

        /*
         * Try to use blkg->rl.  blkg lookup may fail under memory pressure
         * or if either the blkcg or queue is going away.  Fall back to
         * root_rl in such cases.
         */
        blkg = blkg_lookup(blkcg, q);
        if (unlikely(!blkg))
                goto root_rl;

        blkg_get(blkg);
        rcu_read_unlock();
        return &blkg->rl;
root_rl:
        rcu_read_unlock();
        return &q->root_rl;
}
/**
 * blk_put_rl - put request_list
 * @rl: request_list to put
 *
 * Put the reference acquired by blk_get_rl().  Should be called under
 * queue_lock.
 */
static inline void blk_put_rl(struct request_list *rl)
{
        if (rl->blkg->blkcg != &blkcg_root)
                blkg_put(rl->blkg);
}

/**
 * blk_rq_set_rl - associate a request with a request_list
 * @rq: request of interest
 * @rl: target request_list
 *
 * Associate @rq with @rl so that accounting and freeing can know the
 * request_list @rq came from.
 */
static inline void blk_rq_set_rl(struct request *rq, struct request_list *rl)
{
        rq->rl = rl;
}

/**
 * blk_rq_rl - return the request_list a request came from
 * @rq: request of interest
 *
 * Return the request_list @rq is allocated from.
 */
static inline struct request_list *blk_rq_rl(struct request *rq)
{
        return rq->rl;
}
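
/*
 * Request-list lifecycle as used by the request allocation path (a sketch
 * of the pairing only, not the actual allocator):
 *
 *      rl = blk_get_rl(q, bio);        // under queue_lock
 *      ...
 *      blk_rq_set_rl(rq, rl);          // remember where rq came from
 *      ...
 *      blk_put_rl(blk_rq_rl(rq));      // on request free
 */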
struct request_list *__blk_queue_next_rl(struct request_list *rl,
                                         struct request_queue *q);
/**
 * blk_queue_for_each_rl - iterate through all request_lists of a request_queue
 *
 * Should be used under queue_lock.
 */
#define blk_queue_for_each_rl(rl, q)    \
        for ((rl) = &(q)->root_rl; (rl); (rl) = __blk_queue_next_rl((rl), (q)))
static inline int blkg_stat_init(struct blkg_stat *stat, gfp_t gfp)
{
        int ret;

        ret = percpu_counter_init(&stat->cpu_cnt, 0, gfp);
        if (ret)
                return ret;
        atomic64_set(&stat->aux_cnt, 0);
        return 0;
}

static inline void blkg_stat_exit(struct blkg_stat *stat)
{
        percpu_counter_destroy(&stat->cpu_cnt);
}

/**
 * blkg_stat_add - add a value to a blkg_stat
 * @stat: target blkg_stat
 * @val: value to add
 *
 * Add @val to @stat.  The caller must ensure that IRQs on the same CPU
 * don't re-enter this function for the same counter.
 */
static inline void blkg_stat_add(struct blkg_stat *stat, uint64_t val)
{
        __percpu_counter_add(&stat->cpu_cnt, val, BLKG_STAT_CPU_BATCH);
}

/**
 * blkg_stat_read - read the current value of a blkg_stat
 * @stat: blkg_stat to read
 */
static inline uint64_t blkg_stat_read(struct blkg_stat *stat)
{
        return percpu_counter_sum_positive(&stat->cpu_cnt);
}

/**
 * blkg_stat_reset - reset a blkg_stat
 * @stat: blkg_stat to reset
 */
static inline void blkg_stat_reset(struct blkg_stat *stat)
{
        percpu_counter_set(&stat->cpu_cnt, 0);
        atomic64_set(&stat->aux_cnt, 0);
}

/**
 * blkg_stat_add_aux - add a blkg_stat into another's aux count
 * @to: the destination blkg_stat
 * @from: the source
 *
 * Add @from's count including the aux one to @to's aux count.
 */
static inline void blkg_stat_add_aux(struct blkg_stat *to,
                                     struct blkg_stat *from)
{
        atomic64_add(blkg_stat_read(from) + atomic64_read(&from->aux_cnt),
                     &to->aux_cnt);
}
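
/*
 * blkg_stat lifecycle sketch: a policy initializes the counter when its
 * pd is set up, bumps it in the hot path, and tears it down on pd_free
 * (the "stat" field is the hypothetical example from above):
 *
 *      if (blkg_stat_init(&ed->stat, gfp))
 *              // fail pd allocation
 *      blkg_stat_add(&ed->stat, 1);            // hot path
 *      pr_debug("count=%llu\n",
 *               (unsigned long long)blkg_stat_read(&ed->stat));
 *      blkg_stat_exit(&ed->stat);              // on pd_free
 */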
static inline int blkg_rwstat_init(struct blkg_rwstat *rwstat, gfp_t gfp)
{
        int i, ret;

        for (i = 0; i < BLKG_RWSTAT_NR; i++) {
                ret = percpu_counter_init(&rwstat->cpu_cnt[i], 0, gfp);
                if (ret) {
                        while (--i >= 0)
                                percpu_counter_destroy(&rwstat->cpu_cnt[i]);
                        return ret;
                }
                atomic64_set(&rwstat->aux_cnt[i], 0);
        }
        return 0;
}

static inline void blkg_rwstat_exit(struct blkg_rwstat *rwstat)
{
        int i;

        for (i = 0; i < BLKG_RWSTAT_NR; i++)
                percpu_counter_destroy(&rwstat->cpu_cnt[i]);
}

/**
 * blkg_rwstat_add - add a value to a blkg_rwstat
 * @rwstat: target blkg_rwstat
 * @rw: mask of REQ_{WRITE|SYNC}
 * @val: value to add
 *
 * Add @val to @rwstat.  The counters are chosen according to @rw.  The
 * caller is responsible for synchronizing calls to this function.
 */
static inline void blkg_rwstat_add(struct blkg_rwstat *rwstat,
                                   int rw, uint64_t val)
{
        struct percpu_counter *cnt;

        if (rw & REQ_WRITE)
                cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_WRITE];
        else
                cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_READ];

        __percpu_counter_add(cnt, val, BLKG_STAT_CPU_BATCH);

        if (rw & REQ_SYNC)
                cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_SYNC];
        else
                cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_ASYNC];

        __percpu_counter_add(cnt, val, BLKG_STAT_CPU_BATCH);
}
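
/*
 * Each blkg_rwstat_add() call thus bumps exactly two counters: one for
 * direction (READ or WRITE) and one for synchrony (SYNC or ASYNC).  For
 * example, accounting a 4k synchronous write would be:
 *
 *      blkg_rwstat_add(&blkg->stat_bytes, REQ_WRITE | REQ_SYNC, 4096);
 */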
/**
 * blkg_rwstat_read - read the current values of a blkg_rwstat
 * @rwstat: blkg_rwstat to read
 *
 * Read the current snapshot of @rwstat and return it in the aux counts.
 */
static inline struct blkg_rwstat blkg_rwstat_read(struct blkg_rwstat *rwstat)
{
        struct blkg_rwstat result;
        int i;

        for (i = 0; i < BLKG_RWSTAT_NR; i++)
                atomic64_set(&result.aux_cnt[i],
                             percpu_counter_sum_positive(&rwstat->cpu_cnt[i]));
        return result;
}

/**
 * blkg_rwstat_total - read the total count of a blkg_rwstat
 * @rwstat: blkg_rwstat to read
 *
 * Return the total count of @rwstat regardless of the IO direction.  This
 * function can be called without synchronization and takes care of u64
 * atomicity.
 */
static inline uint64_t blkg_rwstat_total(struct blkg_rwstat *rwstat)
{
        struct blkg_rwstat tmp = blkg_rwstat_read(rwstat);

        return atomic64_read(&tmp.aux_cnt[BLKG_RWSTAT_READ]) +
               atomic64_read(&tmp.aux_cnt[BLKG_RWSTAT_WRITE]);
}

/**
 * blkg_rwstat_reset - reset a blkg_rwstat
 * @rwstat: blkg_rwstat to reset
 */
static inline void blkg_rwstat_reset(struct blkg_rwstat *rwstat)
{
        int i;

        for (i = 0; i < BLKG_RWSTAT_NR; i++) {
                percpu_counter_set(&rwstat->cpu_cnt[i], 0);
                atomic64_set(&rwstat->aux_cnt[i], 0);
        }
}

/**
 * blkg_rwstat_add_aux - add a blkg_rwstat into another's aux count
 * @to: the destination blkg_rwstat
 * @from: the source
 *
 * Add @from's count including the aux one to @to's aux count.
 */
static inline void blkg_rwstat_add_aux(struct blkg_rwstat *to,
                                       struct blkg_rwstat *from)
{
        struct blkg_rwstat v = blkg_rwstat_read(from);
        int i;

        for (i = 0; i < BLKG_RWSTAT_NR; i++)
                atomic64_add(atomic64_read(&v.aux_cnt[i]) +
                             atomic64_read(&from->aux_cnt[i]),
                             &to->aux_cnt[i]);
}
#ifdef CONFIG_BLK_DEV_THROTTLING
extern bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg,
                           struct bio *bio);
#else
static inline bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg,
                                  struct bio *bio) { return false; }
#endif

static inline bool blkcg_bio_issue_check(struct request_queue *q,
                                         struct bio *bio)
{
        struct blkcg *blkcg;
        struct blkcg_gq *blkg;
        bool throtl = false;

        rcu_read_lock();
        blkcg = bio_blkcg(bio);

        blkg = blkg_lookup(blkcg, q);
        if (unlikely(!blkg)) {
                spin_lock_irq(q->queue_lock);
                blkg = blkg_lookup_create(blkcg, q);
                if (IS_ERR(blkg))
                        blkg = NULL;
                spin_unlock_irq(q->queue_lock);
        }

        throtl = blk_throtl_bio(q, blkg, bio);

        if (!throtl) {
                blkg = blkg ?: q->root_blkg;
                /* stats are keyed on bi_rw (REQ_* mask), not the BIO_* state
                 * bits in bi_flags */
                blkg_rwstat_add(&blkg->stat_bytes, bio->bi_rw,
                                bio->bi_iter.bi_size);
                blkg_rwstat_add(&blkg->stat_ios, bio->bi_rw, 1);
        }

        rcu_read_unlock();
        return !throtl;
}
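
/*
 * blkcg_bio_issue_check() is the per-bio hook on the submission path: it
 * resolves (creating if necessary) the blkg for the issuing cgroup, lets
 * throttling veto or delay the bio, and accounts bytes/ios to the blkg
 * the bio was charged to.  A submission path would use it roughly as:
 *
 *      if (!blkcg_bio_issue_check(q, bio))
 *              return;         // bio consumed by throttling
 */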
#else   /* CONFIG_BLK_CGROUP */

struct blkcg {
};

struct blkg_policy_data {
};

struct blkcg_policy_data {
};

struct blkcg_gq {
};

struct blkcg_policy {
};

#define blkcg_root_css  ((struct cgroup_subsys_state *)ERR_PTR(-EINVAL))

static inline struct cgroup_subsys_state *
task_get_blkcg_css(struct task_struct *task)
{
        return NULL;
}

#ifdef CONFIG_BLOCK

static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, void *key) { return NULL; }
static inline int blkcg_init_queue(struct request_queue *q) { return 0; }
static inline void blkcg_drain_queue(struct request_queue *q) { }
static inline void blkcg_exit_queue(struct request_queue *q) { }
static inline int blkcg_policy_register(struct blkcg_policy *pol) { return 0; }
static inline void blkcg_policy_unregister(struct blkcg_policy *pol) { }
static inline int blkcg_activate_policy(struct request_queue *q,
                                        const struct blkcg_policy *pol) { return 0; }
static inline void blkcg_deactivate_policy(struct request_queue *q,
                                           const struct blkcg_policy *pol) { }

static inline struct blkcg *bio_blkcg(struct bio *bio) { return NULL; }

static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
                                                  struct blkcg_policy *pol) { return NULL; }
static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd) { return NULL; }
static inline char *blkg_path(struct blkcg_gq *blkg) { return NULL; }
static inline void blkg_get(struct blkcg_gq *blkg) { }
static inline void blkg_put(struct blkcg_gq *blkg) { }

static inline struct request_list *blk_get_rl(struct request_queue *q,
                                              struct bio *bio) { return &q->root_rl; }
static inline void blk_put_rl(struct request_list *rl) { }
static inline void blk_rq_set_rl(struct request *rq, struct request_list *rl) { }
static inline struct request_list *blk_rq_rl(struct request *rq) { return &rq->q->root_rl; }

static inline bool blkcg_bio_issue_check(struct request_queue *q,
                                         struct bio *bio) { return true; }

#define blk_queue_for_each_rl(rl, q)    \
        for ((rl) = &(q)->root_rl; (rl); (rl) = NULL)

#endif  /* CONFIG_BLOCK */
#endif  /* CONFIG_BLK_CGROUP */
#endif  /* _BLK_CGROUP_H */