/*
 * include/linux/backing-dev.h
 *
 * low-level device information and state which is propagated up through
 * to high-level code.
 */
#ifndef _LINUX_BACKING_DEV_H
#define _LINUX_BACKING_DEV_H

#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/blkdev.h>
#include <linux/writeback.h>
#include <linux/blk-cgroup.h>
#include <linux/backing-dev-defs.h>

int __must_check bdi_init(struct backing_dev_info *bdi);
void bdi_destroy(struct backing_dev_info *bdi);

__printf(3, 4)
int bdi_register(struct backing_dev_info *bdi, struct device *parent,
                 const char *fmt, ...);
int bdi_register_dev(struct backing_dev_info *bdi, dev_t dev);
void bdi_unregister(struct backing_dev_info *bdi);
int __must_check bdi_setup_and_register(struct backing_dev_info *, char *);
void wb_start_writeback(struct bdi_writeback *wb, long nr_pages,
                        bool range_cyclic, enum wb_reason reason);
void bdi_start_background_writeback(struct backing_dev_info *bdi);
void wb_workfn(struct work_struct *work);
void wb_wakeup_delayed(struct bdi_writeback *wb);

extern spinlock_t bdi_lock;
extern struct list_head bdi_list;

extern struct workqueue_struct *bdi_wq;

static inline bool wb_has_dirty_io(struct bdi_writeback *wb)
{
        return test_bit(WB_has_dirty_io, &wb->state);
}

static inline bool bdi_has_dirty_io(struct backing_dev_info *bdi)
{
        /*
         * @bdi->tot_write_bandwidth is guaranteed to be > 0 if there are
         * any dirty wbs.  See wb_update_write_bandwidth().
         */
        return atomic_long_read(&bdi->tot_write_bandwidth);
}

static inline void __add_wb_stat(struct bdi_writeback *wb,
                                 enum wb_stat_item item, s64 amount)
{
        __percpu_counter_add(&wb->stat[item], amount, WB_STAT_BATCH);
}

static inline void __inc_wb_stat(struct bdi_writeback *wb,
                                 enum wb_stat_item item)
{
        __add_wb_stat(wb, item, 1);
}

static inline void inc_wb_stat(struct bdi_writeback *wb, enum wb_stat_item item)
{
        unsigned long flags;

        local_irq_save(flags);
        __inc_wb_stat(wb, item);
        local_irq_restore(flags);
}

static inline void __dec_wb_stat(struct bdi_writeback *wb,
                                 enum wb_stat_item item)
{
        __add_wb_stat(wb, item, -1);
}

static inline void dec_wb_stat(struct bdi_writeback *wb, enum wb_stat_item item)
{
        unsigned long flags;

        local_irq_save(flags);
        __dec_wb_stat(wb, item);
        local_irq_restore(flags);
}

static inline s64 wb_stat(struct bdi_writeback *wb, enum wb_stat_item item)
{
        return percpu_counter_read_positive(&wb->stat[item]);
}

static inline s64 __wb_stat_sum(struct bdi_writeback *wb,
                                enum wb_stat_item item)
{
        return percpu_counter_sum_positive(&wb->stat[item]);
}

static inline s64 wb_stat_sum(struct bdi_writeback *wb, enum wb_stat_item item)
{
        s64 sum;
        unsigned long flags;

        local_irq_save(flags);
        sum = __wb_stat_sum(wb, item);
        local_irq_restore(flags);

        return sum;
}
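
/*
 * Example (editor's sketch, not part of this header's API): callers pair
 * the irq-safe inc/dec helpers around the lifetime of whatever state the
 * counter tracks.  example_account_writeback() is a hypothetical name.
 */
static inline void example_account_writeback(struct bdi_writeback *wb)
{
        inc_wb_stat(wb, WB_WRITEBACK);  /* page handed to the device */
        /* ... submit the I/O; in real code the decrement runs at
         * I/O completion, not inline like this: */
        dec_wb_stat(wb, WB_WRITEBACK);  /* page no longer under writeback */
}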

extern void wb_writeout_inc(struct bdi_writeback *wb);

/*
 * maximal error of a stat counter.
 */
static inline unsigned long wb_stat_error(struct bdi_writeback *wb)
{
#ifdef CONFIG_SMP
        return nr_cpu_ids * WB_STAT_BATCH;
#else
        return 1;
#endif
}
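
/*
 * Example (editor's sketch): the per-cpu counters are only approximately
 * accurate, so threshold comparisons conventionally leave room for
 * wb_stat_error(), in the spirit of the dirty-throttling code.
 * example_over_thresh() is an illustrative name.
 */
static inline bool example_over_thresh(struct bdi_writeback *wb,
                                       unsigned long thresh)
{
        /* treat the wb as over the limit once the counter could
         * plausibly exceed @thresh given the worst-case batching error */
        return wb_stat(wb, WB_RECLAIMABLE) + wb_stat_error(wb) > thresh;
}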

int bdi_set_min_ratio(struct backing_dev_info *bdi, unsigned int min_ratio);
int bdi_set_max_ratio(struct backing_dev_info *bdi, unsigned int max_ratio);

/*
 * Flags in backing_dev_info::capability
 *
 * The first three flags control whether dirty pages will contribute to the
 * VM's accounting and whether writepages() should be called for dirty pages
 * (something that would not, for example, be appropriate for ramfs).
 *
 * WARNING: these flags are closely related and should not normally be
 * used separately.  BDI_CAP_NO_ACCT_AND_WRITEBACK combines these
 * three flags into a single convenience macro.
 *
 * BDI_CAP_NO_ACCT_DIRTY:  Dirty pages shouldn't contribute to accounting
 * BDI_CAP_NO_WRITEBACK:   Don't write pages back
 * BDI_CAP_NO_ACCT_WB:     Don't automatically account writeback pages
 * BDI_CAP_STABLE_WRITES:  Pages must not be modified while under writeback
 * BDI_CAP_STRICTLIMIT:    Keep number of dirty pages below bdi threshold.
 *
 * BDI_CAP_CGROUP_WRITEBACK: Supports cgroup-aware writeback.
 */
#define BDI_CAP_NO_ACCT_DIRTY           0x00000001
#define BDI_CAP_NO_WRITEBACK            0x00000002
#define BDI_CAP_NO_ACCT_WB              0x00000004
#define BDI_CAP_STABLE_WRITES           0x00000008
#define BDI_CAP_STRICTLIMIT             0x00000010
#define BDI_CAP_CGROUP_WRITEBACK        0x00000020

#define BDI_CAP_NO_ACCT_AND_WRITEBACK \
        (BDI_CAP_NO_WRITEBACK | BDI_CAP_NO_ACCT_DIRTY | BDI_CAP_NO_ACCT_WB)
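
/*
 * Example (editor's sketch): a RAM-backed filesystem whose pages can never
 * be cleaned would clear all three accounting/writeback capabilities at
 * once, while a device that must see stable page contents during I/O
 * (e.g. one that checksums data in flight) would add the stable-writes
 * flag.  ram_bdi and checksumming_bdi are hypothetical names:
 *
 *      ram_bdi->capabilities = BDI_CAP_NO_ACCT_AND_WRITEBACK;
 *      checksumming_bdi->capabilities |= BDI_CAP_STABLE_WRITES;
 */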

extern struct backing_dev_info noop_backing_dev_info;

/**
 * writeback_in_progress - determine whether there is writeback in progress
 * @wb: bdi_writeback of interest
 *
 * Determine whether there is writeback waiting to be handled against a
 * bdi_writeback.
 */
static inline bool writeback_in_progress(struct bdi_writeback *wb)
{
        return test_bit(WB_writeback_running, &wb->state);
}

static inline struct backing_dev_info *inode_to_bdi(struct inode *inode)
{
        struct super_block *sb;

        if (!inode)
                return &noop_backing_dev_info;

        sb = inode->i_sb;
#ifdef CONFIG_BLOCK
        if (sb_is_blkdev_sb(sb))
                return blk_get_backing_dev_info(I_BDEV(inode));
#endif
        return sb->s_bdi;
}

static inline int wb_congested(struct bdi_writeback *wb, int cong_bits)
{
        struct backing_dev_info *bdi = wb->bdi;

        if (bdi->congested_fn)
                return bdi->congested_fn(bdi->congested_data, cong_bits);
        return wb->congested->state & cong_bits;
}

long congestion_wait(int sync, long timeout);
long wait_iff_congested(struct zone *zone, int sync, long timeout);
int pdflush_proc_obsolete(struct ctl_table *table, int write,
                          void __user *buffer, size_t *lenp, loff_t *ppos);

static inline bool bdi_cap_stable_pages_required(struct backing_dev_info *bdi)
{
        return bdi->capabilities & BDI_CAP_STABLE_WRITES;
}

static inline bool bdi_cap_writeback_dirty(struct backing_dev_info *bdi)
{
        return !(bdi->capabilities & BDI_CAP_NO_WRITEBACK);
}

static inline bool bdi_cap_account_dirty(struct backing_dev_info *bdi)
{
        return !(bdi->capabilities & BDI_CAP_NO_ACCT_DIRTY);
}

static inline bool bdi_cap_account_writeback(struct backing_dev_info *bdi)
{
        /* Paranoia: BDI_CAP_NO_WRITEBACK implies BDI_CAP_NO_ACCT_WB */
        return !(bdi->capabilities & (BDI_CAP_NO_ACCT_WB |
                                      BDI_CAP_NO_WRITEBACK));
}

static inline bool mapping_cap_writeback_dirty(struct address_space *mapping)
{
        return bdi_cap_writeback_dirty(inode_to_bdi(mapping->host));
}

static inline bool mapping_cap_account_dirty(struct address_space *mapping)
{
        return bdi_cap_account_dirty(inode_to_bdi(mapping->host));
}

static inline int bdi_sched_wait(void *word)
{
        schedule();
        return 0;
}

#ifdef CONFIG_CGROUP_WRITEBACK

struct bdi_writeback_congested *
wb_congested_get_create(struct backing_dev_info *bdi, int blkcg_id, gfp_t gfp);
void wb_congested_put(struct bdi_writeback_congested *congested);
struct bdi_writeback *wb_get_create(struct backing_dev_info *bdi,
                                    struct cgroup_subsys_state *memcg_css,
                                    gfp_t gfp);
void __inode_attach_wb(struct inode *inode, struct page *page);
void wb_memcg_offline(struct mem_cgroup *memcg);
void wb_blkcg_offline(struct blkcg *blkcg);
int inode_congested(struct inode *inode, int cong_bits);

/**
 * inode_cgwb_enabled - test whether cgroup writeback is enabled on an inode
 * @inode: inode of interest
 *
 * cgroup writeback requires support from both the bdi and filesystem.
 * Test whether @inode has both.
 */
static inline bool inode_cgwb_enabled(struct inode *inode)
{
        struct backing_dev_info *bdi = inode_to_bdi(inode);

        return bdi_cap_account_dirty(bdi) &&
                (bdi->capabilities & BDI_CAP_CGROUP_WRITEBACK) &&
                (inode->i_sb->s_type->fs_flags & FS_CGROUP_WRITEBACK);
}
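
/*
 * Example (editor's sketch): code that special-cases per-cgroup writeback
 * typically branches on inode_cgwb_enabled() and falls back to the bdi's
 * root wb otherwise.  This assumes the inode already has a wb attached;
 * example_pick_wb() is an illustrative name.
 */
static inline struct bdi_writeback *example_pick_wb(struct inode *inode)
{
        if (inode_cgwb_enabled(inode) && inode->i_wb)
                return inode->i_wb;             /* per-cgroup wb */
        return &inode_to_bdi(inode)->wb;        /* root wb */
}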

/**
 * wb_tryget - try to increment a wb's refcount
 * @wb: bdi_writeback to get
 */
static inline bool wb_tryget(struct bdi_writeback *wb)
{
        if (wb != &wb->bdi->wb)
                return percpu_ref_tryget(&wb->refcnt);
        return true;
}

/**
 * wb_get - increment a wb's refcount
 * @wb: bdi_writeback to get
 */
static inline void wb_get(struct bdi_writeback *wb)
{
        if (wb != &wb->bdi->wb)
                percpu_ref_get(&wb->refcnt);
}

/**
 * wb_put - decrement a wb's refcount
 * @wb: bdi_writeback to put
 */
static inline void wb_put(struct bdi_writeback *wb)
{
        if (wb != &wb->bdi->wb)
                percpu_ref_put(&wb->refcnt);
}
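
/*
 * Example (editor's sketch): a wb_tryget() that succeeds must be balanced
 * by wb_put() once the caller is done.  The root wb is refcount-exempt,
 * which the helpers above already handle.  example_use_wb() is an
 * illustrative name.
 */
static inline void example_use_wb(struct bdi_writeback *wb)
{
        if (!wb_tryget(wb))
                return;                 /* wb is being destroyed */
        /* ... wb may be dereferenced safely here ... */
        wb_put(wb);
}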

/**
 * wb_find_current - find wb for %current on a bdi
 * @bdi: bdi of interest
 *
 * Find the wb of @bdi which matches both the memcg and blkcg of %current.
 * Must be called under rcu_read_lock() which protects the returned wb.
 * Returns NULL if not found.
 */
static inline struct bdi_writeback *wb_find_current(struct backing_dev_info *bdi)
{
        struct cgroup_subsys_state *memcg_css;
        struct bdi_writeback *wb;

        memcg_css = task_css(current, memory_cgrp_id);
        if (!memcg_css->parent)
                return &bdi->wb;

        wb = radix_tree_lookup(&bdi->cgwb_tree, memcg_css->id);

        /*
         * %current's blkcg equals the effective blkcg of its memcg.  No
         * need to use the relatively expensive cgroup_get_e_css().
         */
        if (likely(wb && wb->blkcg_css == task_css(current, blkio_cgrp_id)))
                return wb;
        return NULL;
}

/**
 * wb_get_create_current - get or create wb for %current on a bdi
 * @bdi: bdi of interest
 * @gfp: allocation mask
 *
 * Equivalent to wb_get_create() on %current's memcg.  This function is
 * called from a relatively hot path and optimizes the common cases using
 * wb_find_current().
 */
static inline struct bdi_writeback *
wb_get_create_current(struct backing_dev_info *bdi, gfp_t gfp)
{
        struct bdi_writeback *wb;

        rcu_read_lock();
        wb = wb_find_current(bdi);
        if (wb && unlikely(!wb_tryget(wb)))
                wb = NULL;
        rcu_read_unlock();

        if (unlikely(!wb)) {
                struct cgroup_subsys_state *memcg_css;

                memcg_css = task_get_css(current, memory_cgrp_id);
                wb = wb_get_create(bdi, memcg_css, gfp);
                css_put(memcg_css);
        }
        return wb;
}

/**
 * inode_attach_wb - associate an inode with its wb
 * @inode: inode of interest
 * @page: page being dirtied (may be NULL)
 *
 * If @inode doesn't have its wb, associate it with the wb matching the
 * memcg of @page or, if @page is NULL, %current.  May be called with or
 * without @inode->i_lock held.
 */
static inline void inode_attach_wb(struct inode *inode, struct page *page)
{
        if (!inode->i_wb)
                __inode_attach_wb(inode, page);
}
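
/*
 * Example (editor's sketch): dirtying paths are the natural callers;
 * roughly in the spirit of account_page_dirtied(), a wb is attached
 * before the page is accounted against it.  The counter choice here is
 * illustrative only; the real accounting path is more involved.
 */
static inline void example_dirty_account(struct inode *inode,
                                         struct page *page)
{
        inode_attach_wb(inode, page);           /* ensure inode->i_wb is set */
        inc_wb_stat(inode->i_wb, WB_DIRTIED);   /* then account against it */
}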

/**
 * inode_detach_wb - disassociate an inode from its wb
 * @inode: inode of interest
 *
 * @inode is being freed.  Detach from its wb.
 */
static inline void inode_detach_wb(struct inode *inode)
{
        if (inode->i_wb) {
                wb_put(inode->i_wb);
                inode->i_wb = NULL;
        }
}

/**
 * inode_to_wb - determine the wb of an inode
 * @inode: inode of interest
 *
 * Returns the wb @inode is currently associated with.
 */
static inline struct bdi_writeback *inode_to_wb(struct inode *inode)
{
        return inode->i_wb;
}

struct wb_iter {
        int                     start_blkcg_id;
        struct radix_tree_iter  tree_iter;
        void                    **slot;
};

static inline struct bdi_writeback *__wb_iter_next(struct wb_iter *iter,
                                                   struct backing_dev_info *bdi)
{
        struct radix_tree_iter *titer = &iter->tree_iter;

        WARN_ON_ONCE(!rcu_read_lock_held());

        if (iter->start_blkcg_id >= 0) {
                iter->slot = radix_tree_iter_init(titer, iter->start_blkcg_id);
                iter->start_blkcg_id = -1;
        } else {
                iter->slot = radix_tree_next_slot(iter->slot, titer, 0);
        }

        if (!iter->slot)
                iter->slot = radix_tree_next_chunk(&bdi->cgwb_tree, titer, 0);
        if (iter->slot)
                return *iter->slot;
        return NULL;
}

static inline struct bdi_writeback *__wb_iter_init(struct wb_iter *iter,
                                                   struct backing_dev_info *bdi,
                                                   int start_blkcg_id)
{
        iter->start_blkcg_id = start_blkcg_id;

        if (start_blkcg_id)
                return __wb_iter_next(iter, bdi);
        else
                return &bdi->wb;
}

/**
 * bdi_for_each_wb - walk all wb's of a bdi in ascending blkcg ID order
 * @wb_cur: cursor struct bdi_writeback pointer
 * @bdi: bdi to walk wb's of
 * @iter: pointer to struct wb_iter to be used as iteration buffer
 * @start_blkcg_id: blkcg ID to start iteration from
 *
 * Iterate @wb_cur through the wb's (bdi_writeback's) of @bdi in ascending
 * blkcg ID order starting from @start_blkcg_id.  @iter is struct wb_iter
 * to be used as temp storage during iteration.  rcu_read_lock() must be
 * held throughout iteration.
 */
#define bdi_for_each_wb(wb_cur, bdi, iter, start_blkcg_id)             \
        for ((wb_cur) = __wb_iter_init(iter, bdi, start_blkcg_id);     \
             (wb_cur); (wb_cur) = __wb_iter_next(iter, bdi))

#else   /* CONFIG_CGROUP_WRITEBACK */

static inline bool inode_cgwb_enabled(struct inode *inode)
{
        return false;
}

static inline struct bdi_writeback_congested *
wb_congested_get_create(struct backing_dev_info *bdi, int blkcg_id, gfp_t gfp)
{
        return bdi->wb.congested;
}

static inline void wb_congested_put(struct bdi_writeback_congested *congested)
{
}

static inline bool wb_tryget(struct bdi_writeback *wb)
{
        return true;
}

static inline void wb_get(struct bdi_writeback *wb)
{
}

static inline void wb_put(struct bdi_writeback *wb)
{
}

static inline struct bdi_writeback *wb_find_current(struct backing_dev_info *bdi)
{
        return &bdi->wb;
}

static inline struct bdi_writeback *
wb_get_create_current(struct backing_dev_info *bdi, gfp_t gfp)
{
        return &bdi->wb;
}

static inline void inode_attach_wb(struct inode *inode, struct page *page)
{
}

static inline void inode_detach_wb(struct inode *inode)
{
}

static inline struct bdi_writeback *inode_to_wb(struct inode *inode)
{
        return &inode_to_bdi(inode)->wb;
}

static inline void wb_memcg_offline(struct mem_cgroup *memcg)
{
}

static inline void wb_blkcg_offline(struct blkcg *blkcg)
{
}

struct wb_iter {
        int             next_id;
};

#define bdi_for_each_wb(wb_cur, bdi, iter, start_blkcg_id)             \
        for ((iter)->next_id = (start_blkcg_id);                       \
             ({ (wb_cur) = !(iter)->next_id++ ? &(bdi)->wb : NULL; }); )

static inline int inode_congested(struct inode *inode, int cong_bits)
{
        return wb_congested(&inode_to_bdi(inode)->wb, cong_bits);
}

#endif  /* CONFIG_CGROUP_WRITEBACK */
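
/*
 * Example (editor's sketch): walking every wb of a bdi must happen under
 * rcu_read_lock().  example_total_writeback() is an illustrative helper
 * that sums the WB_WRITEBACK counter across all wbs of @bdi; with
 * !CONFIG_CGROUP_WRITEBACK the loop degenerates to the root wb alone.
 */
static inline s64 example_total_writeback(struct backing_dev_info *bdi)
{
        struct bdi_writeback *wb;
        struct wb_iter iter;
        s64 total = 0;

        rcu_read_lock();
        bdi_for_each_wb(wb, bdi, &iter, 0)
                total += wb_stat(wb, WB_WRITEBACK);
        rcu_read_unlock();

        return total;
}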

static inline int inode_read_congested(struct inode *inode)
{
        return inode_congested(inode, 1 << WB_sync_congested);
}

static inline int inode_write_congested(struct inode *inode)
{
        return inode_congested(inode, 1 << WB_async_congested);
}

static inline int inode_rw_congested(struct inode *inode)
{
        return inode_congested(inode, (1 << WB_sync_congested) |
                                      (1 << WB_async_congested));
}

static inline int bdi_congested(struct backing_dev_info *bdi, int cong_bits)
{
        return wb_congested(&bdi->wb, cong_bits);
}

static inline int bdi_read_congested(struct backing_dev_info *bdi)
{
        return bdi_congested(bdi, 1 << WB_sync_congested);
}

static inline int bdi_write_congested(struct backing_dev_info *bdi)
{
        return bdi_congested(bdi, 1 << WB_async_congested);
}

static inline int bdi_rw_congested(struct backing_dev_info *bdi)
{
        return bdi_congested(bdi, (1 << WB_sync_congested) |
                                  (1 << WB_async_congested));
}
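
/*
 * Example (editor's sketch): rather than piling more async I/O onto a
 * congested device, a writer can poll the congestion state and back off.
 * The HZ/10 timeout is an arbitrary illustrative choice;
 * example_backoff() is a hypothetical name.
 */
static inline void example_backoff(struct backing_dev_info *bdi)
{
        while (bdi_write_congested(bdi))
                congestion_wait(BLK_RW_ASYNC, HZ / 10);
}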

#endif  /* _LINUX_BACKING_DEV_H */