/*
 * include/linux/backing-dev.h
 *
 * low-level device information and state which is propagated up through
 * to high-level code.
 */

#ifndef _LINUX_BACKING_DEV_H
#define _LINUX_BACKING_DEV_H

#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/blkdev.h>
#include <linux/writeback.h>
#include <linux/blk-cgroup.h>
#include <linux/backing-dev-defs.h>
#include <linux/slab.h>
int __must_check bdi_init(struct backing_dev_info *bdi);
void bdi_destroy(struct backing_dev_info *bdi);

__printf(3, 4)
int bdi_register(struct backing_dev_info *bdi, struct device *parent,
		const char *fmt, ...);
int bdi_register_dev(struct backing_dev_info *bdi, dev_t dev);
int __must_check bdi_setup_and_register(struct backing_dev_info *, char *);
void wb_start_writeback(struct bdi_writeback *wb, long nr_pages,
			bool range_cyclic, enum wb_reason reason);
void wb_start_background_writeback(struct bdi_writeback *wb);
void wb_workfn(struct work_struct *work);
void wb_wakeup_delayed(struct bdi_writeback *wb);

extern spinlock_t bdi_lock;
extern struct list_head bdi_list;

extern struct workqueue_struct *bdi_wq;
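/*
 * Example (a sketch, not part of this header): a driver embedding its own
 * backing_dev_info would typically pair setup and teardown like this;
 * "mydev" is a hypothetical driver structure:
 *
 *	err = bdi_setup_and_register(&mydev->bdi, "mydev");
 *	if (err)
 *		return err;
 *	...
 *	bdi_destroy(&mydev->bdi);
 */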
static inline bool wb_has_dirty_io(struct bdi_writeback *wb)
{
	return test_bit(WB_has_dirty_io, &wb->state);
}

static inline bool bdi_has_dirty_io(struct backing_dev_info *bdi)
{
	/*
	 * @bdi->tot_write_bandwidth is guaranteed to be > 0 if there are
	 * any dirty wbs.  See wb_update_write_bandwidth().
	 */
	return atomic_long_read(&bdi->tot_write_bandwidth);
}
static inline void __add_wb_stat(struct bdi_writeback *wb,
				 enum wb_stat_item item, s64 amount)
{
	__percpu_counter_add(&wb->stat[item], amount, WB_STAT_BATCH);
}

static inline void __inc_wb_stat(struct bdi_writeback *wb,
				 enum wb_stat_item item)
{
	__add_wb_stat(wb, item, 1);
}

static inline void inc_wb_stat(struct bdi_writeback *wb, enum wb_stat_item item)
{
	unsigned long flags;

	local_irq_save(flags);
	__inc_wb_stat(wb, item);
	local_irq_restore(flags);
}

static inline void __dec_wb_stat(struct bdi_writeback *wb,
				 enum wb_stat_item item)
{
	__add_wb_stat(wb, item, -1);
}

static inline void dec_wb_stat(struct bdi_writeback *wb, enum wb_stat_item item)
{
	unsigned long flags;

	local_irq_save(flags);
	__dec_wb_stat(wb, item);
	local_irq_restore(flags);
}

static inline s64 wb_stat(struct bdi_writeback *wb, enum wb_stat_item item)
{
	return percpu_counter_read_positive(&wb->stat[item]);
}

static inline s64 __wb_stat_sum(struct bdi_writeback *wb,
				enum wb_stat_item item)
{
	return percpu_counter_sum_positive(&wb->stat[item]);
}

static inline s64 wb_stat_sum(struct bdi_writeback *wb, enum wb_stat_item item)
{
	s64 sum;
	unsigned long flags;

	local_irq_save(flags);
	sum = __wb_stat_sum(wb, item);
	local_irq_restore(flags);

	return sum;
}
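/*
 * Example (a sketch): the helpers above maintain approximate per-wb percpu
 * counters keyed by enum wb_stat_item (e.g. WB_WRITEBACK, from
 * backing-dev-defs.h).  A caller accounting a page entering and leaving
 * writeback with interrupts already disabled might do:
 *
 *	__inc_wb_stat(wb, WB_WRITEBACK);
 *	...
 *	__dec_wb_stat(wb, WB_WRITEBACK);
 *
 * and use inc_wb_stat()/dec_wb_stat() instead when the IRQ state is unknown.
 */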
extern void wb_writeout_inc(struct bdi_writeback *wb);

/*
 * maximal error of a stat counter.
 */
static inline unsigned long wb_stat_error(struct bdi_writeback *wb)
{
#ifdef CONFIG_SMP
	return nr_cpu_ids * WB_STAT_BATCH;
#else
	return 1;
#endif
}
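/*
 * Example (a sketch): because a percpu counter read can be off by up to
 * wb_stat_error(), readers comparing against a nearby limit fall back to
 * the exact (and more expensive) sum.  "thresh" here is a hypothetical
 * caller-supplied limit; the shape mirrors the dirty-throttling code:
 *
 *	if (thresh < 2 * wb_stat_error(wb))
 *		nr = wb_stat_sum(wb, WB_RECLAIMABLE);
 *	else
 *		nr = wb_stat(wb, WB_RECLAIMABLE);
 */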
int bdi_set_min_ratio(struct backing_dev_info *bdi, unsigned int min_ratio);
int bdi_set_max_ratio(struct backing_dev_info *bdi, unsigned int max_ratio);

/*
 * Flags in backing_dev_info::capability
 *
 * The first three flags control whether dirty pages will contribute to the
 * VM's accounting and whether writepages() should be called for dirty pages
 * (something that would not, for example, be appropriate for ramfs).
 *
 * WARNING: these flags are closely related and should not normally be
 * used separately.  The BDI_CAP_NO_ACCT_AND_WRITEBACK macro combines
 * these three flags for convenience.
 *
 * BDI_CAP_NO_ACCT_DIRTY:	Dirty pages shouldn't contribute to accounting
 * BDI_CAP_NO_WRITEBACK:	Don't write pages back
 * BDI_CAP_NO_ACCT_WB:		Don't automatically account writeback pages
 * BDI_CAP_STABLE_WRITES:	Pages must not be modified while under writeback
 * BDI_CAP_STRICTLIMIT:		Keep number of dirty pages below bdi threshold
 *
 * BDI_CAP_CGROUP_WRITEBACK:	Supports cgroup-aware writeback
 */
#define BDI_CAP_NO_ACCT_DIRTY		0x00000001
#define BDI_CAP_NO_WRITEBACK		0x00000002
#define BDI_CAP_NO_ACCT_WB		0x00000004
#define BDI_CAP_STABLE_WRITES		0x00000008
#define BDI_CAP_STRICTLIMIT		0x00000010
#define BDI_CAP_CGROUP_WRITEBACK	0x00000020

#define BDI_CAP_NO_ACCT_AND_WRITEBACK \
	(BDI_CAP_NO_WRITEBACK | BDI_CAP_NO_ACCT_DIRTY | BDI_CAP_NO_ACCT_WB)
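/*
 * Example (a sketch): a RAM-backed filesystem whose pages can never be
 * cleaned by writing them out would set all three flags at once via the
 * convenience macro:
 *
 *	bdi->capabilities = BDI_CAP_NO_ACCT_AND_WRITEBACK;
 */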
extern struct backing_dev_info noop_backing_dev_info;

/**
 * writeback_in_progress - determine whether there is writeback in progress
 * @wb: bdi_writeback of interest
 *
 * Determine whether there is writeback waiting to be handled against a
 * bdi_writeback.
 */
static inline bool writeback_in_progress(struct bdi_writeback *wb)
{
	return test_bit(WB_writeback_running, &wb->state);
}

static inline struct backing_dev_info *inode_to_bdi(struct inode *inode)
{
	struct super_block *sb;

	if (!inode)
		return &noop_backing_dev_info;

	sb = inode->i_sb;
#ifdef CONFIG_BLOCK
	if (sb_is_blkdev_sb(sb))
		return blk_get_backing_dev_info(I_BDEV(inode));
#endif
	return sb->s_bdi;
}
static inline int wb_congested(struct bdi_writeback *wb, int cong_bits)
{
	struct backing_dev_info *bdi = wb->bdi;

	if (bdi->congested_fn)
		return bdi->congested_fn(bdi->congested_data, cong_bits);
	return wb->congested->state & cong_bits;
}

long congestion_wait(int sync, long timeout);
long wait_iff_congested(struct zone *zone, int sync, long timeout);
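/*
 * Example (a sketch): a writer that backs off while the underlying device
 * is congested.  BLK_RW_ASYNC and the HZ/50 backoff mirror common in-tree
 * usage, e.g. in memory reclaim:
 *
 *	while (bdi_write_congested(bdi))
 *		congestion_wait(BLK_RW_ASYNC, HZ / 50);
 */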
int pdflush_proc_obsolete(struct ctl_table *table, int write,
			  void __user *buffer, size_t *lenp, loff_t *ppos);

static inline bool bdi_cap_stable_pages_required(struct backing_dev_info *bdi)
{
	return bdi->capabilities & BDI_CAP_STABLE_WRITES;
}

static inline bool bdi_cap_writeback_dirty(struct backing_dev_info *bdi)
{
	return !(bdi->capabilities & BDI_CAP_NO_WRITEBACK);
}

static inline bool bdi_cap_account_dirty(struct backing_dev_info *bdi)
{
	return !(bdi->capabilities & BDI_CAP_NO_ACCT_DIRTY);
}

static inline bool bdi_cap_account_writeback(struct backing_dev_info *bdi)
{
	/* Paranoia: BDI_CAP_NO_WRITEBACK implies BDI_CAP_NO_ACCT_WB */
	return !(bdi->capabilities & (BDI_CAP_NO_ACCT_WB |
				      BDI_CAP_NO_WRITEBACK));
}

static inline bool mapping_cap_writeback_dirty(struct address_space *mapping)
{
	return bdi_cap_writeback_dirty(inode_to_bdi(mapping->host));
}

static inline bool mapping_cap_account_dirty(struct address_space *mapping)
{
	return bdi_cap_account_dirty(inode_to_bdi(mapping->host));
}

static inline int bdi_sched_wait(void *word)
{
	schedule();
	return 0;
}
#ifdef CONFIG_CGROUP_WRITEBACK

struct bdi_writeback_congested *
wb_congested_get_create(struct backing_dev_info *bdi, int blkcg_id, gfp_t gfp);
void wb_congested_put(struct bdi_writeback_congested *congested);
struct bdi_writeback *wb_get_create(struct backing_dev_info *bdi,
				    struct cgroup_subsys_state *memcg_css,
				    gfp_t gfp);
void wb_memcg_offline(struct mem_cgroup *memcg);
void wb_blkcg_offline(struct blkcg *blkcg);
int inode_congested(struct inode *inode, int cong_bits);

/**
 * inode_cgwb_enabled - test whether cgroup writeback is enabled on an inode
 * @inode: inode of interest
 *
 * cgroup writeback requires support from both the bdi and filesystem.
 * Also, both memcg and iocg have to be on the default hierarchy.  Test
 * whether all conditions are met.
 *
 * Note that the test result may change dynamically on the same inode
 * depending on how memcg and iocg are configured.
 */
static inline bool inode_cgwb_enabled(struct inode *inode)
{
	struct backing_dev_info *bdi = inode_to_bdi(inode);

	return cgroup_subsys_on_dfl(memory_cgrp_subsys) &&
		cgroup_subsys_on_dfl(io_cgrp_subsys) &&
		bdi_cap_account_dirty(bdi) &&
		(bdi->capabilities & BDI_CAP_CGROUP_WRITEBACK) &&
		(inode->i_sb->s_iflags & SB_I_CGROUPWB);
}
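/*
 * Example (a sketch): the last condition above is under filesystem control.
 * A filesystem opts its inodes into cgroup writeback by setting
 * SB_I_CGROUPWB at mount time, as some in-tree filesystems do:
 *
 *	sb->s_iflags |= SB_I_CGROUPWB;
 */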
/**
 * wb_find_current - find wb for %current on a bdi
 * @bdi: bdi of interest
 *
 * Find the wb of @bdi which matches both the memcg and blkcg of %current.
 * Must be called under rcu_read_lock() which protects the returned wb.
 * Returns NULL if not found.
 */
static inline struct bdi_writeback *wb_find_current(struct backing_dev_info *bdi)
{
	struct cgroup_subsys_state *memcg_css;
	struct bdi_writeback *wb;

	memcg_css = task_css(current, memory_cgrp_id);
	if (!memcg_css->parent)
		return &bdi->wb;

	wb = radix_tree_lookup(&bdi->cgwb_tree, memcg_css->id);

	/*
	 * %current's blkcg equals the effective blkcg of its memcg.  No
	 * need to use the relatively expensive cgroup_get_e_css().
	 */
	if (likely(wb && wb->blkcg_css == task_css(current, io_cgrp_id)))
		return wb;
	return NULL;
}
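/*
 * Example (a sketch): wb_find_current() only performs a lookup and takes
 * no reference, so the caller must hold rcu_read_lock() for as long as the
 * returned wb is used:
 *
 *	rcu_read_lock();
 *	wb = wb_find_current(bdi);
 *	if (wb)
 *		... use wb, without sleeping ...
 *	rcu_read_unlock();
 */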
/**
 * wb_get_create_current - get or create wb for %current on a bdi
 * @bdi: bdi of interest
 * @gfp: allocation mask
 *
 * Equivalent to wb_get_create() on %current's memcg.  This function is
 * called from a relatively hot path and optimizes the common cases using
 * wb_find_current().
 */
static inline struct bdi_writeback *
wb_get_create_current(struct backing_dev_info *bdi, gfp_t gfp)
{
	struct bdi_writeback *wb;

	rcu_read_lock();
	wb = wb_find_current(bdi);
	if (wb && unlikely(!wb_tryget(wb)))
		wb = NULL;
	rcu_read_unlock();

	if (unlikely(!wb)) {
		struct cgroup_subsys_state *memcg_css;

		memcg_css = task_get_css(current, memory_cgrp_id);
		wb = wb_get_create(bdi, memcg_css, gfp);
		css_put(memcg_css);
	}
	return wb;
}
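/*
 * Example (a sketch): unlike wb_find_current(), the wb returned here is
 * reference-counted, so it remains valid outside an RCU read section and
 * must be released with wb_put() when done:
 *
 *	wb = wb_get_create_current(bdi, GFP_KERNEL);
 *	if (wb) {
 *		... use wb ...
 *		wb_put(wb);
 *	}
 */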
/**
 * inode_to_wb_is_valid - test whether an inode has a wb associated
 * @inode: inode of interest
 *
 * Returns %true if @inode has a wb associated.  May be called without any
 * locking.
 */
static inline bool inode_to_wb_is_valid(struct inode *inode)
{
	return inode->i_wb;
}

/**
 * inode_to_wb - determine the wb of an inode
 * @inode: inode of interest
 *
 * Returns the wb @inode is currently associated with.  The caller must be
 * holding either @inode->i_lock, @inode->i_mapping->tree_lock, or the
 * associated wb's list_lock.
 */
static inline struct bdi_writeback *inode_to_wb(struct inode *inode)
{
#ifdef CONFIG_LOCKDEP
	WARN_ON_ONCE(debug_locks &&
		     (!lockdep_is_held(&inode->i_lock) &&
		      !lockdep_is_held(&inode->i_mapping->tree_lock) &&
		      !lockdep_is_held(&inode->i_wb->list_lock)));
#endif
	return inode->i_wb;
}
/**
 * unlocked_inode_to_wb_begin - begin unlocked inode wb access transaction
 * @inode: target inode
 * @lockedp: temp bool output param, to be passed to the end function
 *
 * The caller wants to access the wb associated with @inode but isn't
 * holding inode->i_lock, mapping->tree_lock or wb->list_lock.  This
 * function determines the wb associated with @inode and ensures that the
 * association doesn't change until the transaction is finished with
 * unlocked_inode_to_wb_end().
 *
 * The caller must call unlocked_inode_to_wb_end() with *@lockedp
 * afterwards and can't sleep during the transaction.  IRQs may or may not
 * be disabled on return.
 */
static inline struct bdi_writeback *
unlocked_inode_to_wb_begin(struct inode *inode, bool *lockedp)
{
	rcu_read_lock();

	/*
	 * Paired with store_release in inode_switch_wb_work_fn() and
	 * ensures that we see the new wb if we see cleared I_WB_SWITCH.
	 */
	*lockedp = smp_load_acquire(&inode->i_state) & I_WB_SWITCH;

	if (unlikely(*lockedp))
		spin_lock_irq(&inode->i_mapping->tree_lock);

	/*
	 * Protected by either !I_WB_SWITCH + rcu_read_lock() or tree_lock.
	 * inode_to_wb() will bark.  Deref directly.
	 */
	return inode->i_wb;
}

/**
 * unlocked_inode_to_wb_end - end inode wb access transaction
 * @inode: target inode
 * @locked: *@lockedp from unlocked_inode_to_wb_begin()
 */
static inline void unlocked_inode_to_wb_end(struct inode *inode, bool locked)
{
	if (unlikely(locked))
		spin_unlock_irq(&inode->i_mapping->tree_lock);

	rcu_read_unlock();
}
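/*
 * Example (a sketch): the begin/end pair brackets a short, non-sleeping
 * transaction over the inode's wb, following the pattern of callers in
 * mm/page-writeback.c:
 *
 *	struct bdi_writeback *wb;
 *	bool locked;
 *
 *	wb = unlocked_inode_to_wb_begin(inode, &locked);
 *	... read or update wb state, no sleeping ...
 *	unlocked_inode_to_wb_end(inode, locked);
 */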
struct wb_iter {
	int			start_memcg_id;
	struct radix_tree_iter	tree_iter;
	void			**slot;
};

static inline struct bdi_writeback *__wb_iter_next(struct wb_iter *iter,
						   struct backing_dev_info *bdi)
{
	struct radix_tree_iter *titer = &iter->tree_iter;

	WARN_ON_ONCE(!rcu_read_lock_held());

	if (iter->start_memcg_id >= 0) {
		iter->slot = radix_tree_iter_init(titer, iter->start_memcg_id);
		iter->start_memcg_id = -1;
	} else {
		iter->slot = radix_tree_next_slot(iter->slot, titer, 0);
	}

	if (!iter->slot)
		iter->slot = radix_tree_next_chunk(&bdi->cgwb_tree, titer, 0);
	if (iter->slot)
		return *iter->slot;
	return NULL;
}

static inline struct bdi_writeback *__wb_iter_init(struct wb_iter *iter,
						   struct backing_dev_info *bdi,
						   int start_memcg_id)
{
	iter->start_memcg_id = start_memcg_id;

	if (start_memcg_id)
		return __wb_iter_next(iter, bdi);
	else
		return &bdi->wb;
}

/**
 * bdi_for_each_wb - walk all wb's of a bdi in ascending memcg ID order
 * @wb_cur: cursor struct bdi_writeback pointer
 * @bdi: bdi to walk wb's of
 * @iter: pointer to struct wb_iter to be used as iteration buffer
 * @start_memcg_id: memcg ID to start iteration from
 *
 * Iterate @wb_cur through the wb's (bdi_writeback's) of @bdi in ascending
 * memcg ID order starting from @start_memcg_id.  @iter is struct wb_iter
 * to be used as temp storage during iteration.  rcu_read_lock() must be
 * held throughout iteration.
 */
#define bdi_for_each_wb(wb_cur, bdi, iter, start_memcg_id)		\
	for ((wb_cur) = __wb_iter_init(iter, bdi, start_memcg_id);	\
	     (wb_cur); (wb_cur) = __wb_iter_next(iter, bdi))
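/*
 * Example (a sketch): walking every wb of a bdi under RCU, e.g. to kick
 * background writeback on each wb that has dirty inodes:
 *
 *	struct bdi_writeback *wb;
 *	struct wb_iter iter;
 *
 *	rcu_read_lock();
 *	bdi_for_each_wb(wb, bdi, &iter, 0)
 *		if (wb_has_dirty_io(wb))
 *			wb_start_background_writeback(wb);
 *	rcu_read_unlock();
 */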
#else	/* CONFIG_CGROUP_WRITEBACK */

static inline bool inode_cgwb_enabled(struct inode *inode)
{
	return false;
}

static inline struct bdi_writeback_congested *
wb_congested_get_create(struct backing_dev_info *bdi, int blkcg_id, gfp_t gfp)
{
	atomic_inc(&bdi->wb_congested->refcnt);
	return bdi->wb_congested;
}

static inline void wb_congested_put(struct bdi_writeback_congested *congested)
{
	if (atomic_dec_and_test(&congested->refcnt))
		kfree(congested);
}

static inline struct bdi_writeback *wb_find_current(struct backing_dev_info *bdi)
{
	return &bdi->wb;
}

static inline struct bdi_writeback *
wb_get_create_current(struct backing_dev_info *bdi, gfp_t gfp)
{
	return &bdi->wb;
}

static inline bool inode_to_wb_is_valid(struct inode *inode)
{
	return true;
}

static inline struct bdi_writeback *inode_to_wb(struct inode *inode)
{
	return &inode_to_bdi(inode)->wb;
}

static inline struct bdi_writeback *
unlocked_inode_to_wb_begin(struct inode *inode, bool *lockedp)
{
	return inode_to_wb(inode);
}

static inline void unlocked_inode_to_wb_end(struct inode *inode, bool locked)
{
}

static inline void wb_memcg_offline(struct mem_cgroup *memcg)
{
}

static inline void wb_blkcg_offline(struct blkcg *blkcg)
{
}

struct wb_iter {
	int		next_id;
};

#define bdi_for_each_wb(wb_cur, bdi, iter, start_blkcg_id)		\
	for ((iter)->next_id = (start_blkcg_id);			\
	     ({	(wb_cur) = !(iter)->next_id++ ? &(bdi)->wb : NULL; }); )

static inline int inode_congested(struct inode *inode, int cong_bits)
{
	return wb_congested(&inode_to_bdi(inode)->wb, cong_bits);
}

#endif	/* CONFIG_CGROUP_WRITEBACK */
static inline int inode_read_congested(struct inode *inode)
{
	return inode_congested(inode, 1 << WB_sync_congested);
}

static inline int inode_write_congested(struct inode *inode)
{
	return inode_congested(inode, 1 << WB_async_congested);
}

static inline int inode_rw_congested(struct inode *inode)
{
	return inode_congested(inode, (1 << WB_sync_congested) |
				      (1 << WB_async_congested));
}

static inline int bdi_congested(struct backing_dev_info *bdi, int cong_bits)
{
	return wb_congested(&bdi->wb, cong_bits);
}

static inline int bdi_read_congested(struct backing_dev_info *bdi)
{
	return bdi_congested(bdi, 1 << WB_sync_congested);
}

static inline int bdi_write_congested(struct backing_dev_info *bdi)
{
	return bdi_congested(bdi, 1 << WB_async_congested);
}

static inline int bdi_rw_congested(struct backing_dev_info *bdi)
{
	return bdi_congested(bdi, (1 << WB_sync_congested) |
				  (1 << WB_async_congested));
}
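/*
 * Example (a sketch): the inode_*_congested() variants are cgroup-aware
 * counterparts of the bdi_*_congested() ones.  A filesystem deciding
 * whether to skip optional readahead or background writeback might do:
 *
 *	if (inode_write_congested(inode))
 *		return 0;
 */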
#endif	/* _LINUX_BACKING_DEV_H */