  1. /*
  2. * include/linux/backing-dev.h
  3. *
  4. * low-level device information and state which is propagated up through
  5. * to high-level code.
  6. */
  7. #ifndef _LINUX_BACKING_DEV_H
  8. #define _LINUX_BACKING_DEV_H
  9. #include <linux/kernel.h>
  10. #include <linux/fs.h>
  11. #include <linux/sched.h>
  12. #include <linux/writeback.h>
  13. #include <linux/backing-dev-defs.h>
  14. struct backing_dev_info *inode_to_bdi(struct inode *inode);
  15. int __must_check bdi_init(struct backing_dev_info *bdi);
  16. void bdi_destroy(struct backing_dev_info *bdi);
  17. __printf(3, 4)
  18. int bdi_register(struct backing_dev_info *bdi, struct device *parent,
  19. const char *fmt, ...);
  20. int bdi_register_dev(struct backing_dev_info *bdi, dev_t dev);
  21. void bdi_unregister(struct backing_dev_info *bdi);
  22. int __must_check bdi_setup_and_register(struct backing_dev_info *, char *);
  23. void bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages,
  24. enum wb_reason reason);
  25. void bdi_start_background_writeback(struct backing_dev_info *bdi);
  26. void wb_workfn(struct work_struct *work);
  27. int bdi_has_dirty_io(struct backing_dev_info *bdi);
  28. void wb_wakeup_delayed(struct bdi_writeback *wb);
  29. extern spinlock_t bdi_lock;
  30. extern struct list_head bdi_list;
  31. extern struct workqueue_struct *bdi_wq;
  32. static inline int wb_has_dirty_io(struct bdi_writeback *wb)
  33. {
  34. return !list_empty(&wb->b_dirty) ||
  35. !list_empty(&wb->b_io) ||
  36. !list_empty(&wb->b_more_io);
  37. }
  38. static inline void __add_wb_stat(struct bdi_writeback *wb,
  39. enum wb_stat_item item, s64 amount)
  40. {
  41. __percpu_counter_add(&wb->stat[item], amount, WB_STAT_BATCH);
  42. }
  43. static inline void __inc_wb_stat(struct bdi_writeback *wb,
  44. enum wb_stat_item item)
  45. {
  46. __add_wb_stat(wb, item, 1);
  47. }
  48. static inline void inc_wb_stat(struct bdi_writeback *wb, enum wb_stat_item item)
  49. {
  50. unsigned long flags;
  51. local_irq_save(flags);
  52. __inc_wb_stat(wb, item);
  53. local_irq_restore(flags);
  54. }
  55. static inline void __dec_wb_stat(struct bdi_writeback *wb,
  56. enum wb_stat_item item)
  57. {
  58. __add_wb_stat(wb, item, -1);
  59. }
  60. static inline void dec_wb_stat(struct bdi_writeback *wb, enum wb_stat_item item)
  61. {
  62. unsigned long flags;
  63. local_irq_save(flags);
  64. __dec_wb_stat(wb, item);
  65. local_irq_restore(flags);
  66. }
  67. static inline s64 wb_stat(struct bdi_writeback *wb, enum wb_stat_item item)
  68. {
  69. return percpu_counter_read_positive(&wb->stat[item]);
  70. }
  71. static inline s64 __wb_stat_sum(struct bdi_writeback *wb,
  72. enum wb_stat_item item)
  73. {
  74. return percpu_counter_sum_positive(&wb->stat[item]);
  75. }
  76. static inline s64 wb_stat_sum(struct bdi_writeback *wb, enum wb_stat_item item)
  77. {
  78. s64 sum;
  79. unsigned long flags;
  80. local_irq_save(flags);
  81. sum = __wb_stat_sum(wb, item);
  82. local_irq_restore(flags);
  83. return sum;
  84. }
  85. extern void wb_writeout_inc(struct bdi_writeback *wb);
  86. /*
  87. * maximal error of a stat counter.
  88. */
  89. static inline unsigned long wb_stat_error(struct bdi_writeback *wb)
  90. {
  91. #ifdef CONFIG_SMP
  92. return nr_cpu_ids * WB_STAT_BATCH;
  93. #else
  94. return 1;
  95. #endif
  96. }
  97. int bdi_set_min_ratio(struct backing_dev_info *bdi, unsigned int min_ratio);
  98. int bdi_set_max_ratio(struct backing_dev_info *bdi, unsigned int max_ratio);
  99. /*
  100. * Flags in backing_dev_info::capability
  101. *
  102. * The first three flags control whether dirty pages will contribute to the
  103. * VM's accounting and whether writepages() should be called for dirty pages
  104. * (something that would not, for example, be appropriate for ramfs)
  105. *
  106. * WARNING: these flags are closely related and should not normally be
  107. * used separately. The BDI_CAP_NO_ACCT_AND_WRITEBACK combines these
  108. * three flags into a single convenience macro.
  109. *
  110. * BDI_CAP_NO_ACCT_DIRTY: Dirty pages shouldn't contribute to accounting
  111. * BDI_CAP_NO_WRITEBACK: Don't write pages back
  112. * BDI_CAP_NO_ACCT_WB: Don't automatically account writeback pages
  113. * BDI_CAP_STRICTLIMIT: Keep number of dirty pages below bdi threshold.
  114. */
  115. #define BDI_CAP_NO_ACCT_DIRTY 0x00000001
  116. #define BDI_CAP_NO_WRITEBACK 0x00000002
  117. #define BDI_CAP_NO_ACCT_WB 0x00000004
  118. #define BDI_CAP_STABLE_WRITES 0x00000008
  119. #define BDI_CAP_STRICTLIMIT 0x00000010
  120. #define BDI_CAP_NO_ACCT_AND_WRITEBACK \
  121. (BDI_CAP_NO_WRITEBACK | BDI_CAP_NO_ACCT_DIRTY | BDI_CAP_NO_ACCT_WB)
  122. extern struct backing_dev_info noop_backing_dev_info;
  123. int writeback_in_progress(struct backing_dev_info *bdi);
  124. static inline int bdi_congested(struct backing_dev_info *bdi, int bdi_bits)
  125. {
  126. if (bdi->congested_fn)
  127. return bdi->congested_fn(bdi->congested_data, bdi_bits);
  128. return (bdi->wb.state & bdi_bits);
  129. }
  130. static inline int bdi_read_congested(struct backing_dev_info *bdi)
  131. {
  132. return bdi_congested(bdi, 1 << WB_sync_congested);
  133. }
  134. static inline int bdi_write_congested(struct backing_dev_info *bdi)
  135. {
  136. return bdi_congested(bdi, 1 << WB_async_congested);
  137. }
  138. static inline int bdi_rw_congested(struct backing_dev_info *bdi)
  139. {
  140. return bdi_congested(bdi, (1 << WB_sync_congested) |
  141. (1 << WB_async_congested));
  142. }
  143. long congestion_wait(int sync, long timeout);
  144. long wait_iff_congested(struct zone *zone, int sync, long timeout);
  145. int pdflush_proc_obsolete(struct ctl_table *table, int write,
  146. void __user *buffer, size_t *lenp, loff_t *ppos);
  147. static inline bool bdi_cap_stable_pages_required(struct backing_dev_info *bdi)
  148. {
  149. return bdi->capabilities & BDI_CAP_STABLE_WRITES;
  150. }
  151. static inline bool bdi_cap_writeback_dirty(struct backing_dev_info *bdi)
  152. {
  153. return !(bdi->capabilities & BDI_CAP_NO_WRITEBACK);
  154. }
  155. static inline bool bdi_cap_account_dirty(struct backing_dev_info *bdi)
  156. {
  157. return !(bdi->capabilities & BDI_CAP_NO_ACCT_DIRTY);
  158. }
  159. static inline bool bdi_cap_account_writeback(struct backing_dev_info *bdi)
  160. {
  161. /* Paranoia: BDI_CAP_NO_WRITEBACK implies BDI_CAP_NO_ACCT_WB */
  162. return !(bdi->capabilities & (BDI_CAP_NO_ACCT_WB |
  163. BDI_CAP_NO_WRITEBACK));
  164. }
  165. static inline bool mapping_cap_writeback_dirty(struct address_space *mapping)
  166. {
  167. return bdi_cap_writeback_dirty(inode_to_bdi(mapping->host));
  168. }
  169. static inline bool mapping_cap_account_dirty(struct address_space *mapping)
  170. {
  171. return bdi_cap_account_dirty(inode_to_bdi(mapping->host));
  172. }
  173. static inline int bdi_sched_wait(void *word)
  174. {
  175. schedule();
  176. return 0;
  177. }
  178. #endif /* _LINUX_BACKING_DEV_H */