/*
 * backing-dev-defs.h — core structure definitions for backing devices
 * and their writeback state (see mm/backing-dev.c, fs/fs-writeback.c).
 */
#ifndef __LINUX_BACKING_DEV_DEFS_H
#define __LINUX_BACKING_DEV_DEFS_H

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/percpu_counter.h>
#include <linux/flex_proportions.h>
#include <linux/timer.h>
#include <linux/workqueue.h>

/* Opaque types only referenced by pointer below; no include needed. */
struct page;
struct device;
struct dentry;
/*
 * Bits in bdi_writeback.state (manipulated with atomic bitops).
 */
enum wb_state {
	WB_async_congested,	/* The async (write) queue is getting full */
	WB_sync_congested,	/* The sync queue is getting full */
	WB_registered,		/* bdi_register() was done */
	WB_writeback_running,	/* Writeback is in progress */
};
  21. typedef int (congested_fn)(void *, int);
/*
 * Indices into bdi_writeback.stat[] (per-cpu statistics counters).
 */
enum wb_stat_item {
	WB_RECLAIMABLE,
	WB_WRITEBACK,
	WB_DIRTIED,
	WB_WRITTEN,
	NR_WB_STAT_ITEMS	/* keep last: number of counters */
};
/* percpu_counter batch size for stat[]: grows with log2 of possible CPUs. */
#define WB_STAT_BATCH (8*(1+ilog2(nr_cpu_ids)))
/*
 * Per-bdi writeback state: the dirty-inode lists, per-cpu statistics
 * (indexed by enum wb_stat_item) and write-bandwidth / dirty-ratelimit
 * estimates, plus the delayed work item that drives flushing.
 */
struct bdi_writeback {
	struct backing_dev_info *bdi;	/* our parent bdi */

	unsigned long state;		/* WB_* bits; always use atomic bitops on this */
	unsigned long last_old_flush;	/* last old data flush */

	struct list_head b_dirty;	/* dirty inodes */
	struct list_head b_io;		/* parked for writeback */
	struct list_head b_more_io;	/* parked for more writeback */
	struct list_head b_dirty_time;	/* time stamps are dirty */
	spinlock_t list_lock;		/* protects the b_* lists */

	struct percpu_counter stat[NR_WB_STAT_ITEMS];	/* see WB_STAT_BATCH */

	unsigned long bw_time_stamp;	/* last time write bw is updated */
	unsigned long dirtied_stamp;	/* pages dirtied at bw_time_stamp */
	unsigned long written_stamp;	/* pages written at bw_time_stamp */
	unsigned long write_bandwidth;	/* the estimated write bandwidth */
	unsigned long avg_write_bandwidth; /* further smoothed write bw */

	/*
	 * The base dirty throttle rate, re-calculated on every 200ms.
	 * All the bdi tasks' dirty rate will be curbed under it.
	 * @dirty_ratelimit tracks the estimated @balanced_dirty_ratelimit
	 * in small steps and is much more smooth/stable than the latter.
	 */
	unsigned long dirty_ratelimit;
	unsigned long balanced_dirty_ratelimit;

	struct fprop_local_percpu completions;
	int dirty_exceeded;	/* NOTE(review): presumably set while over the dirty limit — confirm */

	spinlock_t work_lock;		/* protects work_list & dwork scheduling */
	struct list_head work_list;
	struct delayed_work dwork;	/* work item used for writeback */
};
/*
 * Per-device writeback context: device-wide tunables (readahead size,
 * dirty ratios), the optional congestion callback for stacking drivers,
 * and the embedded default bdi_writeback.
 */
struct backing_dev_info {
	struct list_head bdi_list;	/* entry on the global bdi list — confirm against bdi_register() */
	unsigned long ra_pages;		/* max readahead in PAGE_CACHE_SIZE units */
	unsigned int capabilities;	/* Device capabilities */
	congested_fn *congested_fn;	/* Function pointer if device is md/dm */
	void *congested_data;		/* Pointer to aux data for congested func */

	char *name;

	unsigned int min_ratio;		/* min/max shares of the dirty limit — */
	unsigned int max_ratio, max_prop_frac;	/* presumably; verify vs. bdi_set_*_ratio() */

	struct bdi_writeback wb;	/* default writeback info for this bdi */

	struct device *dev;

	struct timer_list laptop_mode_wb_timer;

#ifdef CONFIG_DEBUG_FS
	struct dentry *debug_dir;	/* debugfs directory for this bdi */
	struct dentry *debug_stats;	/* debugfs stats file */
#endif
};
/*
 * Values for the @sync argument of set/clear_bdi_congested()
 * (cf. WB_async_congested / WB_sync_congested above).
 */
enum {
	BLK_RW_ASYNC	= 0,
	BLK_RW_SYNC	= 1,
};
/* Clear/set @bdi's congested state for the given queue (@sync: BLK_RW_*). */
void clear_bdi_congested(struct backing_dev_info *bdi, int sync);
void set_bdi_congested(struct backing_dev_info *bdi, int sync);

#endif	/* __LINUX_BACKING_DEV_DEFS_H */