vmstat.h

#ifndef _LINUX_VMSTAT_H
#define _LINUX_VMSTAT_H

#include <linux/types.h>
#include <linux/percpu.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <asm/atomic.h>

#ifdef CONFIG_ZONE_DMA
#define DMA_ZONE(xx) xx##_DMA,
#else
#define DMA_ZONE(xx)
#endif

#ifdef CONFIG_ZONE_DMA32
#define DMA32_ZONE(xx) xx##_DMA32,
#else
#define DMA32_ZONE(xx)
#endif

#ifdef CONFIG_HIGHMEM
#define HIGHMEM_ZONE(xx) , xx##_HIGH
#else
#define HIGHMEM_ZONE(xx)
#endif
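
/*
 * FOR_ALL_ZONES() expands an event base name into one enumerator per zone
 * that is configured in. For example, with CONFIG_ZONE_DMA and
 * CONFIG_ZONE_DMA32 enabled but CONFIG_HIGHMEM disabled,
 * FOR_ALL_ZONES(PGALLOC) expands to
 * PGALLOC_DMA, PGALLOC_DMA32, PGALLOC_NORMAL, PGALLOC_MOVABLE.
 */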
#define FOR_ALL_ZONES(xx) DMA_ZONE(xx) DMA32_ZONE(xx) xx##_NORMAL HIGHMEM_ZONE(xx) , xx##_MOVABLE

enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
		FOR_ALL_ZONES(PGALLOC),
		PGFREE, PGACTIVATE, PGDEACTIVATE,
		PGFAULT, PGMAJFAULT,
		FOR_ALL_ZONES(PGREFILL),
		FOR_ALL_ZONES(PGSTEAL),
		FOR_ALL_ZONES(PGSCAN_KSWAPD),
		FOR_ALL_ZONES(PGSCAN_DIRECT),
		PGINODESTEAL, SLABS_SCANNED, KSWAPD_STEAL, KSWAPD_INODESTEAL,
		PAGEOUTRUN, ALLOCSTALL, PGROTATED,
#ifdef CONFIG_HUGETLB_PAGE
		HTLB_BUDDY_PGALLOC, HTLB_BUDDY_PGALLOC_FAIL,
#endif
		UNEVICTABLE_PGCULLED,	/* culled to noreclaim list */
		UNEVICTABLE_PGSCANNED,	/* scanned for reclaimability */
		UNEVICTABLE_PGRESCUED,	/* rescued from noreclaim list */
		UNEVICTABLE_PGMLOCKED,
		UNEVICTABLE_PGMUNLOCKED,
		UNEVICTABLE_PGCLEARED,	/* on COW, page truncate */
		UNEVICTABLE_PGSTRANDED,	/* unable to isolate on unlock */
		UNEVICTABLE_MLOCKFREED,
		NR_VM_EVENT_ITEMS
};

extern int sysctl_stat_interval;

#ifdef CONFIG_VM_EVENT_COUNTERS
/*
 * Light weight per cpu counter implementation.
 *
 * Counters should only be incremented and no critical kernel component
 * should rely on the counter values.
 *
 * Counters are handled completely inline. On many platforms the code
 * generated will simply be the increment of a global address.
 */

struct vm_event_state {
	unsigned long event[NR_VM_EVENT_ITEMS];
};

DECLARE_PER_CPU(struct vm_event_state, vm_event_states);
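
/*
 * The double-underscore variants below assume the caller has already
 * disabled preemption (or is otherwise pinned to a CPU); the plain
 * variants use get_cpu_var()/put_cpu() to disable and re-enable
 * preemption around the per-CPU update themselves.
 */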
static inline void __count_vm_event(enum vm_event_item item)
{
	__get_cpu_var(vm_event_states).event[item]++;
}

static inline void count_vm_event(enum vm_event_item item)
{
	get_cpu_var(vm_event_states).event[item]++;
	put_cpu();
}

static inline void __count_vm_events(enum vm_event_item item, long delta)
{
	__get_cpu_var(vm_event_states).event[item] += delta;
}

static inline void count_vm_events(enum vm_event_item item, long delta)
{
	get_cpu_var(vm_event_states).event[item] += delta;
	put_cpu();
}
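
/*
 * Typical usage (illustrative): the page fault path bumps the PGFAULT
 * counter with count_vm_event(PGFAULT), while batched paths can account
 * several events at once, e.g. count_vm_events(PGFREE, nr_pages).
 */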

extern void all_vm_events(unsigned long *);

#ifdef CONFIG_HOTPLUG
extern void vm_events_fold_cpu(int cpu);
#else
static inline void vm_events_fold_cpu(int cpu)
{
}
#endif

#else

/* Disable counters */
static inline void count_vm_event(enum vm_event_item item)
{
}
static inline void count_vm_events(enum vm_event_item item, long delta)
{
}
static inline void __count_vm_event(enum vm_event_item item)
{
}
static inline void __count_vm_events(enum vm_event_item item, long delta)
{
}
static inline void all_vm_events(unsigned long *ret)
{
}
static inline void vm_events_fold_cpu(int cpu)
{
}

#endif /* CONFIG_VM_EVENT_COUNTERS */
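
/*
 * __count_zone_vm_events() maps a per-zone event base name (e.g. PGALLOC)
 * plus a zone onto the matching enumerator: item##_NORMAL is the
 * enumerator for ZONE_NORMAL, so subtracting ZONE_NORMAL and adding
 * zone_idx(zone) selects the entry for the zone actually passed in.
 */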
#define __count_zone_vm_events(item, zone, delta) \
		__count_vm_events(item##_NORMAL - ZONE_NORMAL + \
		zone_idx(zone), delta)

/*
 * Zone based page accounting with per cpu differentials.
 */
extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];

static inline void zone_page_state_add(long x, struct zone *zone,
				 enum zone_stat_item item)
{
	atomic_long_add(x, &zone->vm_stat[item]);
	atomic_long_add(x, &vm_stat[item]);
}
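
/*
 * On SMP the per-CPU differentials can leave the summed counter
 * transiently negative, so the readers below clamp negative values to
 * zero rather than report a nonsensical count.
 */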
static inline unsigned long global_page_state(enum zone_stat_item item)
{
	long x = atomic_long_read(&vm_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

static inline unsigned long zone_page_state(struct zone *zone,
					enum zone_stat_item item)
{
	long x = atomic_long_read(&zone->vm_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}
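
/*
 * zone_lru_pages() is the number of pages on the zone's evictable LRU
 * lists (active and inactive, anon and file); global_lru_pages() is the
 * same sum over all zones.
 */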
extern unsigned long global_lru_pages(void);

static inline unsigned long zone_lru_pages(struct zone *zone)
{
	return (zone_page_state(zone, NR_ACTIVE_ANON)
		+ zone_page_state(zone, NR_ACTIVE_FILE)
		+ zone_page_state(zone, NR_INACTIVE_ANON)
		+ zone_page_state(zone, NR_INACTIVE_FILE));
}

#ifdef CONFIG_NUMA
/*
 * Determine the per node value of a stat item. This function
 * is called frequently in a NUMA machine, so try to be as
 * frugal as possible.
 */
static inline unsigned long node_page_state(int node,
				 enum zone_stat_item item)
{
	struct zone *zones = NODE_DATA(node)->node_zones;

	return
#ifdef CONFIG_ZONE_DMA
		zone_page_state(&zones[ZONE_DMA], item) +
#endif
#ifdef CONFIG_ZONE_DMA32
		zone_page_state(&zones[ZONE_DMA32], item) +
#endif
#ifdef CONFIG_HIGHMEM
		zone_page_state(&zones[ZONE_HIGHMEM], item) +
#endif
		zone_page_state(&zones[ZONE_NORMAL], item) +
		zone_page_state(&zones[ZONE_MOVABLE], item);
}

extern void zone_statistics(struct zone *, struct zone *);

#else

#define node_page_state(node, item) global_page_state(item)
#define zone_statistics(_zl, _z) do { } while (0)

#endif /* CONFIG_NUMA */
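
/*
 * The add/sub wrappers are simple conveniences over the mod variants;
 * the double-underscore forms again assume the caller provides the
 * necessary protection (see the SMP declarations below).
 */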
#define __add_zone_page_state(__z, __i, __d) \
		__mod_zone_page_state(__z, __i, __d)
#define __sub_zone_page_state(__z, __i, __d) \
		__mod_zone_page_state(__z, __i, -(__d))

#define add_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, __d)
#define sub_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, -(__d))

static inline void zap_zone_vm_stats(struct zone *zone)
{
	memset(zone->vm_stat, 0, sizeof(zone->vm_stat));
}

extern void inc_zone_state(struct zone *, enum zone_stat_item);

#ifdef CONFIG_SMP
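/*
 * On SMP these are implemented in mm/vmstat.c and maintain per-CPU
 * differentials that are periodically folded into the zone and global
 * counters (see refresh_cpu_vm_stats()); the double-underscore variants
 * are for callers that have already disabled interrupts or preemption.
 */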
void __mod_zone_page_state(struct zone *, enum zone_stat_item item, int);
void __inc_zone_page_state(struct page *, enum zone_stat_item);
void __dec_zone_page_state(struct page *, enum zone_stat_item);

void mod_zone_page_state(struct zone *, enum zone_stat_item, int);
void inc_zone_page_state(struct page *, enum zone_stat_item);
void dec_zone_page_state(struct page *, enum zone_stat_item);

extern void inc_zone_state(struct zone *, enum zone_stat_item);
extern void __inc_zone_state(struct zone *, enum zone_stat_item);
extern void dec_zone_state(struct zone *, enum zone_stat_item);
extern void __dec_zone_state(struct zone *, enum zone_stat_item);

void refresh_cpu_vm_stats(int);

#else /* CONFIG_SMP */

/*
 * We do not maintain differentials in a single processor configuration.
 * The functions directly modify the zone and global counters.
 */
static inline void __mod_zone_page_state(struct zone *zone,
			enum zone_stat_item item, int delta)
{
	zone_page_state_add(delta, zone, item);
}

static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
{
	atomic_long_inc(&zone->vm_stat[item]);
	atomic_long_inc(&vm_stat[item]);
}

static inline void __inc_zone_page_state(struct page *page,
			enum zone_stat_item item)
{
	__inc_zone_state(page_zone(page), item);
}

static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
{
	atomic_long_dec(&zone->vm_stat[item]);
	atomic_long_dec(&vm_stat[item]);
}

static inline void __dec_zone_page_state(struct page *page,
			enum zone_stat_item item)
{
	__dec_zone_state(page_zone(page), item);
}

/*
 * We only use atomic operations to update counters. So there is no need to
 * disable interrupts.
 */
#define inc_zone_page_state __inc_zone_page_state
#define dec_zone_page_state __dec_zone_page_state
#define mod_zone_page_state __mod_zone_page_state

static inline void refresh_cpu_vm_stats(int cpu) { }

#endif

#endif /* _LINUX_VMSTAT_H */