page_cgroup.h

#ifndef __LINUX_PAGE_CGROUP_H
#define __LINUX_PAGE_CGROUP_H

enum {
	/* flags for mem_cgroup */
	PCG_LOCK,  /* Lock for pc->mem_cgroup and following bits. */
	PCG_CACHE, /* charged as cache */
	PCG_USED, /* this object is in use. */
	PCG_MIGRATION, /* under page migration */
	/* flags for mem_cgroup and file and I/O status */
	PCG_MOVE_LOCK, /* For race between move_account vs. following bits */
	PCG_FILE_MAPPED, /* page is accounted as "mapped" */
	/* No lock in page_cgroup */
	PCG_ACCT_LRU, /* page has been accounted for (under lru_lock) */
	__NR_PCG_FLAGS,
};

#ifndef __GENERATING_BOUNDS_H
#include <generated/bounds.h>

#ifdef CONFIG_CGROUP_MEM_RES_CTLR
#include <linux/bit_spinlock.h>

/*
 * Page Cgroup can be considered an extension of mem_map.
 * A page_cgroup structure is associated with every page descriptor; it
 * identifies which cgroup the page is charged to.
 * All page cgroups are allocated at boot or at memory hotplug, so the
 * page cgroup for a pfn always exists.
 */
struct page_cgroup {
	unsigned long flags;
	struct mem_cgroup *mem_cgroup;
};

void __meminit pgdat_page_cgroup_init(struct pglist_data *pgdat);

#ifdef CONFIG_SPARSEMEM
static inline void __init page_cgroup_init_flatmem(void)
{
}
extern void __init page_cgroup_init(void);
#else
void __init page_cgroup_init_flatmem(void);
static inline void __init page_cgroup_init(void)
{
}
#endif

struct page_cgroup *lookup_page_cgroup(struct page *page);
struct page *lookup_cgroup_page(struct page_cgroup *pc);

#define TESTPCGFLAG(uname, lname)					\
static inline int PageCgroup##uname(struct page_cgroup *pc)		\
	{ return test_bit(PCG_##lname, &pc->flags); }

#define SETPCGFLAG(uname, lname)					\
static inline void SetPageCgroup##uname(struct page_cgroup *pc)	\
	{ set_bit(PCG_##lname, &pc->flags); }

#define CLEARPCGFLAG(uname, lname)					\
static inline void ClearPageCgroup##uname(struct page_cgroup *pc)	\
	{ clear_bit(PCG_##lname, &pc->flags); }

#define TESTCLEARPCGFLAG(uname, lname)					\
static inline int TestClearPageCgroup##uname(struct page_cgroup *pc)	\
	{ return test_and_clear_bit(PCG_##lname, &pc->flags); }
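
/*
 * For illustration only: TESTPCGFLAG(Cache, CACHE) expands to
 *
 *	static inline int PageCgroupCache(struct page_cgroup *pc)
 *	{ return test_bit(PCG_CACHE, &pc->flags); }
 *
 * so the instantiations below generate PageCgroup*, SetPageCgroup*,
 * ClearPageCgroup* and TestClearPageCgroup* helpers for each flag.
 */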

/* Cache flag is set only once (at allocation) */
TESTPCGFLAG(Cache, CACHE)
CLEARPCGFLAG(Cache, CACHE)
SETPCGFLAG(Cache, CACHE)

TESTPCGFLAG(Used, USED)
CLEARPCGFLAG(Used, USED)
SETPCGFLAG(Used, USED)

SETPCGFLAG(AcctLRU, ACCT_LRU)
CLEARPCGFLAG(AcctLRU, ACCT_LRU)
TESTPCGFLAG(AcctLRU, ACCT_LRU)
TESTCLEARPCGFLAG(AcctLRU, ACCT_LRU)

SETPCGFLAG(FileMapped, FILE_MAPPED)
CLEARPCGFLAG(FileMapped, FILE_MAPPED)
TESTPCGFLAG(FileMapped, FILE_MAPPED)

SETPCGFLAG(Migration, MIGRATION)
CLEARPCGFLAG(Migration, MIGRATION)
TESTPCGFLAG(Migration, MIGRATION)

static inline void lock_page_cgroup(struct page_cgroup *pc)
{
	/*
	 * Don't take this lock in IRQ context.
	 * This lock protects pc->mem_cgroup and the USED, CACHE and
	 * MIGRATION bits.
	 */
	bit_spin_lock(PCG_LOCK, &pc->flags);
}

static inline void unlock_page_cgroup(struct page_cgroup *pc)
{
	bit_spin_unlock(PCG_LOCK, &pc->flags);
}

static inline void move_lock_page_cgroup(struct page_cgroup *pc,
	unsigned long *flags)
{
	/*
	 * Updates to the page-cache statistics bits in pc->flags can come
	 * from both process context and IRQ context.  Disable IRQs to
	 * avoid deadlock.
	 */
	local_irq_save(*flags);
	bit_spin_lock(PCG_MOVE_LOCK, &pc->flags);
}

static inline void move_unlock_page_cgroup(struct page_cgroup *pc,
	unsigned long *flags)
{
	bit_spin_unlock(PCG_MOVE_LOCK, &pc->flags);
	local_irq_restore(*flags);
}

#else /* CONFIG_CGROUP_MEM_RES_CTLR */
struct page_cgroup;

static inline void __meminit pgdat_page_cgroup_init(struct pglist_data *pgdat)
{
}

static inline struct page_cgroup *lookup_page_cgroup(struct page *page)
{
	return NULL;
}

static inline void page_cgroup_init(void)
{
}

static inline void __init page_cgroup_init_flatmem(void)
{
}

#endif /* CONFIG_CGROUP_MEM_RES_CTLR */

#include <linux/swap.h>

#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
extern unsigned short swap_cgroup_cmpxchg(swp_entry_t ent,
					unsigned short old, unsigned short new);
extern unsigned short swap_cgroup_record(swp_entry_t ent, unsigned short id);
extern unsigned short lookup_swap_cgroup_id(swp_entry_t ent);
extern int swap_cgroup_swapon(int type, unsigned long max_pages);
extern void swap_cgroup_swapoff(int type);
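
/*
 * Illustrative only: the swap extension records the owning cgroup's id
 * (an unsigned short, e.g. the memcg's css id) when an anonymous page is
 * swapped out, and looks it up again when the entry is swapped back in
 * or uncharged, roughly:
 *
 *	swap_cgroup_record(ent, id);		at swapout
 *	id = lookup_swap_cgroup_id(ent);	at swapin/uncharge
 *
 * swap_cgroup_swapon()/swap_cgroup_swapoff() allocate and free the
 * per-swap-device id array sized by max_pages.
 */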
#else

static inline
unsigned short swap_cgroup_record(swp_entry_t ent, unsigned short id)
{
	return 0;
}

static inline
unsigned short lookup_swap_cgroup_id(swp_entry_t ent)
{
	return 0;
}

static inline int
swap_cgroup_swapon(int type, unsigned long max_pages)
{
	return 0;
}

static inline void swap_cgroup_swapoff(int type)
{
	return;
}

#endif /* CONFIG_CGROUP_MEM_RES_CTLR_SWAP */

#endif /* !__GENERATING_BOUNDS_H */

#endif /* __LINUX_PAGE_CGROUP_H */