page_owner.c

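/*
 * page_owner: track the call stack, order and gfp mask behind every page
 * allocation, using per-page storage provided by the page_ext
 * infrastructure. The feature is off by default; boot with "page_owner=on"
 * to enable it, then read the records from the debugfs file
 * /sys/kernel/debug/page_owner.
 */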
#include <linux/debugfs.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/bootmem.h>
#include <linux/stacktrace.h>
#include <linux/page_owner.h>
#include "internal.h"

static bool page_owner_disabled = true;
bool page_owner_inited __read_mostly;

static void init_early_allocated_pages(void);
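
/*
 * Parse the "page_owner=on" boot parameter. Tracking stays disabled unless
 * explicitly requested, since every page then carries a saved stack trace
 * in its page_ext entry.
 */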
static int early_page_owner_param(char *buf)
{
        if (!buf)
                return -EINVAL;

        if (strcmp(buf, "on") == 0)
                page_owner_disabled = false;

        return 0;
}
early_param("page_owner", early_page_owner_param);

static bool need_page_owner(void)
{
        if (page_owner_disabled)
                return false;

        return true;
}

static void init_page_owner(void)
{
        if (page_owner_disabled)
                return;

        page_owner_inited = true;
        init_early_allocated_pages();
}

struct page_ext_operations page_owner_ops = {
        .need = need_page_owner,
        .init = init_page_owner,
};
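
/*
 * Called when pages are freed: clear the owner bit on each of the
 * 2^order constituent pages so that stale records are not reported.
 */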
void __reset_page_owner(struct page *page, unsigned int order)
{
        int i;
        struct page_ext *page_ext;

        for (i = 0; i < (1 << order); i++) {
                page_ext = lookup_page_ext(page + i);
                __clear_bit(PAGE_EXT_OWNER, &page_ext->flags);
        }
}
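
/*
 * Called from the page allocator on each allocation: save the current
 * stack trace (skipping three internal allocator frames) together with
 * the order and gfp mask, then mark the record as valid.
 */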
void __set_page_owner(struct page *page, unsigned int order, gfp_t gfp_mask)
{
        struct page_ext *page_ext;
        struct stack_trace *trace;

        page_ext = lookup_page_ext(page);

        trace = &page_ext->trace;
        trace->nr_entries = 0;
        trace->max_entries = ARRAY_SIZE(page_ext->trace_entries);
        trace->entries = &page_ext->trace_entries[0];
        trace->skip = 3;
        save_stack_trace(&page_ext->trace);

        page_ext->order = order;
        page_ext->gfp_mask = gfp_mask;

        __set_bit(PAGE_EXT_OWNER, &page_ext->flags);
}
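
/*
 * Format one record into a kernel buffer and copy it to userspace:
 * a header with order and gfp mask, one line of pageblock/migratetype
 * and page-flag information, then the saved stack trace. Truncation at
 * any step is reported as -ENOMEM.
 */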
static ssize_t
print_page_owner(char __user *buf, size_t count, unsigned long pfn,
                struct page *page, struct page_ext *page_ext)
{
        int ret;
        int pageblock_mt, page_mt;
        char *kbuf;

        kbuf = kmalloc(count, GFP_KERNEL);
        if (!kbuf)
                return -ENOMEM;

        ret = snprintf(kbuf, count,
                        "Page allocated via order %u, mask 0x%x\n",
                        page_ext->order, page_ext->gfp_mask);
        if (ret >= count)
                goto err;

        /* Print information relevant to grouping pages by mobility */
        pageblock_mt = get_pfnblock_migratetype(page, pfn);
        page_mt = gfpflags_to_migratetype(page_ext->gfp_mask);
        ret += snprintf(kbuf + ret, count - ret,
                        "PFN %lu Block %lu type %d %s Flags %s%s%s%s%s%s%s%s%s%s%s%s\n",
                        pfn,
                        pfn >> pageblock_order,
                        pageblock_mt,
                        pageblock_mt != page_mt ? "Fallback" : " ",
                        PageLocked(page) ? "K" : " ",
                        PageError(page) ? "E" : " ",
                        PageReferenced(page) ? "R" : " ",
                        PageUptodate(page) ? "U" : " ",
                        PageDirty(page) ? "D" : " ",
                        PageLRU(page) ? "L" : " ",
                        PageActive(page) ? "A" : " ",
                        PageSlab(page) ? "S" : " ",
                        PageWriteback(page) ? "W" : " ",
                        PageCompound(page) ? "C" : " ",
                        PageSwapCache(page) ? "B" : " ",
                        PageMappedToDisk(page) ? "M" : " ");
        if (ret >= count)
                goto err;

        ret += snprint_stack_trace(kbuf + ret, count - ret,
                        &page_ext->trace, 0);
        if (ret >= count)
                goto err;

        ret += snprintf(kbuf + ret, count - ret, "\n");
        if (ret >= count)
                goto err;

        if (copy_to_user(buf, kbuf, ret))
                ret = -EFAULT;

        kfree(kbuf);
        return ret;

err:
        kfree(kbuf);
        return -ENOMEM;
}
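
/*
 * debugfs read handler. The file offset encodes the next PFN to scan
 * (relative to min_low_pfn); each read returns at most one record, so
 * userspace simply reads until EOF to dump every tracked page.
 */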
static ssize_t
read_page_owner(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
        unsigned long pfn;
        struct page *page;
        struct page_ext *page_ext;

        if (!page_owner_inited)
                return -EINVAL;

        page = NULL;
        pfn = min_low_pfn + *ppos;

        /* Find a valid PFN or the start of a MAX_ORDER_NR_PAGES area */
        while (!pfn_valid(pfn) && (pfn & (MAX_ORDER_NR_PAGES - 1)) != 0)
                pfn++;

        drain_all_pages(NULL);

        /* Find an allocated page */
        for (; pfn < max_pfn; pfn++) {
                /*
                 * If the new page is in a new MAX_ORDER_NR_PAGES area,
                 * validate the area as existing, skip it if not
                 */
                if ((pfn & (MAX_ORDER_NR_PAGES - 1)) == 0 && !pfn_valid(pfn)) {
                        pfn += MAX_ORDER_NR_PAGES - 1;
                        continue;
                }

                /* Check for holes within a MAX_ORDER area */
                if (!pfn_valid_within(pfn))
                        continue;

                page = pfn_to_page(pfn);
                if (PageBuddy(page)) {
                        unsigned long freepage_order = page_order_unsafe(page);

                        if (freepage_order < MAX_ORDER)
                                pfn += (1UL << freepage_order) - 1;
                        continue;
                }

                page_ext = lookup_page_ext(page);

                /*
                 * Some pages could be missed by concurrent allocation or free,
                 * because we don't hold the zone lock.
                 */
                if (!test_bit(PAGE_EXT_OWNER, &page_ext->flags))
                        continue;

                /* Record the next PFN to read in the file offset */
                *ppos = (pfn - min_low_pfn) + 1;

                return print_page_owner(buf, count, pfn, page, page_ext);
        }

        return 0;
}
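
/*
 * Walk one zone and mark every page that was allocated before page_owner
 * came up as owned, so that it at least appears in the dump. Such pages
 * are recorded with order 0 and a zero gfp mask because their real
 * allocation parameters are unknown.
 */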
static void init_pages_in_zone(pg_data_t *pgdat, struct zone *zone)
{
        struct page *page;
        struct page_ext *page_ext;
        unsigned long pfn = zone->zone_start_pfn, block_end_pfn;
        unsigned long end_pfn = pfn + zone->spanned_pages;
        unsigned long count = 0;

        /* Scan block by block. First and last block may be incomplete */
        pfn = zone->zone_start_pfn;

        /*
         * Walk the zone in pageblock_nr_pages steps. If a page block spans
         * a zone boundary, it will be double counted between zones. This does
         * not matter as the mixed block count will still be correct
         */
        for (; pfn < end_pfn; ) {
                if (!pfn_valid(pfn)) {
                        pfn = ALIGN(pfn + 1, MAX_ORDER_NR_PAGES);
                        continue;
                }

                block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);
                block_end_pfn = min(block_end_pfn, end_pfn);

                page = pfn_to_page(pfn);

                for (; pfn < block_end_pfn; pfn++) {
                        if (!pfn_valid_within(pfn))
                                continue;

                        page = pfn_to_page(pfn);

                        /*
                         * It is safe to check the buddy flag and order here,
                         * because this runs at init time and only a single
                         * thread is active.
                         */
                        if (PageBuddy(page)) {
                                pfn += (1UL << page_order(page)) - 1;
                                continue;
                        }

                        if (PageReserved(page))
                                continue;

                        page_ext = lookup_page_ext(page);

                        /* Maybe an overlapping zone */
                        if (test_bit(PAGE_EXT_OWNER, &page_ext->flags))
                                continue;

                        /* Found an early allocated page */
                        set_page_owner(page, 0, 0);
                        count++;
                }
        }

        pr_info("Node %d, zone %8s: page owner found early allocated %lu pages\n",
                pgdat->node_id, zone->name, count);
}
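
/* Apply init_pages_in_zone() to each populated zone under its zone lock */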
static void init_zones_in_node(pg_data_t *pgdat)
{
        struct zone *zone;
        struct zone *node_zones = pgdat->node_zones;
        unsigned long flags;

        for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; ++zone) {
                if (!populated_zone(zone))
                        continue;

                spin_lock_irqsave(&zone->lock, flags);
                init_pages_in_zone(pgdat, zone);
                spin_unlock_irqrestore(&zone->lock, flags);
        }
}
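
/*
 * Entry point for the fixup pass above: drain the per-cpu page lists
 * first so that fewer free pages are mistaken for early allocations,
 * then visit every online node.
 */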
static void init_early_allocated_pages(void)
{
        pg_data_t *pgdat;

        drain_all_pages(NULL);
        for_each_online_pgdat(pgdat)
                init_zones_in_node(pgdat);
}

static const struct file_operations proc_page_owner_operations = {
        .read = read_page_owner,
};
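
/*
 * Create the root-readable debugfs entry once the kernel is up; there is
 * nothing to do if the boot parameter never enabled the feature.
 */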
static int __init pageowner_init(void)
{
        struct dentry *dentry;

        if (!page_owner_inited) {
                pr_info("page_owner is disabled\n");
                return 0;
        }

        dentry = debugfs_create_file("page_owner", S_IRUSR, NULL,
                        NULL, &proc_page_owner_operations);
        if (IS_ERR(dentry))
                return PTR_ERR(dentry);

        return 0;
}
module_init(pageowner_init)