page_owner.c

#include <linux/debugfs.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/bootmem.h>
#include <linux/stacktrace.h>
#include <linux/page_owner.h>
#include "internal.h"

static bool page_owner_disabled = true;
bool page_owner_inited __read_mostly;

static void init_early_allocated_pages(void);

static int early_page_owner_param(char *buf)
{
        if (!buf)
                return -EINVAL;

        if (strcmp(buf, "on") == 0)
                page_owner_disabled = false;

        return 0;
}
early_param("page_owner", early_page_owner_param);
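
/*
 * page_owner is disabled by default; booting with "page_owner=on" on
 * the kernel command line clears page_owner_disabled above, which in
 * turn lets need_page_owner()/init_page_owner() below enable tracking.
 */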

static bool need_page_owner(void)
{
        if (page_owner_disabled)
                return false;

        return true;
}

static void init_page_owner(void)
{
        if (page_owner_disabled)
                return;

        page_owner_inited = true;
        init_early_allocated_pages();
}

struct page_ext_operations page_owner_ops = {
        .need = need_page_owner,
        .init = init_page_owner,
};
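
/*
 * The .need/.init pair is the page_ext contract: the page_ext core
 * calls .need early in boot to decide whether space for this extension
 * should be reserved at all, and calls .init once that space is ready.
 */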

void __reset_page_owner(struct page *page, unsigned int order)
{
        int i;
        struct page_ext *page_ext;

        /* Clear the owner bit on every page of the 2^order block */
        for (i = 0; i < (1 << order); i++) {
                page_ext = lookup_page_ext(page + i);
                __clear_bit(PAGE_EXT_OWNER, &page_ext->flags);
        }
}

void __set_page_owner(struct page *page, unsigned int order, gfp_t gfp_mask)
{
        struct page_ext *page_ext = lookup_page_ext(page);
        struct stack_trace trace = {
                .nr_entries = 0,
                .max_entries = ARRAY_SIZE(page_ext->trace_entries),
                .entries = &page_ext->trace_entries[0],
                /* skip the innermost frames so the trace starts at the
                 * allocation call site */
                .skip = 3,
        };

        save_stack_trace(&trace);

        page_ext->order = order;
        page_ext->gfp_mask = gfp_mask;
        page_ext->nr_entries = trace.nr_entries;

        __set_bit(PAGE_EXT_OWNER, &page_ext->flags);
}
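
/*
 * A minimal sketch (not compiled here) of how the allocator reaches
 * __set_page_owner(): the corresponding inline wrapper in
 * include/linux/page_owner.h bails out while page_owner_inited is
 * false, so a disabled page_owner costs the hot path only a branch.
 * Illustrative; see the real header for the exact definition.
 */
#if 0
static inline void set_page_owner(struct page *page,
                                  unsigned int order, gfp_t gfp_mask)
{
        if (likely(!page_owner_inited))
                return;

        __set_page_owner(page, order, gfp_mask);
}
#endif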

gfp_t __get_page_owner_gfp(struct page *page)
{
        struct page_ext *page_ext = lookup_page_ext(page);

        return page_ext->gfp_mask;
}

static ssize_t
print_page_owner(char __user *buf, size_t count, unsigned long pfn,
                struct page *page, struct page_ext *page_ext)
{
        int ret;
        int pageblock_mt, page_mt;
        char *kbuf;
        struct stack_trace trace = {
                .nr_entries = page_ext->nr_entries,
                .entries = &page_ext->trace_entries[0],
        };

        kbuf = kmalloc(count, GFP_KERNEL);
        if (!kbuf)
                return -ENOMEM;

        ret = snprintf(kbuf, count,
                        "Page allocated via order %u, mask 0x%x\n",
                        page_ext->order, page_ext->gfp_mask);
        if (ret >= count)
                goto err;

        /* Print information relevant to grouping pages by mobility */
        pageblock_mt = get_pfnblock_migratetype(page, pfn);
        page_mt = gfpflags_to_migratetype(page_ext->gfp_mask);
        ret += snprintf(kbuf + ret, count - ret,
                        "PFN %lu Block %lu type %d %s Flags %s%s%s%s%s%s%s%s%s%s%s%s\n",
                        pfn,
                        pfn >> pageblock_order,
                        pageblock_mt,
                        pageblock_mt != page_mt ? "Fallback" : " ",
                        PageLocked(page)        ? "K" : " ",
                        PageError(page)         ? "E" : " ",
                        PageReferenced(page)    ? "R" : " ",
                        PageUptodate(page)      ? "U" : " ",
                        PageDirty(page)         ? "D" : " ",
                        PageLRU(page)           ? "L" : " ",
                        PageActive(page)        ? "A" : " ",
                        PageSlab(page)          ? "S" : " ",
                        PageWriteback(page)     ? "W" : " ",
                        PageCompound(page)      ? "C" : " ",
                        PageSwapCache(page)     ? "B" : " ",
                        PageMappedToDisk(page)  ? "M" : " ");
        if (ret >= count)
                goto err;

        ret += snprint_stack_trace(kbuf + ret, count - ret, &trace, 0);
        if (ret >= count)
                goto err;

        ret += snprintf(kbuf + ret, count - ret, "\n");
        if (ret >= count)
                goto err;

        if (copy_to_user(buf, kbuf, ret))
                ret = -EFAULT;

        kfree(kbuf);
        return ret;

err:
        kfree(kbuf);
        return -ENOMEM;
}
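
/*
 * Illustrative shape of one record produced above, reconstructed from
 * the format strings (the numbers here are made up):
 *
 *   Page allocated via order 0, mask 0x24000c0
 *   PFN 1048576 Block 2048 type 0   Flags     D L
 *   <saved allocation stack, one frame per line, via snprint_stack_trace>
 */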

static ssize_t
read_page_owner(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
        unsigned long pfn;
        struct page *page;
        struct page_ext *page_ext;

        if (!page_owner_inited)
                return -EINVAL;

        page = NULL;
        pfn = min_low_pfn + *ppos;

        /* Find a valid PFN or the start of a MAX_ORDER_NR_PAGES area */
        while (!pfn_valid(pfn) && (pfn & (MAX_ORDER_NR_PAGES - 1)) != 0)
                pfn++;

        drain_all_pages(NULL);

        /* Find an allocated page */
        for (; pfn < max_pfn; pfn++) {
                /*
                 * If the new page is in a new MAX_ORDER_NR_PAGES area,
                 * validate the area as existing, skip it if not
                 */
                if ((pfn & (MAX_ORDER_NR_PAGES - 1)) == 0 && !pfn_valid(pfn)) {
                        pfn += MAX_ORDER_NR_PAGES - 1;
                        continue;
                }

                /* Check for holes within a MAX_ORDER area */
                if (!pfn_valid_within(pfn))
                        continue;

                page = pfn_to_page(pfn);
                if (PageBuddy(page)) {
                        unsigned long freepage_order = page_order_unsafe(page);

                        if (freepage_order < MAX_ORDER)
                                pfn += (1UL << freepage_order) - 1;
                        continue;
                }

                page_ext = lookup_page_ext(page);

                /*
                 * Some pages could be missed by concurrent allocation or free,
                 * because we don't hold the zone lock.
                 */
                if (!test_bit(PAGE_EXT_OWNER, &page_ext->flags))
                        continue;

                /* Record the next PFN to read in the file offset */
                *ppos = (pfn - min_low_pfn) + 1;

                return print_page_owner(buf, count, pfn, page, page_ext);
        }

        return 0;
}
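
/*
 * A minimal userspace sketch for consuming this file, assuming debugfs
 * is mounted at /sys/kernel/debug (the mount point and buffer size are
 * assumptions, not requirements). Each read() returns one record; a
 * return of 0 means the PFN scan is done.
 *
 *      #include <fcntl.h>
 *      #include <stdio.h>
 *      #include <unistd.h>
 *
 *      int main(void)
 *      {
 *              char buf[4096];
 *              ssize_t n;
 *              int fd = open("/sys/kernel/debug/page_owner", O_RDONLY);
 *
 *              if (fd < 0) {
 *                      perror("open");
 *                      return 1;
 *              }
 *              while ((n = read(fd, buf, sizeof(buf))) > 0)
 *                      fwrite(buf, 1, n, stdout);
 *              close(fd);
 *              return 0;
 *      }
 */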

static void init_pages_in_zone(pg_data_t *pgdat, struct zone *zone)
{
        struct page *page;
        struct page_ext *page_ext;
        unsigned long pfn = zone->zone_start_pfn, block_end_pfn;
        unsigned long end_pfn = pfn + zone->spanned_pages;
        unsigned long count = 0;

        /* Scan block by block. First and last block may be incomplete */
        pfn = zone->zone_start_pfn;

        /*
         * Walk the zone in pageblock_nr_pages steps. If a page block spans
         * a zone boundary, it will be double counted between zones. This does
         * not matter as the mixed block count will still be correct
         */
        for (; pfn < end_pfn; ) {
                if (!pfn_valid(pfn)) {
                        pfn = ALIGN(pfn + 1, MAX_ORDER_NR_PAGES);
                        continue;
                }

                block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);
                block_end_pfn = min(block_end_pfn, end_pfn);

                page = pfn_to_page(pfn);

                for (; pfn < block_end_pfn; pfn++) {
                        if (!pfn_valid_within(pfn))
                                continue;

                        page = pfn_to_page(pfn);

                        /*
                         * We are safe to check buddy flag and order, because
                         * this is init stage and only single thread runs.
                         */
                        if (PageBuddy(page)) {
                                pfn += (1UL << page_order(page)) - 1;
                                continue;
                        }

                        if (PageReserved(page))
                                continue;

                        /* Maybe an overlapping zone */
                        page_ext = lookup_page_ext(page);
                        if (test_bit(PAGE_EXT_OWNER, &page_ext->flags))
                                continue;

                        /* Found early allocated page */
                        set_page_owner(page, 0, 0);
                        count++;
                }
        }

        pr_info("Node %d, zone %8s: page owner found early allocated %lu pages\n",
                pgdat->node_id, zone->name, count);
}

static void init_zones_in_node(pg_data_t *pgdat)
{
        struct zone *zone;
        struct zone *node_zones = pgdat->node_zones;
        unsigned long flags;

        for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; ++zone) {
                if (!populated_zone(zone))
                        continue;

                spin_lock_irqsave(&zone->lock, flags);
                init_pages_in_zone(pgdat, zone);
                spin_unlock_irqrestore(&zone->lock, flags);
        }
}

static void init_early_allocated_pages(void)
{
        pg_data_t *pgdat;

        drain_all_pages(NULL);
        for_each_online_pgdat(pgdat)
                init_zones_in_node(pgdat);
}
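
/*
 * Note on drain_all_pages(NULL) above: pages sitting on per-cpu free
 * lists are not marked PageBuddy, so init_pages_in_zone() would
 * mistake them for early allocations. Flushing them back into the
 * buddy allocator first keeps the count honest.
 */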

static const struct file_operations proc_page_owner_operations = {
        .read = read_page_owner,
};

static int __init pageowner_init(void)
{
        struct dentry *dentry;

        if (!page_owner_inited) {
                pr_info("page_owner is disabled\n");
                return 0;
        }

        dentry = debugfs_create_file("page_owner", S_IRUSR, NULL,
                        NULL, &proc_page_owner_operations);
        if (IS_ERR(dentry))
                return PTR_ERR(dentry);

        return 0;
}
late_initcall(pageowner_init)