page_owner.c

#include <linux/debugfs.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/bootmem.h>
#include <linux/stacktrace.h>
#include <linux/page_owner.h>
#include <linux/jump_label.h>
#include <linux/migrate.h>
#include "internal.h"
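
/*
 * page_owner records the allocation order, gfp mask and allocation stack
 * trace for every allocated page in its struct page_ext. It is disabled by
 * default; boot with "page_owner=on" to enable it, then read the collected
 * records from /sys/kernel/debug/page_owner (one page per read() call).
 */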

static bool page_owner_disabled = true;
DEFINE_STATIC_KEY_FALSE(page_owner_inited);

static void init_early_allocated_pages(void);

static int early_page_owner_param(char *buf)
{
	if (!buf)
		return -EINVAL;

	if (strcmp(buf, "on") == 0)
		page_owner_disabled = false;

	return 0;
}
early_param("page_owner", early_page_owner_param);

static bool need_page_owner(void)
{
	if (page_owner_disabled)
		return false;

	return true;
}

static void init_page_owner(void)
{
	if (page_owner_disabled)
		return;

	static_branch_enable(&page_owner_inited);
	init_early_allocated_pages();
}

struct page_ext_operations page_owner_ops = {
	.need = need_page_owner,
	.init = init_page_owner,
};
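
/* Called on page free: clear the owner bit on every page of the block. */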
void __reset_page_owner(struct page *page, unsigned int order)
{
	int i;
	struct page_ext *page_ext;

	for (i = 0; i < (1 << order); i++) {
		page_ext = lookup_page_ext(page + i);
		__clear_bit(PAGE_EXT_OWNER, &page_ext->flags);
	}
}
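
/*
 * Called on page allocation: record the order, gfp mask and allocation
 * stack trace in page_ext. skip = 3 drops the innermost three frames
 * (the stack-saving machinery and the allocator entry points) so the
 * trace starts at the real caller.
 */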
void __set_page_owner(struct page *page, unsigned int order, gfp_t gfp_mask)
{
	struct page_ext *page_ext = lookup_page_ext(page);
	struct stack_trace trace = {
		.nr_entries = 0,
		.max_entries = ARRAY_SIZE(page_ext->trace_entries),
		.entries = &page_ext->trace_entries[0],
		.skip = 3,
	};

	save_stack_trace(&trace);

	page_ext->order = order;
	page_ext->gfp_mask = gfp_mask;
	page_ext->nr_entries = trace.nr_entries;
	page_ext->last_migrate_reason = -1;

	__set_bit(PAGE_EXT_OWNER, &page_ext->flags);
}

void __set_page_owner_migrate_reason(struct page *page, int reason)
{
	struct page_ext *page_ext = lookup_page_ext(page);

	page_ext->last_migrate_reason = reason;
}

gfp_t __get_page_owner_gfp(struct page *page)
{
	struct page_ext *page_ext = lookup_page_ext(page);

	return page_ext->gfp_mask;
}
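
/* Transfer owner info from a page being migrated to its migration target. */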
void __copy_page_owner(struct page *oldpage, struct page *newpage)
{
	struct page_ext *old_ext = lookup_page_ext(oldpage);
	struct page_ext *new_ext = lookup_page_ext(newpage);
	int i;

	new_ext->order = old_ext->order;
	new_ext->gfp_mask = old_ext->gfp_mask;
	new_ext->nr_entries = old_ext->nr_entries;

	for (i = 0; i < ARRAY_SIZE(new_ext->trace_entries); i++)
		new_ext->trace_entries[i] = old_ext->trace_entries[i];

	/*
	 * We don't clear the bit on the oldpage as it's going to be freed
	 * after migration. Until then, the info can be useful in case of
	 * a bug, and the overall stats will be off a bit only temporarily.
	 * Also, migrate_misplaced_transhuge_page() can still fail the
	 * migration and then we want the oldpage to retain the info. But
	 * in that case we also don't need to explicitly clear the info from
	 * the new page, which will be freed.
	 */
	__set_bit(PAGE_EXT_OWNER, &new_ext->flags);
}
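
/*
 * Format one page's owner record into a kernel buffer and copy it to
 * userspace. Returns the number of bytes written, -ENOMEM if the record
 * does not fit in the caller's buffer, or -EFAULT on a failed copy.
 */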
static ssize_t
print_page_owner(char __user *buf, size_t count, unsigned long pfn,
		struct page *page, struct page_ext *page_ext)
{
	int ret;
	int pageblock_mt, page_mt;
	char *kbuf;
	struct stack_trace trace = {
		.nr_entries = page_ext->nr_entries,
		.entries = &page_ext->trace_entries[0],
	};

	kbuf = kmalloc(count, GFP_KERNEL);
	if (!kbuf)
		return -ENOMEM;

	ret = snprintf(kbuf, count,
			"Page allocated via order %u, mask %#x(%pGg)\n",
			page_ext->order, page_ext->gfp_mask,
			&page_ext->gfp_mask);
	if (ret >= count)
		goto err;

	/* Print information relevant to grouping pages by mobility */
	pageblock_mt = get_pageblock_migratetype(page);
	page_mt = gfpflags_to_migratetype(page_ext->gfp_mask);
	ret += snprintf(kbuf + ret, count - ret,
			"PFN %lu type %s Block %lu type %s Flags %#lx(%pGp)\n",
			pfn,
			migratetype_names[page_mt],
			pfn >> pageblock_order,
			migratetype_names[pageblock_mt],
			page->flags, &page->flags);
	if (ret >= count)
		goto err;

	ret += snprint_stack_trace(kbuf + ret, count - ret, &trace, 0);
	if (ret >= count)
		goto err;

	if (page_ext->last_migrate_reason != -1) {
		ret += snprintf(kbuf + ret, count - ret,
			"Page has been migrated, last migrate reason: %s\n",
			migrate_reason_names[page_ext->last_migrate_reason]);
		if (ret >= count)
			goto err;
	}

	ret += snprintf(kbuf + ret, count - ret, "\n");
	if (ret >= count)
		goto err;

	if (copy_to_user(buf, kbuf, ret))
		ret = -EFAULT;

	kfree(kbuf);
	return ret;

err:
	kfree(kbuf);
	return -ENOMEM;
}
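
/* Dump a page's owner info to the kernel log, e.g. from dump_page(). */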
void __dump_page_owner(struct page *page)
{
	struct page_ext *page_ext = lookup_page_ext(page);
	struct stack_trace trace = {
		.nr_entries = page_ext->nr_entries,
		.entries = &page_ext->trace_entries[0],
	};
	gfp_t gfp_mask = page_ext->gfp_mask;
	int mt = gfpflags_to_migratetype(gfp_mask);

	if (!test_bit(PAGE_EXT_OWNER, &page_ext->flags)) {
		pr_alert("page_owner info is not active (free page?)\n");
		return;
	}

	pr_alert("page allocated via order %u, migratetype %s, gfp_mask %#x(%pGg)\n",
		 page_ext->order, migratetype_names[mt], gfp_mask, &gfp_mask);
	print_stack_trace(&trace, 0);

	if (page_ext->last_migrate_reason != -1)
		pr_alert("page has been migrated, last migrate reason: %s\n",
			migrate_reason_names[page_ext->last_migrate_reason]);
}
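
/*
 * debugfs read handler. *ppos is used as a PFN offset from min_low_pfn
 * rather than a byte offset; each read() returns the record for the next
 * allocated page that has owner info set.
 */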
static ssize_t
read_page_owner(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
	unsigned long pfn;
	struct page *page;
	struct page_ext *page_ext;

	if (!static_branch_unlikely(&page_owner_inited))
		return -EINVAL;

	page = NULL;
	pfn = min_low_pfn + *ppos;

	/* Find a valid PFN or the start of a MAX_ORDER_NR_PAGES area */
	while (!pfn_valid(pfn) && (pfn & (MAX_ORDER_NR_PAGES - 1)) != 0)
		pfn++;

	drain_all_pages(NULL);

	/* Find an allocated page */
	for (; pfn < max_pfn; pfn++) {
		/*
		 * If the new page is in a new MAX_ORDER_NR_PAGES area,
		 * validate the area as existing, skip it if not
		 */
		if ((pfn & (MAX_ORDER_NR_PAGES - 1)) == 0 && !pfn_valid(pfn)) {
			pfn += MAX_ORDER_NR_PAGES - 1;
			continue;
		}

		/* Check for holes within a MAX_ORDER area */
		if (!pfn_valid_within(pfn))
			continue;

		page = pfn_to_page(pfn);
		if (PageBuddy(page)) {
			unsigned long freepage_order = page_order_unsafe(page);

			if (freepage_order < MAX_ORDER)
				pfn += (1UL << freepage_order) - 1;
			continue;
		}

		page_ext = lookup_page_ext(page);

		/*
		 * Some pages could be missed by concurrent allocation or free,
		 * because we don't hold the zone lock.
		 */
		if (!test_bit(PAGE_EXT_OWNER, &page_ext->flags))
			continue;

		/* Record the next PFN to read in the file offset */
		*ppos = (pfn - min_low_pfn) + 1;

		return print_page_owner(buf, count, pfn, page, page_ext);
	}

	return 0;
}
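
/*
 * Pages allocated before page_owner was initialized have no owner info.
 * Walk the zone and mark every such page as owned, with order 0 and a
 * zero gfp mask, so they still show up in the statistics.
 */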
static void init_pages_in_zone(pg_data_t *pgdat, struct zone *zone)
{
	struct page *page;
	struct page_ext *page_ext;
	unsigned long pfn = zone->zone_start_pfn, block_end_pfn;
	unsigned long end_pfn = pfn + zone->spanned_pages;
	unsigned long count = 0;

	/* Scan block by block. First and last block may be incomplete */
	pfn = zone->zone_start_pfn;

	/*
	 * Walk the zone in pageblock_nr_pages steps. If a page block spans
	 * a zone boundary, it will be double counted between zones. This does
	 * not matter as the mixed block count will still be correct
	 */
	for (; pfn < end_pfn; ) {
		if (!pfn_valid(pfn)) {
			pfn = ALIGN(pfn + 1, MAX_ORDER_NR_PAGES);
			continue;
		}

		block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);
		block_end_pfn = min(block_end_pfn, end_pfn);

		page = pfn_to_page(pfn);

		for (; pfn < block_end_pfn; pfn++) {
			if (!pfn_valid_within(pfn))
				continue;

			page = pfn_to_page(pfn);

			if (page_zone(page) != zone)
				continue;

			/*
			 * We are safe to check buddy flag and order, because
			 * this is init stage and only single thread runs.
			 */
			if (PageBuddy(page)) {
				pfn += (1UL << page_order(page)) - 1;
				continue;
			}

			if (PageReserved(page))
				continue;

			page_ext = lookup_page_ext(page);

			/* Maybe overlapping zone */
			if (test_bit(PAGE_EXT_OWNER, &page_ext->flags))
				continue;

			/* Found early allocated page */
			set_page_owner(page, 0, 0);
			count++;
		}
	}

	pr_info("Node %d, zone %8s: page owner found early allocated %lu pages\n",
		pgdat->node_id, zone->name, count);
}

static void init_zones_in_node(pg_data_t *pgdat)
{
	struct zone *zone;
	struct zone *node_zones = pgdat->node_zones;
	unsigned long flags;

	for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; ++zone) {
		if (!populated_zone(zone))
			continue;

		spin_lock_irqsave(&zone->lock, flags);
		init_pages_in_zone(pgdat, zone);
		spin_unlock_irqrestore(&zone->lock, flags);
	}
}

static void init_early_allocated_pages(void)
{
	pg_data_t *pgdat;

	drain_all_pages(NULL);
	for_each_online_pgdat(pgdat)
		init_zones_in_node(pgdat);
}
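
/* Expose the collected records via /sys/kernel/debug/page_owner. */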
static const struct file_operations proc_page_owner_operations = {
	.read = read_page_owner,
};

static int __init pageowner_init(void)
{
	struct dentry *dentry;

	if (!static_branch_unlikely(&page_owner_inited)) {
		pr_info("page_owner is disabled\n");
		return 0;
	}

	dentry = debugfs_create_file("page_owner", S_IRUSR, NULL,
			NULL, &proc_page_owner_operations);
	if (IS_ERR(dentry))
		return PTR_ERR(dentry);

	return 0;
}
late_initcall(pageowner_init)