page_owner.c

#include <linux/debugfs.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/bootmem.h>
#include <linux/stacktrace.h>
#include <linux/page_owner.h>
#include <linux/jump_label.h>
#include <linux/migrate.h>
#include <linux/stackdepot.h>

#include "internal.h"
/*
 * TODO: teach PAGE_OWNER_STACK_DEPTH (__dump_page_owner and save_stack)
 * to use off-stack temporary storage
 */
#define PAGE_OWNER_STACK_DEPTH (16)
static bool page_owner_disabled = true;
DEFINE_STATIC_KEY_FALSE(page_owner_inited);

static depot_stack_handle_t dummy_handle;
static depot_stack_handle_t failure_handle;

static void init_early_allocated_pages(void);

static int early_page_owner_param(char *buf)
{
        if (!buf)
                return -EINVAL;

        if (strcmp(buf, "on") == 0)
                page_owner_disabled = false;

        return 0;
}
early_param("page_owner", early_page_owner_param);
static bool need_page_owner(void)
{
        if (page_owner_disabled)
                return false;

        return true;
}
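/*
 * The two handles registered below act as fallbacks for save_stack():
 * dummy_handle is returned when a recursive allocation from stackdepot is
 * detected, and failure_handle when depot_save_stack() cannot record a
 * trace (for example because it fails to allocate memory for a new entry).
 */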
static noinline void register_dummy_stack(void)
{
        unsigned long entries[4];
        struct stack_trace dummy;

        dummy.nr_entries = 0;
        dummy.max_entries = ARRAY_SIZE(entries);
        dummy.entries = &entries[0];
        dummy.skip = 0;

        save_stack_trace(&dummy);
        dummy_handle = depot_save_stack(&dummy, GFP_KERNEL);
}

static noinline void register_failure_stack(void)
{
        unsigned long entries[4];
        struct stack_trace failure;

        failure.nr_entries = 0;
        failure.max_entries = ARRAY_SIZE(entries);
        failure.entries = &entries[0];
        failure.skip = 0;

        save_stack_trace(&failure);
        failure_handle = depot_save_stack(&failure, GFP_KERNEL);
}
static void init_page_owner(void)
{
        if (page_owner_disabled)
                return;

        register_dummy_stack();
        register_failure_stack();
        static_branch_enable(&page_owner_inited);
        init_early_allocated_pages();
}

struct page_ext_operations page_owner_ops = {
        .need = need_page_owner,
        .init = init_page_owner,
};
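/*
 * Clear the PAGE_EXT_OWNER bit on every page of an order-N block so the
 * pages are no longer reported as allocated (used when the block is freed).
 */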
void __reset_page_owner(struct page *page, unsigned int order)
{
        int i;
        struct page_ext *page_ext;

        for (i = 0; i < (1 << order); i++) {
                page_ext = lookup_page_ext(page + i);
                if (unlikely(!page_ext))
                        continue;
                __clear_bit(PAGE_EXT_OWNER, &page_ext->flags);
        }
}
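/*
 * Return true if the instruction pointer 'ip' appears at least twice in the
 * captured trace, i.e. the current allocation was itself triggered from
 * within save_stack().
 */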
static inline bool check_recursive_alloc(struct stack_trace *trace,
                                        unsigned long ip)
{
        int i, count;

        if (!trace->nr_entries)
                return false;

        for (i = 0, count = 0; i < trace->nr_entries; i++) {
                if (trace->entries[i] == ip && ++count == 2)
                        return true;
        }

        return false;
}
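/*
 * Capture the current stack trace and store it in the stack depot,
 * returning a compact handle. Falls back to dummy_handle on recursion and
 * to failure_handle if the depot cannot store the trace.
 */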
static noinline depot_stack_handle_t save_stack(gfp_t flags)
{
        unsigned long entries[PAGE_OWNER_STACK_DEPTH];
        struct stack_trace trace = {
                .nr_entries = 0,
                .entries = entries,
                .max_entries = PAGE_OWNER_STACK_DEPTH,
                .skip = 0
        };
        depot_stack_handle_t handle;

        save_stack_trace(&trace);
        if (trace.nr_entries != 0 &&
            trace.entries[trace.nr_entries-1] == ULONG_MAX)
                trace.nr_entries--;

        /*
         * We need to check for recursion here because our request to
         * stackdepot could trigger a memory allocation to save the new
         * entry. That allocation would reach this point and call
         * depot_save_stack() again if we don't catch it. Since stackdepot
         * would still be short of memory, it would try to allocate again
         * and loop forever.
         */
        if (check_recursive_alloc(&trace, _RET_IP_))
                return dummy_handle;

        handle = depot_save_stack(&trace, flags);
        if (!handle)
                handle = failure_handle;

        return handle;
}
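/*
 * Record the allocation in the page's page_ext: the allocating stack trace
 * (as a depot handle), the order, the gfp mask and an initially unset
 * migrate reason, then mark the owner info as valid.
 */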
noinline void __set_page_owner(struct page *page, unsigned int order,
                                        gfp_t gfp_mask)
{
        struct page_ext *page_ext = lookup_page_ext(page);

        if (unlikely(!page_ext))
                return;

        page_ext->handle = save_stack(gfp_mask);
        page_ext->order = order;
        page_ext->gfp_mask = gfp_mask;
        page_ext->last_migrate_reason = -1;

        __set_bit(PAGE_EXT_OWNER, &page_ext->flags);
}
void __set_page_owner_migrate_reason(struct page *page, int reason)
{
        struct page_ext *page_ext = lookup_page_ext(page);

        if (unlikely(!page_ext))
                return;

        page_ext->last_migrate_reason = reason;
}
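/*
 * When a high-order page is split into order-0 pages, reset the head
 * page's recorded order and copy its owner info to every tail page.
 */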
void __split_page_owner(struct page *page, unsigned int order)
{
        int i;
        struct page_ext *page_ext = lookup_page_ext(page);

        if (unlikely(!page_ext))
                return;

        page_ext->order = 0;
        for (i = 1; i < (1 << order); i++)
                __copy_page_owner(page, page + i);
}
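/*
 * Copy the owner info from oldpage to newpage during migration. The owner
 * bit on oldpage is deliberately left set; see the comment below.
 */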
void __copy_page_owner(struct page *oldpage, struct page *newpage)
{
        struct page_ext *old_ext = lookup_page_ext(oldpage);
        struct page_ext *new_ext = lookup_page_ext(newpage);

        if (unlikely(!old_ext || !new_ext))
                return;

        new_ext->order = old_ext->order;
        new_ext->gfp_mask = old_ext->gfp_mask;
        new_ext->last_migrate_reason = old_ext->last_migrate_reason;
        new_ext->handle = old_ext->handle;

        /*
         * We don't clear the bit on the oldpage as it's going to be freed
         * after migration. Until then, the info can be useful in case of
         * a bug, and the overall stats will be off a bit only temporarily.
         * Also, migrate_misplaced_transhuge_page() can still fail the
         * migration and then we want the oldpage to retain the info. But
         * in that case we also don't need to explicitly clear the info from
         * the new page, which will be freed.
         */
        __set_bit(PAGE_EXT_OWNER, &new_ext->flags);
}
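/*
 * Format one page's owner record (allocation order, gfp mask, migrate
 * types, flags, stack trace and last migrate reason) into a kernel buffer
 * and copy it to the user buffer supplied by read_page_owner().
 */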
static ssize_t
print_page_owner(char __user *buf, size_t count, unsigned long pfn,
                struct page *page, struct page_ext *page_ext,
                depot_stack_handle_t handle)
{
        int ret;
        int pageblock_mt, page_mt;
        char *kbuf;
        unsigned long entries[PAGE_OWNER_STACK_DEPTH];
        struct stack_trace trace = {
                .nr_entries = 0,
                .entries = entries,
                .max_entries = PAGE_OWNER_STACK_DEPTH,
                .skip = 0
        };

        kbuf = kmalloc(count, GFP_KERNEL);
        if (!kbuf)
                return -ENOMEM;

        ret = snprintf(kbuf, count,
                        "Page allocated via order %u, mask %#x(%pGg)\n",
                        page_ext->order, page_ext->gfp_mask,
                        &page_ext->gfp_mask);
        if (ret >= count)
                goto err;

        /* Print information relevant to grouping pages by mobility */
        pageblock_mt = get_pageblock_migratetype(page);
        page_mt = gfpflags_to_migratetype(page_ext->gfp_mask);
        ret += snprintf(kbuf + ret, count - ret,
                        "PFN %lu type %s Block %lu type %s Flags %#lx(%pGp)\n",
                        pfn,
                        migratetype_names[page_mt],
                        pfn >> pageblock_order,
                        migratetype_names[pageblock_mt],
                        page->flags, &page->flags);
        if (ret >= count)
                goto err;

        depot_fetch_stack(handle, &trace);
        ret += snprint_stack_trace(kbuf + ret, count - ret, &trace, 0);
        if (ret >= count)
                goto err;

        if (page_ext->last_migrate_reason != -1) {
                ret += snprintf(kbuf + ret, count - ret,
                        "Page has been migrated, last migrate reason: %s\n",
                        migrate_reason_names[page_ext->last_migrate_reason]);
                if (ret >= count)
                        goto err;
        }

        ret += snprintf(kbuf + ret, count - ret, "\n");
        if (ret >= count)
                goto err;

        if (copy_to_user(buf, kbuf, ret))
                ret = -EFAULT;

        kfree(kbuf);
        return ret;

err:
        kfree(kbuf);
        return -ENOMEM;
}
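/*
 * Print one page's owner info to the kernel log via pr_alert(); used on
 * debugging paths rather than through the debugfs file.
 */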
void __dump_page_owner(struct page *page)
{
        struct page_ext *page_ext = lookup_page_ext(page);
        unsigned long entries[PAGE_OWNER_STACK_DEPTH];
        struct stack_trace trace = {
                .nr_entries = 0,
                .entries = entries,
                .max_entries = PAGE_OWNER_STACK_DEPTH,
                .skip = 0
        };
        depot_stack_handle_t handle;
        gfp_t gfp_mask;
        int mt;

        if (unlikely(!page_ext)) {
                pr_alert("There is no page extension available.\n");
                return;
        }
        gfp_mask = page_ext->gfp_mask;
        mt = gfpflags_to_migratetype(gfp_mask);

        if (!test_bit(PAGE_EXT_OWNER, &page_ext->flags)) {
                pr_alert("page_owner info is not active (free page?)\n");
                return;
        }

        handle = READ_ONCE(page_ext->handle);
        if (!handle) {
                pr_alert("page_owner info is not active (free page?)\n");
                return;
        }

        depot_fetch_stack(handle, &trace);
        pr_alert("page allocated via order %u, migratetype %s, gfp_mask %#x(%pGg)\n",
                 page_ext->order, migratetype_names[mt], gfp_mask, &gfp_mask);
        print_stack_trace(&trace, 0);

        if (page_ext->last_migrate_reason != -1)
                pr_alert("page has been migrated, last migrate reason: %s\n",
                        migrate_reason_names[page_ext->last_migrate_reason]);
}
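/*
 * read() handler for the debugfs "page_owner" file. The file offset is
 * interpreted as a PFN offset from min_low_pfn; each read scans forward
 * from there for the next allocated page with valid owner info, prints
 * that record, and stores the following PFN back into *ppos.
 */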
static ssize_t
read_page_owner(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
        unsigned long pfn;
        struct page *page;
        struct page_ext *page_ext;
        depot_stack_handle_t handle;

        if (!static_branch_unlikely(&page_owner_inited))
                return -EINVAL;

        page = NULL;
        pfn = min_low_pfn + *ppos;

        /* Find a valid PFN or the start of a MAX_ORDER_NR_PAGES area */
        while (!pfn_valid(pfn) && (pfn & (MAX_ORDER_NR_PAGES - 1)) != 0)
                pfn++;

        drain_all_pages(NULL);

        /* Find an allocated page */
        for (; pfn < max_pfn; pfn++) {
                /*
                 * If the new page is in a new MAX_ORDER_NR_PAGES area,
                 * validate the area as existing, skip it if not
                 */
                if ((pfn & (MAX_ORDER_NR_PAGES - 1)) == 0 && !pfn_valid(pfn)) {
                        pfn += MAX_ORDER_NR_PAGES - 1;
                        continue;
                }

                /* Check for holes within a MAX_ORDER area */
                if (!pfn_valid_within(pfn))
                        continue;

                page = pfn_to_page(pfn);
                if (PageBuddy(page)) {
                        unsigned long freepage_order = page_order_unsafe(page);

                        if (freepage_order < MAX_ORDER)
                                pfn += (1UL << freepage_order) - 1;
                        continue;
                }

                page_ext = lookup_page_ext(page);
                if (unlikely(!page_ext))
                        continue;

                /*
                 * Some pages could be missed by concurrent allocation or free,
                 * because we don't hold the zone lock.
                 */
                if (!test_bit(PAGE_EXT_OWNER, &page_ext->flags))
                        continue;

                /*
                 * Access to page_ext->handle isn't synchronized, so be
                 * careful when reading it.
                 */
                handle = READ_ONCE(page_ext->handle);
                if (!handle)
                        continue;

                /* Record the next PFN to read in the file offset */
                *ppos = (pfn - min_low_pfn) + 1;

                return print_page_owner(buf, count, pfn, page,
                                page_ext, handle);
        }

        return 0;
}
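/*
 * Walk one zone pageblock by pageblock at init time and set owner info on
 * every page that was allocated before page_owner was ready (boot-time
 * allocations), so later reads don't mistake them for untracked pages.
 */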
static void init_pages_in_zone(pg_data_t *pgdat, struct zone *zone)
{
        struct page *page;
        struct page_ext *page_ext;
        unsigned long pfn = zone->zone_start_pfn, block_end_pfn;
        unsigned long end_pfn = pfn + zone->spanned_pages;
        unsigned long count = 0;

        /* Scan block by block. First and last block may be incomplete */
        pfn = zone->zone_start_pfn;

        /*
         * Walk the zone in pageblock_nr_pages steps. If a page block spans
         * a zone boundary, it will be double counted between zones. This does
         * not matter as the mixed block count will still be correct
         */
        for (; pfn < end_pfn; ) {
                if (!pfn_valid(pfn)) {
                        pfn = ALIGN(pfn + 1, MAX_ORDER_NR_PAGES);
                        continue;
                }

                block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);
                block_end_pfn = min(block_end_pfn, end_pfn);

                page = pfn_to_page(pfn);

                for (; pfn < block_end_pfn; pfn++) {
                        if (!pfn_valid_within(pfn))
                                continue;

                        page = pfn_to_page(pfn);

                        if (page_zone(page) != zone)
                                continue;

                        /*
                         * We are safe to check the buddy flag and order,
                         * because this is the init stage and only a single
                         * thread runs.
                         */
                        if (PageBuddy(page)) {
                                pfn += (1UL << page_order(page)) - 1;
                                continue;
                        }

                        if (PageReserved(page))
                                continue;

                        page_ext = lookup_page_ext(page);
                        if (unlikely(!page_ext))
                                continue;

                        /* Maybe overlapping zone */
                        if (test_bit(PAGE_EXT_OWNER, &page_ext->flags))
                                continue;

                        /* Found early allocated page */
                        set_page_owner(page, 0, 0);
                        count++;
                }
        }

        pr_info("Node %d, zone %8s: page owner found early allocated %lu pages\n",
                pgdat->node_id, zone->name, count);
}
static void init_zones_in_node(pg_data_t *pgdat)
{
        struct zone *zone;
        struct zone *node_zones = pgdat->node_zones;
        unsigned long flags;

        for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; ++zone) {
                if (!populated_zone(zone))
                        continue;

                spin_lock_irqsave(&zone->lock, flags);
                init_pages_in_zone(pgdat, zone);
                spin_unlock_irqrestore(&zone->lock, flags);
        }
}

static void init_early_allocated_pages(void)
{
        pg_data_t *pgdat;

        drain_all_pages(NULL);
        for_each_online_pgdat(pgdat)
                init_zones_in_node(pgdat);
}
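/*
 * Late initcall: once page_owner_inited has been enabled, expose the
 * collected records through the read-only debugfs file "page_owner".
 */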
static const struct file_operations proc_page_owner_operations = {
        .read = read_page_owner,
};

static int __init pageowner_init(void)
{
        struct dentry *dentry;

        if (!static_branch_unlikely(&page_owner_inited)) {
                pr_info("page_owner is disabled\n");
                return 0;
        }

        dentry = debugfs_create_file("page_owner", S_IRUSR, NULL,
                        NULL, &proc_page_owner_operations);
        if (IS_ERR(dentry))
                return PTR_ERR(dentry);

        return 0;
}
late_initcall(pageowner_init)