debug.c

/*
 * mm/debug.c
 *
 * mm/ specific debug routines.
 *
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/trace_events.h>
#include <linux/memcontrol.h>
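
/*
 * Human-readable names for the struct page flag bits, used when a page's
 * flags word is decoded below.  The BUILD_BUG_ON() in dump_page_badflags()
 * keeps this table the same size as enum pageflags.
 */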
static const struct trace_print_flags pageflag_names[] = {
	{1UL << PG_locked,		"locked"	},
	{1UL << PG_error,		"error"		},
	{1UL << PG_referenced,		"referenced"	},
	{1UL << PG_uptodate,		"uptodate"	},
	{1UL << PG_dirty,		"dirty"		},
	{1UL << PG_lru,			"lru"		},
	{1UL << PG_active,		"active"	},
	{1UL << PG_slab,		"slab"		},
	{1UL << PG_owner_priv_1,	"owner_priv_1"	},
	{1UL << PG_arch_1,		"arch_1"	},
	{1UL << PG_reserved,		"reserved"	},
	{1UL << PG_private,		"private"	},
	{1UL << PG_private_2,		"private_2"	},
	{1UL << PG_writeback,		"writeback"	},
#ifdef CONFIG_PAGEFLAGS_EXTENDED
	{1UL << PG_head,		"head"		},
	{1UL << PG_tail,		"tail"		},
#else
	{1UL << PG_compound,		"compound"	},
#endif
	{1UL << PG_swapcache,		"swapcache"	},
	{1UL << PG_mappedtodisk,	"mappedtodisk"	},
	{1UL << PG_reclaim,		"reclaim"	},
	{1UL << PG_swapbacked,		"swapbacked"	},
	{1UL << PG_unevictable,		"unevictable"	},
#ifdef CONFIG_MMU
	{1UL << PG_mlocked,		"mlocked"	},
#endif
#ifdef CONFIG_ARCH_USES_PG_UNCACHED
	{1UL << PG_uncached,		"uncached"	},
#endif
#ifdef CONFIG_MEMORY_FAILURE
	{1UL << PG_hwpoison,		"hwpoison"	},
#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	{1UL << PG_compound_lock,	"compound_lock"	},
#endif
#if defined(CONFIG_IDLE_PAGE_TRACKING) && defined(CONFIG_64BIT)
	{1UL << PG_young,		"young"		},
	{1UL << PG_idle,		"idle"		},
#endif
};
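
/*
 * Print @flags as a '|'-separated list of the names in @names.  Bits above
 * NR_PAGEFLAGS are masked off first (for page->flags these encode the
 * zone/node/section id); any remaining set bits without a name are printed
 * as a trailing hex value.
 */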
static void dump_flags(unsigned long flags,
			const struct trace_print_flags *names, int count)
{
	const char *delim = "";
	unsigned long mask;
	int i;

	pr_emerg("flags: %#lx(", flags);

	/* remove zone id */
	flags &= (1UL << NR_PAGEFLAGS) - 1;

	for (i = 0; i < count && flags; i++) {
		mask = names[i].mask;
		if ((flags & mask) != mask)
			continue;

		flags &= ~mask;
		pr_cont("%s%s", delim, names[i].name);
		delim = "|";
	}

	/* check for left over flags */
	if (flags)
		pr_cont("%s%#lx", delim, flags);

	pr_cont(")\n");
}
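
/*
 * Dump the state of @page: reference count, mapcount, mapping, index and the
 * decoded page flags.  If @reason is non-NULL it is printed as well, and any
 * flags that also appear in @badflags are dumped a second time as the
 * offending bits.  With CONFIG_MEMCG the owning mem_cgroup is printed too.
 */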
void dump_page_badflags(struct page *page, const char *reason,
		unsigned long badflags)
{
	pr_emerg("page:%p count:%d mapcount:%d mapping:%p index:%#lx\n",
		  page, atomic_read(&page->_count), page_mapcount(page),
		  page->mapping, page->index);
	BUILD_BUG_ON(ARRAY_SIZE(pageflag_names) != __NR_PAGEFLAGS);
	dump_flags(page->flags, pageflag_names, ARRAY_SIZE(pageflag_names));
	if (reason)
		pr_alert("page dumped because: %s\n", reason);
	if (page->flags & badflags) {
		pr_alert("bad because of flags:\n");
		dump_flags(page->flags & badflags,
				pageflag_names, ARRAY_SIZE(pageflag_names));
	}
#ifdef CONFIG_MEMCG
	if (page->mem_cgroup)
		pr_alert("page->mem_cgroup:%p\n", page->mem_cgroup);
#endif
}
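
/*
 * Convenience wrapper around dump_page_badflags() with no bad-flags mask.
 */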
void dump_page(struct page *page, const char *reason)
{
	dump_page_badflags(page, reason, 0);
}
EXPORT_SYMBOL(dump_page);
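
/*
 * Example (sketch): a caller that finds a page in an unexpected state can
 * report it like this.  "my_release_page" is a hypothetical function used
 * only for illustration, not part of this file:
 *
 *	static void my_release_page(struct page *page)
 *	{
 *		if (unlikely(page_mapped(page)))
 *			dump_page(page, "still mapped at release");
 *	}
 */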
#ifdef CONFIG_DEBUG_VM

static const struct trace_print_flags vmaflags_names[] = {
	{VM_READ,			"read"		},
	{VM_WRITE,			"write"		},
	{VM_EXEC,			"exec"		},
	{VM_SHARED,			"shared"	},
	{VM_MAYREAD,			"mayread"	},
	{VM_MAYWRITE,			"maywrite"	},
	{VM_MAYEXEC,			"mayexec"	},
	{VM_MAYSHARE,			"mayshare"	},
	{VM_GROWSDOWN,			"growsdown"	},
	{VM_PFNMAP,			"pfnmap"	},
	{VM_DENYWRITE,			"denywrite"	},
	{VM_LOCKONFAULT,		"lockonfault"	},
	{VM_LOCKED,			"locked"	},
	{VM_IO,				"io"		},
	{VM_SEQ_READ,			"seqread"	},
	{VM_RAND_READ,			"randread"	},
	{VM_DONTCOPY,			"dontcopy"	},
	{VM_DONTEXPAND,			"dontexpand"	},
	{VM_ACCOUNT,			"account"	},
	{VM_NORESERVE,			"noreserve"	},
	{VM_HUGETLB,			"hugetlb"	},
#if defined(CONFIG_X86)
	{VM_PAT,			"pat"		},
#elif defined(CONFIG_PPC)
	{VM_SAO,			"sao"		},
#elif defined(CONFIG_PARISC) || defined(CONFIG_METAG) || defined(CONFIG_IA64)
	{VM_GROWSUP,			"growsup"	},
#elif !defined(CONFIG_MMU)
	{VM_MAPPED_COPY,		"mappedcopy"	},
#else
	{VM_ARCH_1,			"arch_1"	},
#endif
	{VM_DONTDUMP,			"dontdump"	},
#ifdef CONFIG_MEM_SOFT_DIRTY
	{VM_SOFTDIRTY,			"softdirty"	},
#endif
	{VM_MIXEDMAP,			"mixedmap"	},
	{VM_HUGEPAGE,			"hugepage"	},
	{VM_NOHUGEPAGE,			"nohugepage"	},
	{VM_MERGEABLE,			"mergeable"	},
};
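
/*
 * Dump the state of @vma: address range, list neighbours, owning mm,
 * page protection, pgoff, backing file and the decoded vm_flags.  This is
 * what VM_BUG_ON_VMA() prints before triggering the BUG.
 */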
void dump_vma(const struct vm_area_struct *vma)
{
	pr_emerg("vma %p start %p end %p\n"
		"next %p prev %p mm %p\n"
		"prot %lx anon_vma %p vm_ops %p\n"
		"pgoff %lx file %p private_data %p\n",
		vma, (void *)vma->vm_start, (void *)vma->vm_end, vma->vm_next,
		vma->vm_prev, vma->vm_mm,
		(unsigned long)pgprot_val(vma->vm_page_prot),
		vma->anon_vma, vma->vm_ops, vma->vm_pgoff,
		vma->vm_file, vma->vm_private_data);
	dump_flags(vma->vm_flags, vmaflags_names, ARRAY_SIZE(vmaflags_names));
}
EXPORT_SYMBOL(dump_vma);
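
/*
 * Dump the interesting fields of @mm in a single pr_emerg() call, followed
 * by the decoded default VMA flags.  Used by VM_BUG_ON_MM().  The format
 * string and the argument list are assembled under the same #ifdefs and must
 * stay in the same order; the trailing "%s"/"" pair exists only so that every
 * conditional fragment and argument can end with a comma.
 */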
void dump_mm(const struct mm_struct *mm)
{
	pr_emerg("mm %p mmap %p seqnum %d task_size %lu\n"
#ifdef CONFIG_MMU
		"get_unmapped_area %p\n"
#endif
		"mmap_base %lu mmap_legacy_base %lu highest_vm_end %lu\n"
		"pgd %p mm_users %d mm_count %d nr_ptes %lu nr_pmds %lu map_count %d\n"
		"hiwater_rss %lx hiwater_vm %lx total_vm %lx locked_vm %lx\n"
		"pinned_vm %lx shared_vm %lx exec_vm %lx stack_vm %lx\n"
		"start_code %lx end_code %lx start_data %lx end_data %lx\n"
		"start_brk %lx brk %lx start_stack %lx\n"
		"arg_start %lx arg_end %lx env_start %lx env_end %lx\n"
		"binfmt %p flags %lx core_state %p\n"
#ifdef CONFIG_AIO
		"ioctx_table %p\n"
#endif
#ifdef CONFIG_MEMCG
		"owner %p "
#endif
		"exe_file %p\n"
#ifdef CONFIG_MMU_NOTIFIER
		"mmu_notifier_mm %p\n"
#endif
#ifdef CONFIG_NUMA_BALANCING
		"numa_next_scan %lu numa_scan_offset %lu numa_scan_seq %d\n"
#endif
#if defined(CONFIG_NUMA_BALANCING) || defined(CONFIG_COMPACTION)
		"tlb_flush_pending %d\n"
#endif
		"%s",	/* This is here to hold the comma */

		mm, mm->mmap, mm->vmacache_seqnum, mm->task_size,
#ifdef CONFIG_MMU
		mm->get_unmapped_area,
#endif
		mm->mmap_base, mm->mmap_legacy_base, mm->highest_vm_end,
		mm->pgd, atomic_read(&mm->mm_users),
		atomic_read(&mm->mm_count),
		atomic_long_read((atomic_long_t *)&mm->nr_ptes),
		mm_nr_pmds((struct mm_struct *)mm),
		mm->map_count,
		mm->hiwater_rss, mm->hiwater_vm, mm->total_vm, mm->locked_vm,
		mm->pinned_vm, mm->shared_vm, mm->exec_vm, mm->stack_vm,
		mm->start_code, mm->end_code, mm->start_data, mm->end_data,
		mm->start_brk, mm->brk, mm->start_stack,
		mm->arg_start, mm->arg_end, mm->env_start, mm->env_end,
		mm->binfmt, mm->flags, mm->core_state,
#ifdef CONFIG_AIO
		mm->ioctx_table,
#endif
#ifdef CONFIG_MEMCG
		mm->owner,
#endif
		mm->exe_file,
#ifdef CONFIG_MMU_NOTIFIER
		mm->mmu_notifier_mm,
#endif
#ifdef CONFIG_NUMA_BALANCING
		mm->numa_next_scan, mm->numa_scan_offset, mm->numa_scan_seq,
#endif
#if defined(CONFIG_NUMA_BALANCING) || defined(CONFIG_COMPACTION)
		mm->tlb_flush_pending,
#endif
		""		/* This is here to not have a comma! */
		);

	dump_flags(mm->def_flags, vmaflags_names,
				ARRAY_SIZE(vmaflags_names));
}

#endif		/* CONFIG_DEBUG_VM */