  1. /*
  2. * mm/debug.c
  3. *
  4. * mm/ specific debug routines.
  5. *
  6. */
  7. #include <linux/kernel.h>
  8. #include <linux/mm.h>
  9. #include <linux/trace_events.h>
  10. #include <linux/memcontrol.h>
  11. static const struct trace_print_flags pageflag_names[] = {
  12. {1UL << PG_locked, "locked" },
  13. {1UL << PG_error, "error" },
  14. {1UL << PG_referenced, "referenced" },
  15. {1UL << PG_uptodate, "uptodate" },
  16. {1UL << PG_dirty, "dirty" },
  17. {1UL << PG_lru, "lru" },
  18. {1UL << PG_active, "active" },
  19. {1UL << PG_slab, "slab" },
  20. {1UL << PG_owner_priv_1, "owner_priv_1" },
  21. {1UL << PG_arch_1, "arch_1" },
  22. {1UL << PG_reserved, "reserved" },
  23. {1UL << PG_private, "private" },
  24. {1UL << PG_private_2, "private_2" },
  25. {1UL << PG_writeback, "writeback" },
  26. #ifdef CONFIG_PAGEFLAGS_EXTENDED
  27. {1UL << PG_head, "head" },
  28. {1UL << PG_tail, "tail" },
  29. #else
  30. {1UL << PG_compound, "compound" },
  31. #endif
  32. {1UL << PG_swapcache, "swapcache" },
  33. {1UL << PG_mappedtodisk, "mappedtodisk" },
  34. {1UL << PG_reclaim, "reclaim" },
  35. {1UL << PG_swapbacked, "swapbacked" },
  36. {1UL << PG_unevictable, "unevictable" },
  37. #ifdef CONFIG_MMU
  38. {1UL << PG_mlocked, "mlocked" },
  39. #endif
  40. #ifdef CONFIG_ARCH_USES_PG_UNCACHED
  41. {1UL << PG_uncached, "uncached" },
  42. #endif
  43. #ifdef CONFIG_MEMORY_FAILURE
  44. {1UL << PG_hwpoison, "hwpoison" },
  45. #endif
  46. #ifdef CONFIG_TRANSPARENT_HUGEPAGE
  47. {1UL << PG_compound_lock, "compound_lock" },
  48. #endif
  49. #if defined(CONFIG_IDLE_PAGE_TRACKING) && defined(CONFIG_64BIT)
  50. {1UL << PG_young, "young" },
  51. {1UL << PG_idle, "idle" },
  52. #endif
  53. };
  54. static void dump_flags(unsigned long flags,
  55. const struct trace_print_flags *names, int count)
  56. {
  57. const char *delim = "";
  58. unsigned long mask;
  59. int i;
  60. pr_emerg("flags: %#lx(", flags);
  61. /* remove zone id */
  62. flags &= (1UL << NR_PAGEFLAGS) - 1;
  63. for (i = 0; i < count && flags; i++) {
  64. mask = names[i].mask;
  65. if ((flags & mask) != mask)
  66. continue;
  67. flags &= ~mask;
  68. pr_cont("%s%s", delim, names[i].name);
  69. delim = "|";
  70. }
  71. /* check for left over flags */
  72. if (flags)
  73. pr_cont("%s%#lx", delim, flags);
  74. pr_cont(")\n");
  75. }
  76. void dump_page_badflags(struct page *page, const char *reason,
  77. unsigned long badflags)
  78. {
  79. pr_emerg("page:%p count:%d mapcount:%d mapping:%p index:%#lx\n",
  80. page, atomic_read(&page->_count), page_mapcount(page),
  81. page->mapping, page->index);
  82. BUILD_BUG_ON(ARRAY_SIZE(pageflag_names) != __NR_PAGEFLAGS);
  83. dump_flags(page->flags, pageflag_names, ARRAY_SIZE(pageflag_names));
  84. if (reason)
  85. pr_alert("page dumped because: %s\n", reason);
  86. if (page->flags & badflags) {
  87. pr_alert("bad because of flags:\n");
  88. dump_flags(page->flags & badflags,
  89. pageflag_names, ARRAY_SIZE(pageflag_names));
  90. }
  91. #ifdef CONFIG_MEMCG
  92. if (page->mem_cgroup)
  93. pr_alert("page->mem_cgroup:%p\n", page->mem_cgroup);
  94. #endif
  95. }
  96. void dump_page(struct page *page, const char *reason)
  97. {
  98. dump_page_badflags(page, reason, 0);
  99. }
  100. EXPORT_SYMBOL(dump_page);
  101. #ifdef CONFIG_DEBUG_VM
  102. static const struct trace_print_flags vmaflags_names[] = {
  103. {VM_READ, "read" },
  104. {VM_WRITE, "write" },
  105. {VM_EXEC, "exec" },
  106. {VM_SHARED, "shared" },
  107. {VM_MAYREAD, "mayread" },
  108. {VM_MAYWRITE, "maywrite" },
  109. {VM_MAYEXEC, "mayexec" },
  110. {VM_MAYSHARE, "mayshare" },
  111. {VM_GROWSDOWN, "growsdown" },
  112. {VM_PFNMAP, "pfnmap" },
  113. {VM_DENYWRITE, "denywrite" },
  114. {VM_LOCKED, "locked" },
  115. {VM_IO, "io" },
  116. {VM_SEQ_READ, "seqread" },
  117. {VM_RAND_READ, "randread" },
  118. {VM_DONTCOPY, "dontcopy" },
  119. {VM_DONTEXPAND, "dontexpand" },
  120. {VM_ACCOUNT, "account" },
  121. {VM_NORESERVE, "noreserve" },
  122. {VM_HUGETLB, "hugetlb" },
  123. #if defined(CONFIG_X86)
  124. {VM_PAT, "pat" },
  125. #elif defined(CONFIG_PPC)
  126. {VM_SAO, "sao" },
  127. #elif defined(CONFIG_PARISC) || defined(CONFIG_METAG) || defined(CONFIG_IA64)
  128. {VM_GROWSUP, "growsup" },
  129. #elif !defined(CONFIG_MMU)
  130. {VM_MAPPED_COPY, "mappedcopy" },
  131. #else
  132. {VM_ARCH_1, "arch_1" },
  133. #endif
  134. {VM_DONTDUMP, "dontdump" },
  135. #ifdef CONFIG_MEM_SOFT_DIRTY
  136. {VM_SOFTDIRTY, "softdirty" },
  137. #endif
  138. {VM_MIXEDMAP, "mixedmap" },
  139. {VM_HUGEPAGE, "hugepage" },
  140. {VM_NOHUGEPAGE, "nohugepage" },
  141. {VM_MERGEABLE, "mergeable" },
  142. };
  143. void dump_vma(const struct vm_area_struct *vma)
  144. {
  145. pr_emerg("vma %p start %p end %p\n"
  146. "next %p prev %p mm %p\n"
  147. "prot %lx anon_vma %p vm_ops %p\n"
  148. "pgoff %lx file %p private_data %p\n",
  149. vma, (void *)vma->vm_start, (void *)vma->vm_end, vma->vm_next,
  150. vma->vm_prev, vma->vm_mm,
  151. (unsigned long)pgprot_val(vma->vm_page_prot),
  152. vma->anon_vma, vma->vm_ops, vma->vm_pgoff,
  153. vma->vm_file, vma->vm_private_data);
  154. dump_flags(vma->vm_flags, vmaflags_names, ARRAY_SIZE(vmaflags_names));
  155. }
  156. EXPORT_SYMBOL(dump_vma);
  157. void dump_mm(const struct mm_struct *mm)
  158. {
  159. pr_emerg("mm %p mmap %p seqnum %d task_size %lu\n"
  160. #ifdef CONFIG_MMU
  161. "get_unmapped_area %p\n"
  162. #endif
  163. "mmap_base %lu mmap_legacy_base %lu highest_vm_end %lu\n"
  164. "pgd %p mm_users %d mm_count %d nr_ptes %lu nr_pmds %lu map_count %d\n"
  165. "hiwater_rss %lx hiwater_vm %lx total_vm %lx locked_vm %lx\n"
  166. "pinned_vm %lx shared_vm %lx exec_vm %lx stack_vm %lx\n"
  167. "start_code %lx end_code %lx start_data %lx end_data %lx\n"
  168. "start_brk %lx brk %lx start_stack %lx\n"
  169. "arg_start %lx arg_end %lx env_start %lx env_end %lx\n"
  170. "binfmt %p flags %lx core_state %p\n"
  171. #ifdef CONFIG_AIO
  172. "ioctx_table %p\n"
  173. #endif
  174. #ifdef CONFIG_MEMCG
  175. "owner %p "
  176. #endif
  177. "exe_file %p\n"
  178. #ifdef CONFIG_MMU_NOTIFIER
  179. "mmu_notifier_mm %p\n"
  180. #endif
  181. #ifdef CONFIG_NUMA_BALANCING
  182. "numa_next_scan %lu numa_scan_offset %lu numa_scan_seq %d\n"
  183. #endif
  184. #if defined(CONFIG_NUMA_BALANCING) || defined(CONFIG_COMPACTION)
  185. "tlb_flush_pending %d\n"
  186. #endif
  187. "%s", /* This is here to hold the comma */
  188. mm, mm->mmap, mm->vmacache_seqnum, mm->task_size,
  189. #ifdef CONFIG_MMU
  190. mm->get_unmapped_area,
  191. #endif
  192. mm->mmap_base, mm->mmap_legacy_base, mm->highest_vm_end,
  193. mm->pgd, atomic_read(&mm->mm_users),
  194. atomic_read(&mm->mm_count),
  195. atomic_long_read((atomic_long_t *)&mm->nr_ptes),
  196. mm_nr_pmds((struct mm_struct *)mm),
  197. mm->map_count,
  198. mm->hiwater_rss, mm->hiwater_vm, mm->total_vm, mm->locked_vm,
  199. mm->pinned_vm, mm->shared_vm, mm->exec_vm, mm->stack_vm,
  200. mm->start_code, mm->end_code, mm->start_data, mm->end_data,
  201. mm->start_brk, mm->brk, mm->start_stack,
  202. mm->arg_start, mm->arg_end, mm->env_start, mm->env_end,
  203. mm->binfmt, mm->flags, mm->core_state,
  204. #ifdef CONFIG_AIO
  205. mm->ioctx_table,
  206. #endif
  207. #ifdef CONFIG_MEMCG
  208. mm->owner,
  209. #endif
  210. mm->exe_file,
  211. #ifdef CONFIG_MMU_NOTIFIER
  212. mm->mmu_notifier_mm,
  213. #endif
  214. #ifdef CONFIG_NUMA_BALANCING
  215. mm->numa_next_scan, mm->numa_scan_offset, mm->numa_scan_seq,
  216. #endif
  217. #if defined(CONFIG_NUMA_BALANCING) || defined(CONFIG_COMPACTION)
  218. mm->tlb_flush_pending,
  219. #endif
  220. "" /* This is here to not have a comma! */
  221. );
  222. dump_flags(mm->def_flags, vmaflags_names,
  223. ARRAY_SIZE(vmaflags_names));
  224. }
  225. #endif /* CONFIG_DEBUG_VM */