/* fs/proc/task_mmu.c */
#include <linux/mm.h>
#include <linux/vmacache.h>
#include <linux/hugetlb.h>
#include <linux/huge_mm.h>
#include <linux/mount.h>
#include <linux/seq_file.h>
#include <linux/highmem.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/mempolicy.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/mmu_notifier.h>
#include <linux/page_idle.h>
#include <linux/shmem_fs.h>

#include <asm/elf.h>
#include <linux/uaccess.h>
#include <asm/tlbflush.h>
#include "internal.h"
void task_mem(struct seq_file *m, struct mm_struct *mm)
{
        unsigned long text, lib, swap, ptes, pmds, anon, file, shmem;
        unsigned long hiwater_vm, total_vm, hiwater_rss, total_rss;

        anon = get_mm_counter(mm, MM_ANONPAGES);
        file = get_mm_counter(mm, MM_FILEPAGES);
        shmem = get_mm_counter(mm, MM_SHMEMPAGES);

        /*
         * Note: to minimize their overhead, mm maintains hiwater_vm and
         * hiwater_rss only when about to *lower* total_vm or rss.  Any
         * collector of these hiwater stats must therefore get total_vm
         * and rss too, which will usually be the higher.  Barriers? not
         * worth the effort, such snapshots can always be inconsistent.
         */
        hiwater_vm = total_vm = mm->total_vm;
        if (hiwater_vm < mm->hiwater_vm)
                hiwater_vm = mm->hiwater_vm;
        hiwater_rss = total_rss = anon + file + shmem;
        if (hiwater_rss < mm->hiwater_rss)
                hiwater_rss = mm->hiwater_rss;

        text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK)) >> 10;
        lib = (mm->exec_vm << (PAGE_SHIFT-10)) - text;
        swap = get_mm_counter(mm, MM_SWAPENTS);
        ptes = PTRS_PER_PTE * sizeof(pte_t) * atomic_long_read(&mm->nr_ptes);
        pmds = PTRS_PER_PMD * sizeof(pmd_t) * mm_nr_pmds(mm);
        seq_printf(m,
                "VmPeak:\t%8lu kB\n"
                "VmSize:\t%8lu kB\n"
                "VmLck:\t%8lu kB\n"
                "VmPin:\t%8lu kB\n"
                "VmHWM:\t%8lu kB\n"
                "VmRSS:\t%8lu kB\n"
                "RssAnon:\t%8lu kB\n"
                "RssFile:\t%8lu kB\n"
                "RssShmem:\t%8lu kB\n"
                "VmData:\t%8lu kB\n"
                "VmStk:\t%8lu kB\n"
                "VmExe:\t%8lu kB\n"
                "VmLib:\t%8lu kB\n"
                "VmPTE:\t%8lu kB\n"
                "VmPMD:\t%8lu kB\n"
                "VmSwap:\t%8lu kB\n",
                hiwater_vm << (PAGE_SHIFT-10),
                total_vm << (PAGE_SHIFT-10),
                mm->locked_vm << (PAGE_SHIFT-10),
                mm->pinned_vm << (PAGE_SHIFT-10),
                hiwater_rss << (PAGE_SHIFT-10),
                total_rss << (PAGE_SHIFT-10),
                anon << (PAGE_SHIFT-10),
                file << (PAGE_SHIFT-10),
                shmem << (PAGE_SHIFT-10),
                mm->data_vm << (PAGE_SHIFT-10),
                mm->stack_vm << (PAGE_SHIFT-10), text, lib,
                ptes >> 10,
                pmds >> 10,
                swap << (PAGE_SHIFT-10));
        hugetlb_report_usage(m, mm);
}
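/*
 * Illustrative excerpt of the /proc/PID/status output produced by the
 * seq_printf() above (the numbers are made up; each counter is shifted
 * into kB and printed with a "%8lu" width):
 *
 *	VmPeak:	   14604 kB
 *	VmSize:	   14572 kB
 *	VmHWM:	    1748 kB
 *	VmRSS:	    1748 kB
 */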
unsigned long task_vsize(struct mm_struct *mm)
{
        return PAGE_SIZE * mm->total_vm;
}

unsigned long task_statm(struct mm_struct *mm,
                         unsigned long *shared, unsigned long *text,
                         unsigned long *data, unsigned long *resident)
{
        *shared = get_mm_counter(mm, MM_FILEPAGES) +
                  get_mm_counter(mm, MM_SHMEMPAGES);
        *text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK))
                                                                >> PAGE_SHIFT;
        *data = mm->data_vm + mm->stack_vm;
        *resident = *shared + get_mm_counter(mm, MM_ANONPAGES);
        return mm->total_vm;
}
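/*
 * task_statm() backs /proc/PID/statm, whose seven space-separated
 * fields are: size resident shared text lib data dt.  Only the four
 * counters filled in above plus the returned total_vm vary; the lib
 * and dt fields are reported as 0 on modern kernels.
 */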
#ifdef CONFIG_NUMA
/*
 * Save get_task_policy() for show_numa_map().
 */
static void hold_task_mempolicy(struct proc_maps_private *priv)
{
        struct task_struct *task = priv->task;

        task_lock(task);
        priv->task_mempolicy = get_task_policy(task);
        mpol_get(priv->task_mempolicy);
        task_unlock(task);
}

static void release_task_mempolicy(struct proc_maps_private *priv)
{
        mpol_put(priv->task_mempolicy);
}
#else
static void hold_task_mempolicy(struct proc_maps_private *priv)
{
}
static void release_task_mempolicy(struct proc_maps_private *priv)
{
}
#endif

static void vma_stop(struct proc_maps_private *priv)
{
        struct mm_struct *mm = priv->mm;

        release_task_mempolicy(priv);
        up_read(&mm->mmap_sem);
        mmput(mm);
}

static struct vm_area_struct *
m_next_vma(struct proc_maps_private *priv, struct vm_area_struct *vma)
{
        if (vma == priv->tail_vma)
                return NULL;
        return vma->vm_next ?: priv->tail_vma;
}

static void m_cache_vma(struct seq_file *m, struct vm_area_struct *vma)
{
        if (m->count < m->size) /* vma is copied successfully */
                m->version = m_next_vma(m->private, vma) ? vma->vm_end : -1UL;
}

static void *m_start(struct seq_file *m, loff_t *ppos)
{
        struct proc_maps_private *priv = m->private;
        unsigned long last_addr = m->version;
        struct mm_struct *mm;
        struct vm_area_struct *vma;
        unsigned int pos = *ppos;

        /* See m_cache_vma().  Zero at the start or after lseek. */
        if (last_addr == -1UL)
                return NULL;

        priv->task = get_proc_task(priv->inode);
        if (!priv->task)
                return ERR_PTR(-ESRCH);

        mm = priv->mm;
        if (!mm || !mmget_not_zero(mm))
                return NULL;

        down_read(&mm->mmap_sem);
        hold_task_mempolicy(priv);
        priv->tail_vma = get_gate_vma(mm);

        if (last_addr) {
                vma = find_vma(mm, last_addr - 1);
                if (vma && vma->vm_start <= last_addr)
                        vma = m_next_vma(priv, vma);
                if (vma)
                        return vma;
        }

        m->version = 0;
        if (pos < mm->map_count) {
                for (vma = mm->mmap; pos; pos--) {
                        m->version = vma->vm_start;
                        vma = vma->vm_next;
                }
                return vma;
        }

        /* we do not bother to update m->version in this case */
        if (pos == mm->map_count && priv->tail_vma)
                return priv->tail_vma;

        vma_stop(priv);
        return NULL;
}

static void *m_next(struct seq_file *m, void *v, loff_t *pos)
{
        struct proc_maps_private *priv = m->private;
        struct vm_area_struct *next;

        (*pos)++;
        next = m_next_vma(priv, v);
        if (!next)
                vma_stop(priv);
        return next;
}

static void m_stop(struct seq_file *m, void *v)
{
        struct proc_maps_private *priv = m->private;

        if (!IS_ERR_OR_NULL(v))
                vma_stop(priv);
        if (priv->task) {
                put_task_struct(priv->task);
                priv->task = NULL;
        }
}

static int proc_maps_open(struct inode *inode, struct file *file,
                          const struct seq_operations *ops, int psize)
{
        struct proc_maps_private *priv = __seq_open_private(file, ops, psize);

        if (!priv)
                return -ENOMEM;

        priv->inode = inode;
        priv->mm = proc_mem_open(inode, PTRACE_MODE_READ);
        if (IS_ERR(priv->mm)) {
                int err = PTR_ERR(priv->mm);

                seq_release_private(inode, file);
                return err;
        }

        return 0;
}

static int proc_map_release(struct inode *inode, struct file *file)
{
        struct seq_file *seq = file->private_data;
        struct proc_maps_private *priv = seq->private;

        if (priv->mm)
                mmdrop(priv->mm);

        return seq_release_private(inode, file);
}

static int do_maps_open(struct inode *inode, struct file *file,
                        const struct seq_operations *ops)
{
        return proc_maps_open(inode, file, ops,
                              sizeof(struct proc_maps_private));
}
/*
 * Indicate if the VMA is a stack for the given task; for
 * /proc/PID/maps that is the stack of the main task.
 */
static int is_stack(struct proc_maps_private *priv,
                    struct vm_area_struct *vma)
{
        /*
         * We make no effort to guess what a given thread considers to be
         * its "stack".  It's not even well-defined for programs written
         * in languages like Go.
         */
        return vma->vm_start <= vma->vm_mm->start_stack &&
               vma->vm_end >= vma->vm_mm->start_stack;
}
static void
show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
{
        struct mm_struct *mm = vma->vm_mm;
        struct file *file = vma->vm_file;
        struct proc_maps_private *priv = m->private;
        vm_flags_t flags = vma->vm_flags;
        unsigned long ino = 0;
        unsigned long long pgoff = 0;
        unsigned long start, end;
        dev_t dev = 0;
        const char *name = NULL;

        if (file) {
                struct inode *inode = file_inode(vma->vm_file);
                dev = inode->i_sb->s_dev;
                ino = inode->i_ino;
                pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
        }

        /* We don't show the stack guard page in /proc/maps */
        start = vma->vm_start;
        if (stack_guard_page_start(vma, start))
                start += PAGE_SIZE;
        end = vma->vm_end;
        if (stack_guard_page_end(vma, end))
                end -= PAGE_SIZE;

        seq_setwidth(m, 25 + sizeof(void *) * 6 - 1);
        seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu ",
                   start,
                   end,
                   flags & VM_READ ? 'r' : '-',
                   flags & VM_WRITE ? 'w' : '-',
                   flags & VM_EXEC ? 'x' : '-',
                   flags & VM_MAYSHARE ? 's' : 'p',
                   pgoff,
                   MAJOR(dev), MINOR(dev), ino);

        /*
         * Print the dentry name for named mappings, and a
         * special [heap] marker for the heap:
         */
        if (file) {
                seq_pad(m, ' ');
                seq_file_path(m, file, "\n");
                goto done;
        }

        if (vma->vm_ops && vma->vm_ops->name) {
                name = vma->vm_ops->name(vma);
                if (name)
                        goto done;
        }

        name = arch_vma_name(vma);
        if (!name) {
                if (!mm) {
                        name = "[vdso]";
                        goto done;
                }

                if (vma->vm_start <= mm->brk &&
                    vma->vm_end >= mm->start_brk) {
                        name = "[heap]";
                        goto done;
                }

                if (is_stack(priv, vma))
                        name = "[stack]";
        }

done:
        if (name) {
                seq_pad(m, ' ');
                seq_puts(m, name);
        }
        seq_putc(m, '\n');
}

static int show_map(struct seq_file *m, void *v, int is_pid)
{
        show_map_vma(m, v, is_pid);
        m_cache_vma(m, v);
        return 0;
}

static int show_pid_map(struct seq_file *m, void *v)
{
        return show_map(m, v, 1);
}

static int show_tid_map(struct seq_file *m, void *v)
{
        return show_map(m, v, 0);
}

static const struct seq_operations proc_pid_maps_op = {
        .start  = m_start,
        .next   = m_next,
        .stop   = m_stop,
        .show   = show_pid_map
};

static const struct seq_operations proc_tid_maps_op = {
        .start  = m_start,
        .next   = m_next,
        .stop   = m_stop,
        .show   = show_tid_map
};

static int pid_maps_open(struct inode *inode, struct file *file)
{
        return do_maps_open(inode, file, &proc_pid_maps_op);
}

static int tid_maps_open(struct inode *inode, struct file *file)
{
        return do_maps_open(inode, file, &proc_tid_maps_op);
}

const struct file_operations proc_pid_maps_operations = {
        .open           = pid_maps_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = proc_map_release,
};

const struct file_operations proc_tid_maps_operations = {
        .open           = tid_maps_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = proc_map_release,
};
/*
 * Proportional Set Size (PSS): my share of RSS.
 *
 * The PSS of a process is the count of pages it has in memory, where
 * each page is divided by the number of processes sharing it.  So if
 * a process has 1000 pages all to itself, and 1000 shared with one
 * other process, its PSS will be 1500.
 *
 * To keep (accumulated) division errors low, we adopt a 64-bit
 * fixed-point pss counter, so (pss >> PSS_SHIFT) is the real byte
 * count.
 *
 * A shift of 12 before division means (assuming 4K page size):
 *	- 1M 3-user-pages add up to 8KB errors;
 *	- supports mapcount up to 2^24, or 16M;
 *	- supports PSS up to 2^52 bytes, or 4PB.
 */
#define PSS_SHIFT 12
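/*
 * Worked example of the fixed-point accounting (illustrative only):
 * for one 4K page shared by three processes, smaps_account() below
 * adds
 *
 *	(4096 << PSS_SHIFT) / 3 = 16777216 / 3 = 5592405
 *
 * to pss, and (5592405 >> PSS_SHIFT) = 1365 bytes is this process's
 * share of that page.  The truncation error per page is bounded by
 * one fixed-point unit, i.e. 2^-12 of a byte, instead of up to a
 * whole byte with plain integer division.
 */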
#ifdef CONFIG_PROC_PAGE_MONITOR
struct mem_size_stats {
        unsigned long resident;
        unsigned long shared_clean;
        unsigned long shared_dirty;
        unsigned long private_clean;
        unsigned long private_dirty;
        unsigned long referenced;
        unsigned long anonymous;
        unsigned long anonymous_thp;
        unsigned long shmem_thp;
        unsigned long swap;
        unsigned long shared_hugetlb;
        unsigned long private_hugetlb;
        u64 pss;
        u64 swap_pss;
        bool check_shmem_swap;
};

static void smaps_account(struct mem_size_stats *mss, struct page *page,
                          bool compound, bool young, bool dirty)
{
        int i, nr = compound ? 1 << compound_order(page) : 1;
        unsigned long size = nr * PAGE_SIZE;

        if (PageAnon(page))
                mss->anonymous += size;

        mss->resident += size;
        /* Accumulate the size in pages that have been accessed. */
        if (young || page_is_young(page) || PageReferenced(page))
                mss->referenced += size;

        /*
         * page_count(page) == 1 guarantees the page is mapped exactly once.
         * If any subpage of the compound page is mapped with a PTE, it
         * would elevate page_count().
         */
        if (page_count(page) == 1) {
                if (dirty || PageDirty(page))
                        mss->private_dirty += size;
                else
                        mss->private_clean += size;
                mss->pss += (u64)size << PSS_SHIFT;
                return;
        }

        for (i = 0; i < nr; i++, page++) {
                int mapcount = page_mapcount(page);

                if (mapcount >= 2) {
                        if (dirty || PageDirty(page))
                                mss->shared_dirty += PAGE_SIZE;
                        else
                                mss->shared_clean += PAGE_SIZE;
                        mss->pss += (PAGE_SIZE << PSS_SHIFT) / mapcount;
                } else {
                        if (dirty || PageDirty(page))
                                mss->private_dirty += PAGE_SIZE;
                        else
                                mss->private_clean += PAGE_SIZE;
                        mss->pss += PAGE_SIZE << PSS_SHIFT;
                }
        }
}
#ifdef CONFIG_SHMEM
static int smaps_pte_hole(unsigned long addr, unsigned long end,
                          struct mm_walk *walk)
{
        struct mem_size_stats *mss = walk->private;

        mss->swap += shmem_partial_swap_usage(
                        walk->vma->vm_file->f_mapping, addr, end);

        return 0;
}
#endif

static void smaps_pte_entry(pte_t *pte, unsigned long addr,
                            struct mm_walk *walk)
{
        struct mem_size_stats *mss = walk->private;
        struct vm_area_struct *vma = walk->vma;
        struct page *page = NULL;

        if (pte_present(*pte)) {
                page = vm_normal_page(vma, addr, *pte);
        } else if (is_swap_pte(*pte)) {
                swp_entry_t swpent = pte_to_swp_entry(*pte);

                if (!non_swap_entry(swpent)) {
                        int mapcount;

                        mss->swap += PAGE_SIZE;
                        mapcount = swp_swapcount(swpent);
                        if (mapcount >= 2) {
                                u64 pss_delta = (u64)PAGE_SIZE << PSS_SHIFT;

                                do_div(pss_delta, mapcount);
                                mss->swap_pss += pss_delta;
                        } else {
                                mss->swap_pss += (u64)PAGE_SIZE << PSS_SHIFT;
                        }
                } else if (is_migration_entry(swpent))
                        page = migration_entry_to_page(swpent);
        } else if (unlikely(IS_ENABLED(CONFIG_SHMEM) && mss->check_shmem_swap
                                                        && pte_none(*pte))) {
                page = find_get_entry(vma->vm_file->f_mapping,
                                      linear_page_index(vma, addr));
                if (!page)
                        return;

                if (radix_tree_exceptional_entry(page))
                        mss->swap += PAGE_SIZE;
                else
                        put_page(page);

                return;
        }

        if (!page)
                return;

        smaps_account(mss, page, false, pte_young(*pte), pte_dirty(*pte));
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr,
                            struct mm_walk *walk)
{
        struct mem_size_stats *mss = walk->private;
        struct vm_area_struct *vma = walk->vma;
        struct page *page;

        /* FOLL_DUMP will return -EFAULT on huge zero page */
        page = follow_trans_huge_pmd(vma, addr, pmd, FOLL_DUMP);
        if (IS_ERR_OR_NULL(page))
                return;
        if (PageAnon(page))
                mss->anonymous_thp += HPAGE_PMD_SIZE;
        else if (PageSwapBacked(page))
                mss->shmem_thp += HPAGE_PMD_SIZE;
        else if (is_zone_device_page(page))
                /* pass */;
        else
                VM_BUG_ON_PAGE(1, page);
        smaps_account(mss, page, true, pmd_young(*pmd), pmd_dirty(*pmd));
}
#else
static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr,
                            struct mm_walk *walk)
{
}
#endif

static int smaps_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
                           struct mm_walk *walk)
{
        struct vm_area_struct *vma = walk->vma;
        pte_t *pte;
        spinlock_t *ptl;

        ptl = pmd_trans_huge_lock(pmd, vma);
        if (ptl) {
                smaps_pmd_entry(pmd, addr, walk);
                spin_unlock(ptl);
                return 0;
        }

        if (pmd_trans_unstable(pmd))
                return 0;
        /*
         * The mmap_sem held all the way back in m_start() is what
         * keeps khugepaged out of here and from collapsing things
         * in here.
         */
        pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
        for (; addr != end; pte++, addr += PAGE_SIZE)
                smaps_pte_entry(pte, addr, walk);
        pte_unmap_unlock(pte - 1, ptl);
        cond_resched();
        return 0;
}
static void show_smap_vma_flags(struct seq_file *m, struct vm_area_struct *vma)
{
        /*
         * Don't forget to update Documentation/ on changes.
         */
        static const char mnemonics[BITS_PER_LONG][2] = {
                /*
                 * In case we meet a flag we don't know about.
                 */
                [0 ... (BITS_PER_LONG-1)] = "??",

                [ilog2(VM_READ)]        = "rd",
                [ilog2(VM_WRITE)]       = "wr",
                [ilog2(VM_EXEC)]        = "ex",
                [ilog2(VM_SHARED)]      = "sh",
                [ilog2(VM_MAYREAD)]     = "mr",
                [ilog2(VM_MAYWRITE)]    = "mw",
                [ilog2(VM_MAYEXEC)]     = "me",
                [ilog2(VM_MAYSHARE)]    = "ms",
                [ilog2(VM_GROWSDOWN)]   = "gd",
                [ilog2(VM_PFNMAP)]      = "pf",
                [ilog2(VM_DENYWRITE)]   = "dw",
#ifdef CONFIG_X86_INTEL_MPX
                [ilog2(VM_MPX)]         = "mp",
#endif
                [ilog2(VM_LOCKED)]      = "lo",
                [ilog2(VM_IO)]          = "io",
                [ilog2(VM_SEQ_READ)]    = "sr",
                [ilog2(VM_RAND_READ)]   = "rr",
                [ilog2(VM_DONTCOPY)]    = "dc",
                [ilog2(VM_DONTEXPAND)]  = "de",
                [ilog2(VM_ACCOUNT)]     = "ac",
                [ilog2(VM_NORESERVE)]   = "nr",
                [ilog2(VM_HUGETLB)]     = "ht",
                [ilog2(VM_ARCH_1)]      = "ar",
                [ilog2(VM_DONTDUMP)]    = "dd",
#ifdef CONFIG_MEM_SOFT_DIRTY
                [ilog2(VM_SOFTDIRTY)]   = "sd",
#endif
                [ilog2(VM_MIXEDMAP)]    = "mm",
                [ilog2(VM_HUGEPAGE)]    = "hg",
                [ilog2(VM_NOHUGEPAGE)]  = "nh",
                [ilog2(VM_MERGEABLE)]   = "mg",
                [ilog2(VM_UFFD_MISSING)] = "um",
                [ilog2(VM_UFFD_WP)]     = "uw",
#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
                /* These come out via ProtectionKey: */
                [ilog2(VM_PKEY_BIT0)]   = "",
                [ilog2(VM_PKEY_BIT1)]   = "",
                [ilog2(VM_PKEY_BIT2)]   = "",
                [ilog2(VM_PKEY_BIT3)]   = "",
#endif
        };
        size_t i;

        seq_puts(m, "VmFlags: ");
        for (i = 0; i < BITS_PER_LONG; i++) {
                if (!mnemonics[i][0])
                        continue;
                if (vma->vm_flags & (1UL << i)) {
                        seq_printf(m, "%c%c ",
                                   mnemonics[i][0], mnemonics[i][1]);
                }
        }
        seq_putc(m, '\n');
}
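/*
 * Example of the resulting line for a typical private, executable
 * file mapping (illustrative only; the exact set depends on the
 * VMA's flags and the kernel configuration):
 *
 *	VmFlags: rd ex mr mw me dw
 */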
#ifdef CONFIG_HUGETLB_PAGE
static int smaps_hugetlb_range(pte_t *pte, unsigned long hmask,
                               unsigned long addr, unsigned long end,
                               struct mm_walk *walk)
{
        struct mem_size_stats *mss = walk->private;
        struct vm_area_struct *vma = walk->vma;
        struct page *page = NULL;

        if (pte_present(*pte)) {
                page = vm_normal_page(vma, addr, *pte);
        } else if (is_swap_pte(*pte)) {
                swp_entry_t swpent = pte_to_swp_entry(*pte);

                if (is_migration_entry(swpent))
                        page = migration_entry_to_page(swpent);
        }
        if (page) {
                int mapcount = page_mapcount(page);

                if (mapcount >= 2)
                        mss->shared_hugetlb += huge_page_size(hstate_vma(vma));
                else
                        mss->private_hugetlb += huge_page_size(hstate_vma(vma));
        }
        return 0;
}
#endif /* HUGETLB_PAGE */

void __weak arch_show_smap(struct seq_file *m, struct vm_area_struct *vma)
{
}

static int show_smap(struct seq_file *m, void *v, int is_pid)
{
        struct vm_area_struct *vma = v;
        struct mem_size_stats mss;
        struct mm_walk smaps_walk = {
                .pmd_entry = smaps_pte_range,
#ifdef CONFIG_HUGETLB_PAGE
                .hugetlb_entry = smaps_hugetlb_range,
#endif
                .mm = vma->vm_mm,
                .private = &mss,
        };

        memset(&mss, 0, sizeof mss);

#ifdef CONFIG_SHMEM
        if (vma->vm_file && shmem_mapping(vma->vm_file->f_mapping)) {
                /*
                 * For shared or readonly shmem mappings we know that all
                 * swapped out pages belong to the shmem object, and we can
                 * obtain the swap value much more efficiently. For private
                 * writable mappings, we might have COW pages that are
                 * not affected by the parent swapped out pages of the shmem
                 * object, so we have to distinguish them during the page walk.
                 * Unless we know that the shmem object (or the part mapped by
                 * our VMA) has no swapped out pages at all.
                 */
                unsigned long shmem_swapped = shmem_swap_usage(vma);

                if (!shmem_swapped || (vma->vm_flags & VM_SHARED) ||
                                        !(vma->vm_flags & VM_WRITE)) {
                        mss.swap = shmem_swapped;
                } else {
                        mss.check_shmem_swap = true;
                        smaps_walk.pte_hole = smaps_pte_hole;
                }
        }
#endif

        /* mmap_sem is held in m_start */
        walk_page_vma(vma, &smaps_walk);

        show_map_vma(m, vma, is_pid);

        seq_printf(m,
                   "Size:           %8lu kB\n"
                   "Rss:            %8lu kB\n"
                   "Pss:            %8lu kB\n"
                   "Shared_Clean:   %8lu kB\n"
                   "Shared_Dirty:   %8lu kB\n"
                   "Private_Clean:  %8lu kB\n"
                   "Private_Dirty:  %8lu kB\n"
                   "Referenced:     %8lu kB\n"
                   "Anonymous:      %8lu kB\n"
                   "AnonHugePages:  %8lu kB\n"
                   "ShmemPmdMapped: %8lu kB\n"
                   "Shared_Hugetlb: %8lu kB\n"
                   "Private_Hugetlb: %7lu kB\n"
                   "Swap:           %8lu kB\n"
                   "SwapPss:        %8lu kB\n"
                   "KernelPageSize: %8lu kB\n"
                   "MMUPageSize:    %8lu kB\n"
                   "Locked:         %8lu kB\n",
                   (vma->vm_end - vma->vm_start) >> 10,
                   mss.resident >> 10,
                   (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
                   mss.shared_clean  >> 10,
                   mss.shared_dirty  >> 10,
                   mss.private_clean >> 10,
                   mss.private_dirty >> 10,
                   mss.referenced >> 10,
                   mss.anonymous >> 10,
                   mss.anonymous_thp >> 10,
                   mss.shmem_thp >> 10,
                   mss.shared_hugetlb >> 10,
                   mss.private_hugetlb >> 10,
                   mss.swap >> 10,
                   (unsigned long)(mss.swap_pss >> (10 + PSS_SHIFT)),
                   vma_kernel_pagesize(vma) >> 10,
                   vma_mmu_pagesize(vma) >> 10,
                   (vma->vm_flags & VM_LOCKED) ?
                        (unsigned long)(mss.pss >> (10 + PSS_SHIFT)) : 0);

        arch_show_smap(m, vma);
        show_smap_vma_flags(m, vma);
        m_cache_vma(m, vma);
        return 0;
}

static int show_pid_smap(struct seq_file *m, void *v)
{
        return show_smap(m, v, 1);
}

static int show_tid_smap(struct seq_file *m, void *v)
{
        return show_smap(m, v, 0);
}

static const struct seq_operations proc_pid_smaps_op = {
        .start  = m_start,
        .next   = m_next,
        .stop   = m_stop,
        .show   = show_pid_smap
};

static const struct seq_operations proc_tid_smaps_op = {
        .start  = m_start,
        .next   = m_next,
        .stop   = m_stop,
        .show   = show_tid_smap
};

static int pid_smaps_open(struct inode *inode, struct file *file)
{
        return do_maps_open(inode, file, &proc_pid_smaps_op);
}

static int tid_smaps_open(struct inode *inode, struct file *file)
{
        return do_maps_open(inode, file, &proc_tid_smaps_op);
}

const struct file_operations proc_pid_smaps_operations = {
        .open           = pid_smaps_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = proc_map_release,
};

const struct file_operations proc_tid_smaps_operations = {
        .open           = tid_smaps_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = proc_map_release,
};
enum clear_refs_types {
        CLEAR_REFS_ALL = 1,
        CLEAR_REFS_ANON,
        CLEAR_REFS_MAPPED,
        CLEAR_REFS_SOFT_DIRTY,
        CLEAR_REFS_MM_HIWATER_RSS,
        CLEAR_REFS_LAST,
};

struct clear_refs_private {
        enum clear_refs_types type;
};

#ifdef CONFIG_MEM_SOFT_DIRTY
static inline void clear_soft_dirty(struct vm_area_struct *vma,
                unsigned long addr, pte_t *pte)
{
        /*
         * The soft-dirty tracker uses #PF-s to catch writes
         * to pages, so write-protect the pte as well. See the
         * Documentation/vm/soft-dirty.txt for full description
         * of how soft-dirty works.
         */
        pte_t ptent = *pte;

        if (pte_present(ptent)) {
                ptent = ptep_modify_prot_start(vma->vm_mm, addr, pte);
                ptent = pte_wrprotect(ptent);
                ptent = pte_clear_soft_dirty(ptent);
                ptep_modify_prot_commit(vma->vm_mm, addr, pte, ptent);
        } else if (is_swap_pte(ptent)) {
                ptent = pte_swp_clear_soft_dirty(ptent);
                set_pte_at(vma->vm_mm, addr, pte, ptent);
        }
}
#else
static inline void clear_soft_dirty(struct vm_area_struct *vma,
                unsigned long addr, pte_t *pte)
{
}
#endif

#if defined(CONFIG_MEM_SOFT_DIRTY) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
static inline void clear_soft_dirty_pmd(struct vm_area_struct *vma,
                unsigned long addr, pmd_t *pmdp)
{
        pmd_t pmd = pmdp_huge_get_and_clear(vma->vm_mm, addr, pmdp);

        pmd = pmd_wrprotect(pmd);
        pmd = pmd_clear_soft_dirty(pmd);

        set_pmd_at(vma->vm_mm, addr, pmdp, pmd);
}
#else
static inline void clear_soft_dirty_pmd(struct vm_area_struct *vma,
                unsigned long addr, pmd_t *pmdp)
{
}
#endif

static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr,
                                unsigned long end, struct mm_walk *walk)
{
        struct clear_refs_private *cp = walk->private;
        struct vm_area_struct *vma = walk->vma;
        pte_t *pte, ptent;
        spinlock_t *ptl;
        struct page *page;

        ptl = pmd_trans_huge_lock(pmd, vma);
        if (ptl) {
                if (cp->type == CLEAR_REFS_SOFT_DIRTY) {
                        clear_soft_dirty_pmd(vma, addr, pmd);
                        goto out;
                }

                page = pmd_page(*pmd);

                /* Clear accessed and referenced bits. */
                pmdp_test_and_clear_young(vma, addr, pmd);
                test_and_clear_page_young(page);
                ClearPageReferenced(page);
out:
                spin_unlock(ptl);
                return 0;
        }

        if (pmd_trans_unstable(pmd))
                return 0;

        pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
        for (; addr != end; pte++, addr += PAGE_SIZE) {
                ptent = *pte;

                if (cp->type == CLEAR_REFS_SOFT_DIRTY) {
                        clear_soft_dirty(vma, addr, pte);
                        continue;
                }

                if (!pte_present(ptent))
                        continue;

                page = vm_normal_page(vma, addr, ptent);
                if (!page)
                        continue;

                /* Clear accessed and referenced bits. */
                ptep_test_and_clear_young(vma, addr, pte);
                test_and_clear_page_young(page);
                ClearPageReferenced(page);
        }
        pte_unmap_unlock(pte - 1, ptl);
        cond_resched();
        return 0;
}

static int clear_refs_test_walk(unsigned long start, unsigned long end,
                                struct mm_walk *walk)
{
        struct clear_refs_private *cp = walk->private;
        struct vm_area_struct *vma = walk->vma;

        if (vma->vm_flags & VM_PFNMAP)
                return 1;

        /*
         * Writing 1 to /proc/pid/clear_refs affects all pages.
         * Writing 2 to /proc/pid/clear_refs only affects anonymous pages.
         * Writing 3 to /proc/pid/clear_refs only affects file mapped pages.
         * Writing 4 to /proc/pid/clear_refs affects all pages.
         */
        if (cp->type == CLEAR_REFS_ANON && vma->vm_file)
                return 1;
        if (cp->type == CLEAR_REFS_MAPPED && !vma->vm_file)
                return 1;
        return 0;
}
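/*
 * Userspace drives this interface with plain writes, e.g.:
 *
 *	echo 1 > /proc/PID/clear_refs	(clear refs on all pages)
 *	echo 4 > /proc/PID/clear_refs	(clear soft-dirty bits)
 *
 * The values map onto clear_refs_types above: 4 is
 * CLEAR_REFS_SOFT_DIRTY and 5 is CLEAR_REFS_MM_HIWATER_RSS, both
 * handled in clear_refs_write() below.
 */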
static ssize_t clear_refs_write(struct file *file, const char __user *buf,
                                size_t count, loff_t *ppos)
{
        struct task_struct *task;
        char buffer[PROC_NUMBUF];
        struct mm_struct *mm;
        struct vm_area_struct *vma;
        enum clear_refs_types type;
        int itype;
        int rv;

        memset(buffer, 0, sizeof(buffer));
        if (count > sizeof(buffer) - 1)
                count = sizeof(buffer) - 1;
        if (copy_from_user(buffer, buf, count))
                return -EFAULT;
        rv = kstrtoint(strstrip(buffer), 10, &itype);
        if (rv < 0)
                return rv;
        type = (enum clear_refs_types)itype;
        if (type < CLEAR_REFS_ALL || type >= CLEAR_REFS_LAST)
                return -EINVAL;

        task = get_proc_task(file_inode(file));
        if (!task)
                return -ESRCH;
        mm = get_task_mm(task);
        if (mm) {
                struct clear_refs_private cp = {
                        .type = type,
                };
                struct mm_walk clear_refs_walk = {
                        .pmd_entry = clear_refs_pte_range,
                        .test_walk = clear_refs_test_walk,
                        .mm = mm,
                        .private = &cp,
                };

                if (type == CLEAR_REFS_MM_HIWATER_RSS) {
                        if (down_write_killable(&mm->mmap_sem)) {
                                count = -EINTR;
                                goto out_mm;
                        }

                        /*
                         * Writing 5 to /proc/pid/clear_refs resets the peak
                         * resident set size to this mm's current rss value.
                         */
                        reset_mm_hiwater_rss(mm);
                        up_write(&mm->mmap_sem);
                        goto out_mm;
                }

                down_read(&mm->mmap_sem);
                if (type == CLEAR_REFS_SOFT_DIRTY) {
                        for (vma = mm->mmap; vma; vma = vma->vm_next) {
                                if (!(vma->vm_flags & VM_SOFTDIRTY))
                                        continue;
                                up_read(&mm->mmap_sem);
                                if (down_write_killable(&mm->mmap_sem)) {
                                        count = -EINTR;
                                        goto out_mm;
                                }
                                for (vma = mm->mmap; vma; vma = vma->vm_next) {
                                        vma->vm_flags &= ~VM_SOFTDIRTY;
                                        vma_set_page_prot(vma);
                                }
                                downgrade_write(&mm->mmap_sem);
                                break;
                        }
                        mmu_notifier_invalidate_range_start(mm, 0, -1);
                }
                walk_page_range(0, mm->highest_vm_end, &clear_refs_walk);
                if (type == CLEAR_REFS_SOFT_DIRTY)
                        mmu_notifier_invalidate_range_end(mm, 0, -1);
                flush_tlb_mm(mm);
                up_read(&mm->mmap_sem);
out_mm:
                mmput(mm);
        }
        put_task_struct(task);

        return count;
}

const struct file_operations proc_clear_refs_operations = {
        .write          = clear_refs_write,
        .llseek         = noop_llseek,
};
typedef struct {
        u64 pme;
} pagemap_entry_t;

struct pagemapread {
        int pos, len;           /* units: PM_ENTRY_BYTES, not bytes */
        pagemap_entry_t *buffer;
        bool show_pfn;
};

#define PAGEMAP_WALK_SIZE       (PMD_SIZE)
#define PAGEMAP_WALK_MASK       (PMD_MASK)

#define PM_ENTRY_BYTES          sizeof(pagemap_entry_t)
#define PM_PFRAME_BITS          55
#define PM_PFRAME_MASK          GENMASK_ULL(PM_PFRAME_BITS - 1, 0)
#define PM_SOFT_DIRTY           BIT_ULL(55)
#define PM_MMAP_EXCLUSIVE       BIT_ULL(56)
#define PM_FILE                 BIT_ULL(61)
#define PM_SWAP                 BIT_ULL(62)
#define PM_PRESENT              BIT_ULL(63)

#define PM_END_OF_BUFFER        1

static inline pagemap_entry_t make_pme(u64 frame, u64 flags)
{
        return (pagemap_entry_t) { .pme = (frame & PM_PFRAME_MASK) | flags };
}
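/*
 * Example (illustrative): for a present page at pfn 0x1a2b3 mapped by
 * exactly one process, pte_to_pagemap_entry() below ends up building
 *
 *	make_pme(0x1a2b3, PM_PRESENT | PM_MMAP_EXCLUSIVE)
 *
 * i.e. bits 63 and 56 set, with the pfn in bits 0-54.
 */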
static int add_to_pagemap(unsigned long addr, pagemap_entry_t *pme,
                          struct pagemapread *pm)
{
        pm->buffer[pm->pos++] = *pme;
        if (pm->pos >= pm->len)
                return PM_END_OF_BUFFER;
        return 0;
}

static int pagemap_pte_hole(unsigned long start, unsigned long end,
                            struct mm_walk *walk)
{
        struct pagemapread *pm = walk->private;
        unsigned long addr = start;
        int err = 0;

        while (addr < end) {
                struct vm_area_struct *vma = find_vma(walk->mm, addr);
                pagemap_entry_t pme = make_pme(0, 0);
                /* End of address space hole, which we mark as non-present. */
                unsigned long hole_end;

                if (vma)
                        hole_end = min(end, vma->vm_start);
                else
                        hole_end = end;

                for (; addr < hole_end; addr += PAGE_SIZE) {
                        err = add_to_pagemap(addr, &pme, pm);
                        if (err)
                                goto out;
                }

                if (!vma)
                        break;

                /* Addresses in the VMA. */
                if (vma->vm_flags & VM_SOFTDIRTY)
                        pme = make_pme(0, PM_SOFT_DIRTY);
                for (; addr < min(end, vma->vm_end); addr += PAGE_SIZE) {
                        err = add_to_pagemap(addr, &pme, pm);
                        if (err)
                                goto out;
                }
        }
out:
        return err;
}

static pagemap_entry_t pte_to_pagemap_entry(struct pagemapread *pm,
                struct vm_area_struct *vma, unsigned long addr, pte_t pte)
{
        u64 frame = 0, flags = 0;
        struct page *page = NULL;

        if (pte_present(pte)) {
                if (pm->show_pfn)
                        frame = pte_pfn(pte);
                flags |= PM_PRESENT;
                page = vm_normal_page(vma, addr, pte);
                if (pte_soft_dirty(pte))
                        flags |= PM_SOFT_DIRTY;
        } else if (is_swap_pte(pte)) {
                swp_entry_t entry;

                if (pte_swp_soft_dirty(pte))
                        flags |= PM_SOFT_DIRTY;
                entry = pte_to_swp_entry(pte);
                frame = swp_type(entry) |
                        (swp_offset(entry) << MAX_SWAPFILES_SHIFT);
                flags |= PM_SWAP;
                if (is_migration_entry(entry))
                        page = migration_entry_to_page(entry);
        }

        if (page && !PageAnon(page))
                flags |= PM_FILE;
        if (page && page_mapcount(page) == 1)
                flags |= PM_MMAP_EXCLUSIVE;
        if (vma->vm_flags & VM_SOFTDIRTY)
                flags |= PM_SOFT_DIRTY;

        return make_pme(frame, flags);
}
static int pagemap_pmd_range(pmd_t *pmdp, unsigned long addr, unsigned long end,
                             struct mm_walk *walk)
{
        struct vm_area_struct *vma = walk->vma;
        struct pagemapread *pm = walk->private;
        spinlock_t *ptl;
        pte_t *pte, *orig_pte;
        int err = 0;

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
        ptl = pmd_trans_huge_lock(pmdp, vma);
        if (ptl) {
                u64 flags = 0, frame = 0;
                pmd_t pmd = *pmdp;

                if ((vma->vm_flags & VM_SOFTDIRTY) || pmd_soft_dirty(pmd))
                        flags |= PM_SOFT_DIRTY;

                /*
                 * Currently the pmd for a thp is always present, because
                 * a thp cannot be swapped out, migrated, or HWPOISONed
                 * (it is split in such cases instead).
                 * This if-check is just to prepare for future implementation.
                 */
                if (pmd_present(pmd)) {
                        struct page *page = pmd_page(pmd);

                        if (page_mapcount(page) == 1)
                                flags |= PM_MMAP_EXCLUSIVE;

                        flags |= PM_PRESENT;
                        if (pm->show_pfn)
                                frame = pmd_pfn(pmd) +
                                        ((addr & ~PMD_MASK) >> PAGE_SHIFT);
                }

                for (; addr != end; addr += PAGE_SIZE) {
                        pagemap_entry_t pme = make_pme(frame, flags);

                        err = add_to_pagemap(addr, &pme, pm);
                        if (err)
                                break;
                        if (pm->show_pfn && (flags & PM_PRESENT))
                                frame++;
                }
                spin_unlock(ptl);
                return err;
        }

        if (pmd_trans_unstable(pmdp))
                return 0;
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

        /*
         * We can assume that @vma always points to a valid one and @end never
         * goes beyond vma->vm_end.
         */
        orig_pte = pte = pte_offset_map_lock(walk->mm, pmdp, addr, &ptl);
        for (; addr < end; pte++, addr += PAGE_SIZE) {
                pagemap_entry_t pme;

                pme = pte_to_pagemap_entry(pm, vma, addr, *pte);
                err = add_to_pagemap(addr, &pme, pm);
                if (err)
                        break;
        }
        pte_unmap_unlock(orig_pte, ptl);

        cond_resched();

        return err;
}
#ifdef CONFIG_HUGETLB_PAGE
/* This function walks within one hugetlb entry in the single call */
static int pagemap_hugetlb_range(pte_t *ptep, unsigned long hmask,
                                 unsigned long addr, unsigned long end,
                                 struct mm_walk *walk)
{
        struct pagemapread *pm = walk->private;
        struct vm_area_struct *vma = walk->vma;
        u64 flags = 0, frame = 0;
        int err = 0;
        pte_t pte;

        if (vma->vm_flags & VM_SOFTDIRTY)
                flags |= PM_SOFT_DIRTY;

        pte = huge_ptep_get(ptep);
        if (pte_present(pte)) {
                struct page *page = pte_page(pte);

                if (!PageAnon(page))
                        flags |= PM_FILE;

                if (page_mapcount(page) == 1)
                        flags |= PM_MMAP_EXCLUSIVE;

                flags |= PM_PRESENT;
                if (pm->show_pfn)
                        frame = pte_pfn(pte) +
                                ((addr & ~hmask) >> PAGE_SHIFT);
        }

        for (; addr != end; addr += PAGE_SIZE) {
                pagemap_entry_t pme = make_pme(frame, flags);

                err = add_to_pagemap(addr, &pme, pm);
                if (err)
                        return err;
                if (pm->show_pfn && (flags & PM_PRESENT))
                        frame++;
        }

        cond_resched();

        return err;
}
#endif /* HUGETLB_PAGE */
/*
 * /proc/pid/pagemap - an array mapping virtual pages to pfns
 *
 * For each page in the address space, this file contains one 64-bit entry
 * consisting of the following:
 *
 * Bits 0-54  page frame number (PFN) if present
 * Bits 0-4   swap type if swapped
 * Bits 5-54  swap offset if swapped
 * Bit  55    pte is soft-dirty (see Documentation/vm/soft-dirty.txt)
 * Bit  56    page exclusively mapped
 * Bits 57-60 zero
 * Bit  61    page is file-page or shared-anon
 * Bit  62    page swapped
 * Bit  63    page present
 *
 * If the page is not present but in swap, then the PFN contains an
 * encoding of the swap file number and the page's offset into the
 * swap. Unmapped pages return a null PFN. This allows determining
 * precisely which pages are mapped (or in swap) and comparing mapped
 * pages between processes.
 *
 * Efficient users of this interface will use /proc/pid/maps to
 * determine which areas of memory are actually mapped and llseek to
 * skip over unmapped regions.
 */
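/*
 * Minimal userspace lookup sketch (typical usage, not part of this
 * file): seek to (vaddr / page_size) * 8 in /proc/PID/pagemap and
 * read one 64-bit entry:
 *
 *	uint64_t ent;
 *	int fd = open("/proc/PID/pagemap", O_RDONLY);
 *
 *	pread(fd, &ent, 8, (vaddr / page_size) * 8);
 *	if (ent & (1ULL << 63))			(PM_PRESENT)
 *		pfn = ent & ((1ULL << 55) - 1);	(PM_PFRAME_MASK)
 *
 * Note that the pfn bits read back as zero without CAP_SYS_ADMIN; see
 * the show_pfn check in pagemap_read() below.
 */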
static ssize_t pagemap_read(struct file *file, char __user *buf,
                            size_t count, loff_t *ppos)
{
        struct mm_struct *mm = file->private_data;
        struct pagemapread pm;
        struct mm_walk pagemap_walk = {};
        unsigned long src;
        unsigned long svpfn;
        unsigned long start_vaddr;
        unsigned long end_vaddr;
        int ret = 0, copied = 0;

        if (!mm || !mmget_not_zero(mm))
                goto out;

        ret = -EINVAL;
        /* file position must be aligned */
        if ((*ppos % PM_ENTRY_BYTES) || (count % PM_ENTRY_BYTES))
                goto out_mm;

        ret = 0;
        if (!count)
                goto out_mm;

        /* do not disclose physical addresses: attack vector */
        pm.show_pfn = file_ns_capable(file, &init_user_ns, CAP_SYS_ADMIN);

        pm.len = (PAGEMAP_WALK_SIZE >> PAGE_SHIFT);
        pm.buffer = kmalloc(pm.len * PM_ENTRY_BYTES, GFP_TEMPORARY);
        ret = -ENOMEM;
        if (!pm.buffer)
                goto out_mm;

        pagemap_walk.pmd_entry = pagemap_pmd_range;
        pagemap_walk.pte_hole = pagemap_pte_hole;
#ifdef CONFIG_HUGETLB_PAGE
        pagemap_walk.hugetlb_entry = pagemap_hugetlb_range;
#endif
        pagemap_walk.mm = mm;
        pagemap_walk.private = &pm;

        src = *ppos;
        svpfn = src / PM_ENTRY_BYTES;
        start_vaddr = svpfn << PAGE_SHIFT;
        end_vaddr = mm->task_size;

        /* watch out for wraparound */
        if (svpfn > mm->task_size >> PAGE_SHIFT)
                start_vaddr = end_vaddr;

        /*
         * The odds are that this will stop walking way
         * before end_vaddr, because the length of the
         * user buffer is tracked in "pm", and the walk
         * will stop when we hit the end of the buffer.
         */
        ret = 0;
        while (count && (start_vaddr < end_vaddr)) {
                int len;
                unsigned long end;

                pm.pos = 0;
                end = (start_vaddr + PAGEMAP_WALK_SIZE) & PAGEMAP_WALK_MASK;
                /* overflow ? */
                if (end < start_vaddr || end > end_vaddr)
                        end = end_vaddr;
                down_read(&mm->mmap_sem);
                ret = walk_page_range(start_vaddr, end, &pagemap_walk);
                up_read(&mm->mmap_sem);
                start_vaddr = end;

                len = min(count, PM_ENTRY_BYTES * pm.pos);
                if (copy_to_user(buf, pm.buffer, len)) {
                        ret = -EFAULT;
                        goto out_free;
                }
                copied += len;
                buf += len;
                count -= len;
        }
        *ppos += copied;
        if (!ret || ret == PM_END_OF_BUFFER)
                ret = copied;

out_free:
        kfree(pm.buffer);
out_mm:
        mmput(mm);
out:
        return ret;
}

static int pagemap_open(struct inode *inode, struct file *file)
{
        struct mm_struct *mm;

        mm = proc_mem_open(inode, PTRACE_MODE_READ);
        if (IS_ERR(mm))
                return PTR_ERR(mm);
        file->private_data = mm;
        return 0;
}

static int pagemap_release(struct inode *inode, struct file *file)
{
        struct mm_struct *mm = file->private_data;

        if (mm)
                mmdrop(mm);
        return 0;
}

const struct file_operations proc_pagemap_operations = {
        .llseek         = mem_lseek, /* borrow this */
        .read           = pagemap_read,
        .open           = pagemap_open,
        .release        = pagemap_release,
};
#endif /* CONFIG_PROC_PAGE_MONITOR */
#ifdef CONFIG_NUMA

struct numa_maps {
        unsigned long pages;
        unsigned long anon;
        unsigned long active;
        unsigned long writeback;
        unsigned long mapcount_max;
        unsigned long dirty;
        unsigned long swapcache;
        unsigned long node[MAX_NUMNODES];
};

struct numa_maps_private {
        struct proc_maps_private proc_maps;
        struct numa_maps md;
};

static void gather_stats(struct page *page, struct numa_maps *md, int pte_dirty,
                         unsigned long nr_pages)
{
        int count = page_mapcount(page);

        md->pages += nr_pages;
        if (pte_dirty || PageDirty(page))
                md->dirty += nr_pages;

        if (PageSwapCache(page))
                md->swapcache += nr_pages;

        if (PageActive(page) || PageUnevictable(page))
                md->active += nr_pages;

        if (PageWriteback(page))
                md->writeback += nr_pages;

        if (PageAnon(page))
                md->anon += nr_pages;

        if (count > md->mapcount_max)
                md->mapcount_max = count;

        md->node[page_to_nid(page)] += nr_pages;
}

static struct page *can_gather_numa_stats(pte_t pte, struct vm_area_struct *vma,
                unsigned long addr)
{
        struct page *page;
        int nid;

        if (!pte_present(pte))
                return NULL;

        page = vm_normal_page(vma, addr, pte);
        if (!page)
                return NULL;

        if (PageReserved(page))
                return NULL;

        nid = page_to_nid(page);
        if (!node_isset(nid, node_states[N_MEMORY]))
                return NULL;

        return page;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static struct page *can_gather_numa_stats_pmd(pmd_t pmd,
                                              struct vm_area_struct *vma,
                                              unsigned long addr)
{
        struct page *page;
        int nid;

        if (!pmd_present(pmd))
                return NULL;

        page = vm_normal_page_pmd(vma, addr, pmd);
        if (!page)
                return NULL;

        if (PageReserved(page))
                return NULL;

        nid = page_to_nid(page);
        if (!node_isset(nid, node_states[N_MEMORY]))
                return NULL;

        return page;
}
#endif

static int gather_pte_stats(pmd_t *pmd, unsigned long addr,
                unsigned long end, struct mm_walk *walk)
{
        struct numa_maps *md = walk->private;
        struct vm_area_struct *vma = walk->vma;
        spinlock_t *ptl;
        pte_t *orig_pte;
        pte_t *pte;

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
        ptl = pmd_trans_huge_lock(pmd, vma);
        if (ptl) {
                struct page *page;

                page = can_gather_numa_stats_pmd(*pmd, vma, addr);
                if (page)
                        gather_stats(page, md, pmd_dirty(*pmd),
                                     HPAGE_PMD_SIZE/PAGE_SIZE);
                spin_unlock(ptl);
                return 0;
        }

        if (pmd_trans_unstable(pmd))
                return 0;
#endif
        orig_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
        do {
                struct page *page = can_gather_numa_stats(*pte, vma, addr);
                if (!page)
                        continue;
                gather_stats(page, md, pte_dirty(*pte), 1);

        } while (pte++, addr += PAGE_SIZE, addr != end);
        pte_unmap_unlock(orig_pte, ptl);
        cond_resched();
        return 0;
}

#ifdef CONFIG_HUGETLB_PAGE
static int gather_hugetlb_stats(pte_t *pte, unsigned long hmask,
                unsigned long addr, unsigned long end, struct mm_walk *walk)
{
        pte_t huge_pte = huge_ptep_get(pte);
        struct numa_maps *md;
        struct page *page;

        if (!pte_present(huge_pte))
                return 0;

        page = pte_page(huge_pte);
        if (!page)
                return 0;

        md = walk->private;
        gather_stats(page, md, pte_dirty(huge_pte), 1);
        return 0;
}
#else
static int gather_hugetlb_stats(pte_t *pte, unsigned long hmask,
                unsigned long addr, unsigned long end, struct mm_walk *walk)
{
        return 0;
}
#endif
/*
 * Display pages allocated per node and memory policy via /proc.
 */
static int show_numa_map(struct seq_file *m, void *v, int is_pid)
{
        struct numa_maps_private *numa_priv = m->private;
        struct proc_maps_private *proc_priv = &numa_priv->proc_maps;
        struct vm_area_struct *vma = v;
        struct numa_maps *md = &numa_priv->md;
        struct file *file = vma->vm_file;
        struct mm_struct *mm = vma->vm_mm;
        struct mm_walk walk = {
                .hugetlb_entry = gather_hugetlb_stats,
                .pmd_entry = gather_pte_stats,
                .private = md,
                .mm = mm,
        };
        struct mempolicy *pol;
        char buffer[64];
        int nid;

        if (!mm)
                return 0;

        /* Ensure we start with an empty set of numa_maps statistics. */
        memset(md, 0, sizeof(*md));

        pol = __get_vma_policy(vma, vma->vm_start);
        if (pol) {
                mpol_to_str(buffer, sizeof(buffer), pol);
                mpol_cond_put(pol);
        } else {
                mpol_to_str(buffer, sizeof(buffer), proc_priv->task_mempolicy);
        }

        seq_printf(m, "%08lx %s", vma->vm_start, buffer);

        if (file) {
                seq_puts(m, " file=");
                seq_file_path(m, file, "\n\t= ");
        } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
                seq_puts(m, " heap");
        } else if (is_stack(proc_priv, vma)) {
                seq_puts(m, " stack");
        }

        if (is_vm_hugetlb_page(vma))
                seq_puts(m, " huge");

        /* mmap_sem is held by m_start */
        walk_page_vma(vma, &walk);

        if (!md->pages)
                goto out;

        if (md->anon)
                seq_printf(m, " anon=%lu", md->anon);

        if (md->dirty)
                seq_printf(m, " dirty=%lu", md->dirty);

        if (md->pages != md->anon && md->pages != md->dirty)
                seq_printf(m, " mapped=%lu", md->pages);

        if (md->mapcount_max > 1)
                seq_printf(m, " mapmax=%lu", md->mapcount_max);

        if (md->swapcache)
                seq_printf(m, " swapcache=%lu", md->swapcache);

        if (md->active < md->pages && !is_vm_hugetlb_page(vma))
                seq_printf(m, " active=%lu", md->active);

        if (md->writeback)
                seq_printf(m, " writeback=%lu", md->writeback);

        for_each_node_state(nid, N_MEMORY)
                if (md->node[nid])
                        seq_printf(m, " N%d=%lu", nid, md->node[nid]);

        seq_printf(m, " kernelpagesize_kB=%lu", vma_kernel_pagesize(vma) >> 10);
out:
        seq_putc(m, '\n');
        m_cache_vma(m, vma);
        return 0;
}
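/*
 * Illustrative /proc/PID/numa_maps line as assembled above (address
 * and counts are made up):
 *
 *	00400000 default file=/bin/cat mapped=2 N0=2 kernelpagesize_kB=4
 */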
static int show_pid_numa_map(struct seq_file *m, void *v)
{
        return show_numa_map(m, v, 1);
}

static int show_tid_numa_map(struct seq_file *m, void *v)
{
        return show_numa_map(m, v, 0);
}

static const struct seq_operations proc_pid_numa_maps_op = {
        .start  = m_start,
        .next   = m_next,
        .stop   = m_stop,
        .show   = show_pid_numa_map,
};

static const struct seq_operations proc_tid_numa_maps_op = {
        .start  = m_start,
        .next   = m_next,
        .stop   = m_stop,
        .show   = show_tid_numa_map,
};

static int numa_maps_open(struct inode *inode, struct file *file,
                          const struct seq_operations *ops)
{
        return proc_maps_open(inode, file, ops,
                              sizeof(struct numa_maps_private));
}

static int pid_numa_maps_open(struct inode *inode, struct file *file)
{
        return numa_maps_open(inode, file, &proc_pid_numa_maps_op);
}

static int tid_numa_maps_open(struct inode *inode, struct file *file)
{
        return numa_maps_open(inode, file, &proc_tid_numa_maps_op);
}

const struct file_operations proc_pid_numa_maps_operations = {
        .open           = pid_numa_maps_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = proc_map_release,
};

const struct file_operations proc_tid_numa_maps_operations = {
        .open           = tid_numa_maps_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = proc_map_release,
};
#endif /* CONFIG_NUMA */