madvise.c
/*
 *	linux/mm/madvise.c
 *
 * Copyright (C) 1999  Linus Torvalds
 * Copyright (C) 2002  Christoph Hellwig
 */

#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/syscalls.h>
#include <linux/mempolicy.h>
#include <linux/page-isolation.h>
#include <linux/userfaultfd_k.h>
#include <linux/hugetlb.h>
#include <linux/falloc.h>
#include <linux/sched.h>
#include <linux/ksm.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/shmem_fs.h>
#include <linux/mmu_notifier.h>

#include <asm/tlb.h>

#include "internal.h"

/*
 * Any behaviour which results in changes to the vma->vm_flags needs to
 * take mmap_sem for writing. Others, which simply traverse vmas, need
 * to only take it for reading.
 */
static int madvise_need_mmap_write(int behavior)
{
	switch (behavior) {
	case MADV_REMOVE:
	case MADV_WILLNEED:
	case MADV_DONTNEED:
	case MADV_FREE:
		return 0;
	default:
		/* be safe, default to 1. list exceptions explicitly */
		return 1;
	}
}
/*
 * We can potentially split a vm area into separate
 * areas, each area with its own behavior.
 */
static long madvise_behavior(struct vm_area_struct *vma,
		     struct vm_area_struct **prev,
		     unsigned long start, unsigned long end, int behavior)
{
	struct mm_struct *mm = vma->vm_mm;
	int error = 0;
	pgoff_t pgoff;
	unsigned long new_flags = vma->vm_flags;

	switch (behavior) {
	case MADV_NORMAL:
		new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
		break;
	case MADV_SEQUENTIAL:
		new_flags = (new_flags & ~VM_RAND_READ) | VM_SEQ_READ;
		break;
	case MADV_RANDOM:
		new_flags = (new_flags & ~VM_SEQ_READ) | VM_RAND_READ;
		break;
	case MADV_DONTFORK:
		new_flags |= VM_DONTCOPY;
		break;
	case MADV_DOFORK:
		if (vma->vm_flags & VM_IO) {
			error = -EINVAL;
			goto out;
		}
		new_flags &= ~VM_DONTCOPY;
		break;
	case MADV_DONTDUMP:
		new_flags |= VM_DONTDUMP;
		break;
	case MADV_DODUMP:
		if (new_flags & VM_SPECIAL) {
			error = -EINVAL;
			goto out;
		}
		new_flags &= ~VM_DONTDUMP;
		break;
	case MADV_MERGEABLE:
	case MADV_UNMERGEABLE:
		error = ksm_madvise(vma, start, end, behavior, &new_flags);
		if (error) {
			/*
			 * madvise() returns EAGAIN if kernel resources, such as
			 * slab, are temporarily unavailable.
			 */
			if (error == -ENOMEM)
				error = -EAGAIN;
			goto out;
		}
		break;
	case MADV_HUGEPAGE:
	case MADV_NOHUGEPAGE:
		error = hugepage_madvise(vma, &new_flags, behavior);
		if (error) {
			/*
			 * madvise() returns EAGAIN if kernel resources, such as
			 * slab, are temporarily unavailable.
			 */
			if (error == -ENOMEM)
				error = -EAGAIN;
			goto out;
		}
		break;
	}

	if (new_flags == vma->vm_flags) {
		*prev = vma;
		goto out;
	}

	pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
	*prev = vma_merge(mm, *prev, start, end, new_flags, vma->anon_vma,
			  vma->vm_file, pgoff, vma_policy(vma),
			  vma->vm_userfaultfd_ctx);
	if (*prev) {
		vma = *prev;
		goto success;
	}

	*prev = vma;

	if (start != vma->vm_start) {
		if (unlikely(mm->map_count >= sysctl_max_map_count)) {
			error = -ENOMEM;
			goto out;
		}
		error = __split_vma(mm, vma, start, 1);
		if (error) {
			/*
			 * madvise() returns EAGAIN if kernel resources, such as
			 * slab, are temporarily unavailable.
			 */
			if (error == -ENOMEM)
				error = -EAGAIN;
			goto out;
		}
	}

	if (end != vma->vm_end) {
		if (unlikely(mm->map_count >= sysctl_max_map_count)) {
			error = -ENOMEM;
			goto out;
		}
		error = __split_vma(mm, vma, end, 0);
		if (error) {
			/*
			 * madvise() returns EAGAIN if kernel resources, such as
			 * slab, are temporarily unavailable.
			 */
			if (error == -ENOMEM)
				error = -EAGAIN;
			goto out;
		}
	}

success:
	/*
	 * vm_flags is protected by the mmap_sem held in write mode.
	 */
	vma->vm_flags = new_flags;
out:
	return error;
}
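/*
 * The helpers below (built only with CONFIG_SWAP) implement MADV_WILLNEED
 * readahead for anonymous and shmem/tmpfs mappings: they look up swap
 * entries for the advised range and start asynchronous swap-in for each one.
 */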
#ifdef CONFIG_SWAP
static int swapin_walk_pmd_entry(pmd_t *pmd, unsigned long start,
	unsigned long end, struct mm_walk *walk)
{
	pte_t *orig_pte;
	struct vm_area_struct *vma = walk->private;
	unsigned long index;

	if (pmd_none_or_trans_huge_or_clear_bad(pmd))
		return 0;

	for (index = start; index != end; index += PAGE_SIZE) {
		pte_t pte;
		swp_entry_t entry;
		struct page *page;
		spinlock_t *ptl;

		orig_pte = pte_offset_map_lock(vma->vm_mm, pmd, start, &ptl);
		pte = *(orig_pte + ((index - start) / PAGE_SIZE));
		pte_unmap_unlock(orig_pte, ptl);

		if (pte_present(pte) || pte_none(pte))
			continue;
		entry = pte_to_swp_entry(pte);
		if (unlikely(non_swap_entry(entry)))
			continue;

		page = read_swap_cache_async(entry, GFP_HIGHUSER_MOVABLE,
							vma, index, false);
		if (page)
			put_page(page);
	}

	return 0;
}

static void force_swapin_readahead(struct vm_area_struct *vma,
		unsigned long start, unsigned long end)
{
	struct mm_walk walk = {
		.mm = vma->vm_mm,
		.pmd_entry = swapin_walk_pmd_entry,
		.private = vma,
	};

	walk_page_range(start, end, &walk);

	lru_add_drain();	/* Push any new pages onto the LRU now */
}

static void force_shm_swapin_readahead(struct vm_area_struct *vma,
		unsigned long start, unsigned long end,
		struct address_space *mapping)
{
	pgoff_t index;
	struct page *page;
	swp_entry_t swap;

	for (; start < end; start += PAGE_SIZE) {
		index = ((start - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;

		page = find_get_entry(mapping, index);
		if (!radix_tree_exceptional_entry(page)) {
			if (page)
				put_page(page);
			continue;
		}
		swap = radix_to_swp_entry(page);
		page = read_swap_cache_async(swap, GFP_HIGHUSER_MOVABLE,
							NULL, 0, false);
		if (page)
			put_page(page);
	}

	lru_add_drain();	/* Push any new pages onto the LRU now */
}
#endif	/* CONFIG_SWAP */
/*
 * Schedule all required I/O operations.  Do not wait for completion.
 */
static long madvise_willneed(struct vm_area_struct *vma,
			     struct vm_area_struct **prev,
			     unsigned long start, unsigned long end)
{
	struct file *file = vma->vm_file;

#ifdef CONFIG_SWAP
	if (!file) {
		*prev = vma;
		force_swapin_readahead(vma, start, end);
		return 0;
	}

	if (shmem_mapping(file->f_mapping)) {
		*prev = vma;
		force_shm_swapin_readahead(vma, start, end,
				file->f_mapping);
		return 0;
	}
#else
	if (!file)
		return -EBADF;
#endif

	if (IS_DAX(file_inode(file))) {
		/* no bad return value, but ignore advice */
		return 0;
	}

	*prev = vma;
	start = ((start - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
	if (end > vma->vm_end)
		end = vma->vm_end;
	end = ((end - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;

	force_page_cache_readahead(file->f_mapping, file, start, end - start);
	return 0;
}
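/*
 * Page-table walker for MADV_FREE: drop swap entries outright, mark
 * exclusively-mapped present pages as old and clean, and tag them lazyfree
 * so reclaim can discard them without writeback unless they are dirtied
 * again first.
 */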
static int madvise_free_pte_range(pmd_t *pmd, unsigned long addr,
				unsigned long end, struct mm_walk *walk)
{
	struct mmu_gather *tlb = walk->private;
	struct mm_struct *mm = tlb->mm;
	struct vm_area_struct *vma = walk->vma;
	spinlock_t *ptl;
	pte_t *orig_pte, *pte, ptent;
	struct page *page;
	int nr_swap = 0;
	unsigned long next;

	next = pmd_addr_end(addr, end);
	if (pmd_trans_huge(*pmd))
		if (madvise_free_huge_pmd(tlb, vma, pmd, addr, next))
			goto next;

	if (pmd_trans_unstable(pmd))
		return 0;

	tlb_remove_check_page_size_change(tlb, PAGE_SIZE);
	orig_pte = pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
	flush_tlb_batched_pending(mm);
	arch_enter_lazy_mmu_mode();
	for (; addr != end; pte++, addr += PAGE_SIZE) {
		ptent = *pte;

		if (pte_none(ptent))
			continue;
		/*
		 * If the pte holds a swap entry, just clear the page table
		 * entry to prevent a swap-in, which is more expensive than
		 * (page allocation + zeroing).
		 */
		if (!pte_present(ptent)) {
			swp_entry_t entry;

			entry = pte_to_swp_entry(ptent);
			if (non_swap_entry(entry))
				continue;
			nr_swap--;
			free_swap_and_cache(entry);
			pte_clear_not_present_full(mm, addr, pte, tlb->fullmm);
			continue;
		}
		page = vm_normal_page(vma, addr, ptent);
		if (!page)
			continue;

		/*
		 * If pmd isn't transhuge but the page is THP and
		 * is owned by only this process, split it and
		 * deactivate all pages.
		 */
		if (PageTransCompound(page)) {
			if (page_mapcount(page) != 1)
				goto out;
			get_page(page);
			if (!trylock_page(page)) {
				put_page(page);
				goto out;
			}
			pte_unmap_unlock(orig_pte, ptl);
			if (split_huge_page(page)) {
				unlock_page(page);
				put_page(page);
				pte_offset_map_lock(mm, pmd, addr, &ptl);
				goto out;
			}
			put_page(page);
			unlock_page(page);
			pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
			pte--;
			addr -= PAGE_SIZE;
			continue;
		}

		VM_BUG_ON_PAGE(PageTransCompound(page), page);
		if (PageSwapCache(page) || PageDirty(page)) {
			if (!trylock_page(page))
				continue;
			/*
			 * If the page is shared with others, we cannot clear
			 * its PG_dirty flag.
			 */
			if (page_mapcount(page) != 1) {
				unlock_page(page);
				continue;
			}

			if (PageSwapCache(page) && !try_to_free_swap(page)) {
				unlock_page(page);
				continue;
			}

			ClearPageDirty(page);
			unlock_page(page);
		}
		if (pte_young(ptent) || pte_dirty(ptent)) {
			/*
			 * Some architectures (e.g. PPC) don't update the TLB
			 * with set_pte_at() and tlb_remove_tlb_entry(), so
			 * for portability, clear the pte and then remap it
			 * as old and clean.
			 */
			ptent = ptep_get_and_clear_full(mm, addr, pte,
							tlb->fullmm);

			ptent = pte_mkold(ptent);
			ptent = pte_mkclean(ptent);
			set_pte_at(mm, addr, pte, ptent);
			tlb_remove_tlb_entry(tlb, pte, addr);
		}
		mark_page_lazyfree(page);
	}
out:
	if (nr_swap) {
		if (current->mm == mm)
			sync_mm_rss(mm);

		add_mm_counter(mm, MM_SWAPENTS, nr_swap);
	}
	arch_leave_lazy_mmu_mode();
	pte_unmap_unlock(orig_pte, ptl);
	cond_resched();
next:
	return 0;
}

static void madvise_free_page_range(struct mmu_gather *tlb,
			     struct vm_area_struct *vma,
			     unsigned long addr, unsigned long end)
{
	struct mm_walk free_walk = {
		.pmd_entry = madvise_free_pte_range,
		.mm = vma->vm_mm,
		.private = tlb,
	};

	tlb_start_vma(tlb, vma);
	walk_page_range(addr, end, &free_walk);
	tlb_end_vma(tlb, vma);
}
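/*
 * Apply MADV_FREE to the portion of a single (currently anonymous-only)
 * vma that overlaps [start_addr, end_addr), under an mmu_gather so TLB
 * flushes and mmu notifier calls are batched over the range.
 */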
static int madvise_free_single_vma(struct vm_area_struct *vma,
			unsigned long start_addr, unsigned long end_addr)
{
	unsigned long start, end;
	struct mm_struct *mm = vma->vm_mm;
	struct mmu_gather tlb;

	/* MADV_FREE works for only anon vma at the moment */
	if (!vma_is_anonymous(vma))
		return -EINVAL;

	start = max(vma->vm_start, start_addr);
	if (start >= vma->vm_end)
		return -EINVAL;
	end = min(vma->vm_end, end_addr);
	if (end <= vma->vm_start)
		return -EINVAL;

	lru_add_drain();
	tlb_gather_mmu(&tlb, mm, start, end);
	update_hiwater_rss(mm);

	mmu_notifier_invalidate_range_start(mm, start, end);
	madvise_free_page_range(&tlb, vma, start, end);
	mmu_notifier_invalidate_range_end(mm, start, end);
	tlb_finish_mmu(&tlb, start, end);

	return 0;
}
/*
 * Application no longer needs these pages.  If the pages are dirty,
 * it's OK to just throw them away.  The app will be more careful about
 * data it wants to keep.  Be sure to free swap resources too.  The
 * zap_page_range call sets things up for shrink_active_list to actually free
 * these pages later if no one else has touched them in the meantime,
 * although we could add these pages to a global reuse list for
 * shrink_active_list to pick up before reclaiming other pages.
 *
 * NB: This interface discards data rather than pushes it out to swap,
 * as some implementations do.  This has performance implications for
 * applications like large transactional databases which want to discard
 * pages in anonymous maps after committing to backing store the data
 * that was kept in them.  There is no reason to write this data out to
 * the swap area if the application is discarding it.
 *
 * An interface that causes the system to free clean pages and flush
 * dirty pages is already available as msync(MS_INVALIDATE).
 */
static long madvise_dontneed_single_vma(struct vm_area_struct *vma,
					unsigned long start, unsigned long end)
{
	zap_page_range(vma, start, end - start);
	return 0;
}
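/*
 * Common entry for MADV_DONTNEED and MADV_FREE.  userfaultfd_remove() may
 * drop mmap_sem to notify the userfault manager; when that happens the vma
 * must be looked up and revalidated before the range is zapped or freed.
 */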
static long madvise_dontneed_free(struct vm_area_struct *vma,
				  struct vm_area_struct **prev,
				  unsigned long start, unsigned long end,
				  int behavior)
{
	*prev = vma;
	if (!can_madv_dontneed_vma(vma))
		return -EINVAL;

	if (!userfaultfd_remove(vma, start, end)) {
		*prev = NULL; /* mmap_sem has been dropped, prev is stale */

		down_read(&current->mm->mmap_sem);
		vma = find_vma(current->mm, start);
		if (!vma)
			return -ENOMEM;
		if (start < vma->vm_start) {
			/*
			 * This "vma" under revalidation is the one
			 * with the lowest vma->vm_start where start
			 * is also < vma->vm_end. If start <
			 * vma->vm_start it means a hole materialized
			 * in the user address space within the
			 * virtual range passed to MADV_DONTNEED
			 * or MADV_FREE.
			 */
			return -ENOMEM;
		}
		if (!can_madv_dontneed_vma(vma))
			return -EINVAL;
		if (end > vma->vm_end) {
			/*
			 * Don't fail if end > vma->vm_end. If the old
			 * vma was split while the mmap_sem was
			 * released, the concurrent operation must not
			 * cause madvise() to produce an undefined
			 * result. There may be an adjacent next vma
			 * that we'll walk next. userfaultfd_remove()
			 * will generate an UFFD_EVENT_REMOVE
			 * repetition on the end-vma->vm_end range, but
			 * the manager can handle a repetition fine.
			 */
			end = vma->vm_end;
		}
		VM_WARN_ON(start >= end);
	}

	if (behavior == MADV_DONTNEED)
		return madvise_dontneed_single_vma(vma, start, end);
	else if (behavior == MADV_FREE)
		return madvise_free_single_vma(vma, start, end);
	else
		return -EINVAL;
}
/*
 * Application wants to free up the pages and associated backing store.
 * This is effectively punching a hole into the middle of a file.
 */
static long madvise_remove(struct vm_area_struct *vma,
				struct vm_area_struct **prev,
				unsigned long start, unsigned long end)
{
	loff_t offset;
	int error;
	struct file *f;

	*prev = NULL;	/* tell sys_madvise we drop mmap_sem */

	if (vma->vm_flags & VM_LOCKED)
		return -EINVAL;

	f = vma->vm_file;

	if (!f || !f->f_mapping || !f->f_mapping->host) {
		return -EINVAL;
	}

	if ((vma->vm_flags & (VM_SHARED|VM_WRITE)) != (VM_SHARED|VM_WRITE))
		return -EACCES;

	offset = (loff_t)(start - vma->vm_start)
			+ ((loff_t)vma->vm_pgoff << PAGE_SHIFT);

	/*
	 * Filesystem's fallocate may need to take i_mutex.  We need to
	 * explicitly grab a reference because the vma (and hence the
	 * vma's reference to the file) can go away as soon as we drop
	 * mmap_sem.
	 */
	get_file(f);
	if (userfaultfd_remove(vma, start, end)) {
		/* mmap_sem was not released by userfaultfd_remove() */
		up_read(&current->mm->mmap_sem);
	}
	error = vfs_fallocate(f,
				FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
				offset, end - start);
	fput(f);
	down_read(&current->mm->mmap_sem);
	return error;
}
#ifdef CONFIG_MEMORY_FAILURE
/*
 * Error injection support for memory error handling.
 */
static int madvise_inject_error(int behavior,
		unsigned long start, unsigned long end)
{
	struct page *page;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	for (; start < end; start += PAGE_SIZE <<
				compound_order(compound_head(page))) {
		int ret;

		ret = get_user_pages_fast(start, 1, 0, &page);
		if (ret != 1)
			return ret;

		if (PageHWPoison(page)) {
			put_page(page);
			continue;
		}

		if (behavior == MADV_SOFT_OFFLINE) {
			pr_info("Soft offlining pfn %#lx at process virtual address %#lx\n",
						page_to_pfn(page), start);

			ret = soft_offline_page(page, MF_COUNT_INCREASED);
			if (ret)
				return ret;
			continue;
		}
		pr_info("Injecting memory failure for pfn %#lx at process virtual address %#lx\n",
					page_to_pfn(page), start);

		ret = memory_failure(page_to_pfn(page), 0, MF_COUNT_INCREASED);
		if (ret)
			return ret;
	}
	return 0;
}
#endif
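/*
 * Dispatch a single vma to the handler for the requested behavior; anything
 * that only changes vm_flags falls through to madvise_behavior().
 */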
static long
madvise_vma(struct vm_area_struct *vma, struct vm_area_struct **prev,
		unsigned long start, unsigned long end, int behavior)
{
	switch (behavior) {
	case MADV_REMOVE:
		return madvise_remove(vma, prev, start, end);
	case MADV_WILLNEED:
		return madvise_willneed(vma, prev, start, end);
	case MADV_FREE:
	case MADV_DONTNEED:
		return madvise_dontneed_free(vma, prev, start, end, behavior);
	default:
		return madvise_behavior(vma, prev, start, end, behavior);
	}
}

static bool
madvise_behavior_valid(int behavior)
{
	switch (behavior) {
	case MADV_DOFORK:
	case MADV_DONTFORK:
	case MADV_NORMAL:
	case MADV_SEQUENTIAL:
	case MADV_RANDOM:
	case MADV_REMOVE:
	case MADV_WILLNEED:
	case MADV_DONTNEED:
	case MADV_FREE:
#ifdef CONFIG_KSM
	case MADV_MERGEABLE:
	case MADV_UNMERGEABLE:
#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	case MADV_HUGEPAGE:
	case MADV_NOHUGEPAGE:
#endif
	case MADV_DONTDUMP:
	case MADV_DODUMP:
#ifdef CONFIG_MEMORY_FAILURE
	case MADV_SOFT_OFFLINE:
	case MADV_HWPOISON:
#endif
		return true;

	default:
		return false;
	}
}
/*
 * The madvise(2) system call.
 *
 * Applications can use madvise() to advise the kernel how it should
 * handle paging I/O in this VM area.  The idea is to help the kernel
 * use appropriate read-ahead and caching techniques.  The information
 * provided is advisory only, and can be safely disregarded by the
 * kernel without affecting the correct operation of the application.
 *
 * behavior values:
 *  MADV_NORMAL - the default behavior is to read clusters.  This
 *		results in some read-ahead and read-behind.
 *  MADV_RANDOM - the system should read the minimum amount of data
 *		on any access, since it is unlikely that the application
 *		will need more than what it asks for.
 *  MADV_SEQUENTIAL - pages in the given range will probably be accessed
 *		once, so they can be aggressively read ahead, and
 *		can be freed soon after they are accessed.
 *  MADV_WILLNEED - the application is notifying the system to read
 *		some pages ahead.
 *  MADV_DONTNEED - the application is finished with the given range,
 *		so the kernel can free resources associated with it.
 *  MADV_FREE - the application marks pages in the given range as lazy free,
 *		where actual purges are postponed until memory pressure happens.
 *  MADV_REMOVE - the application wants to free up the given range of
 *		pages and associated backing store.
 *  MADV_DONTFORK - omit this area from child's address space when forking:
 *		typically, to avoid COWing pages pinned by get_user_pages().
 *  MADV_DOFORK - cancel MADV_DONTFORK: no longer omit this area when forking.
 *  MADV_HWPOISON - trigger memory error handler as if the given memory range
 *		were corrupted by unrecoverable hardware memory failure.
 *  MADV_SOFT_OFFLINE - try to soft-offline the given range of memory.
 *  MADV_MERGEABLE - the application recommends that KSM try to merge pages in
 *		this area with pages of identical content from other such areas.
 *  MADV_UNMERGEABLE - cancel MADV_MERGEABLE: no longer merge pages with others.
 *  MADV_HUGEPAGE - the application wants to back the given range by transparent
 *		huge pages in the future. Existing pages might be coalesced and
 *		new pages might be allocated as THP.
 *  MADV_NOHUGEPAGE - mark the given range as not worth being backed by
 *		transparent huge pages so the existing pages will not be
 *		coalesced into THP and new pages will not be allocated as THP.
 *  MADV_DONTDUMP - the application wants to prevent pages in the given range
 *		from being included in its core dump.
 *  MADV_DODUMP - cancel MADV_DONTDUMP: no longer exclude from core dump.
 *
 * return values:
 *  zero    - success
 *  -EINVAL - start + len < 0, start is not page-aligned,
 *		"behavior" is not a valid value, or application
 *		is attempting to release locked or shared pages.
 *  -ENOMEM - addresses in the specified range are not currently
 *		mapped, or are outside the AS of the process.
 *  -EIO    - an I/O error occurred while paging in data.
 *  -EBADF  - map exists, but area maps something that isn't a file.
 *  -EAGAIN - a kernel resource was temporarily unavailable.
 */
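/*
 * Example (user-space caller, not part of this file): a minimal sketch of
 * returning a scratch buffer's memory to the kernel.  "buf" and "len" below
 * are hypothetical names used only for illustration.
 *
 *	buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *		   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	...use buf...
 *	if (madvise(buf, len, MADV_DONTNEED))
 *		perror("madvise");
 */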
SYSCALL_DEFINE3(madvise, unsigned long, start, size_t, len_in, int, behavior)
{
	unsigned long end, tmp;
	struct vm_area_struct *vma, *prev;
	int unmapped_error = 0;
	int error = -EINVAL;
	int write;
	size_t len;
	struct blk_plug plug;

	if (!madvise_behavior_valid(behavior))
		return error;

	if (start & ~PAGE_MASK)
		return error;
	len = (len_in + ~PAGE_MASK) & PAGE_MASK;

	/* Check to see whether len was rounded up from small -ve to zero */
	if (len_in && !len)
		return error;

	end = start + len;
	if (end < start)
		return error;

	error = 0;
	if (end == start)
		return error;

#ifdef CONFIG_MEMORY_FAILURE
	if (behavior == MADV_HWPOISON || behavior == MADV_SOFT_OFFLINE)
		return madvise_inject_error(behavior, start, start + len_in);
#endif

	write = madvise_need_mmap_write(behavior);
	if (write) {
		if (down_write_killable(&current->mm->mmap_sem))
			return -EINTR;
	} else {
		down_read(&current->mm->mmap_sem);
	}

	/*
	 * If the interval [start,end) covers some unmapped address
	 * ranges, just ignore them, but return -ENOMEM at the end.
	 * - different from the way of handling in mlock etc.
	 */
	vma = find_vma_prev(current->mm, start, &prev);
	if (vma && start > vma->vm_start)
		prev = vma;

	blk_start_plug(&plug);
	for (;;) {
		/* Still start < end. */
		error = -ENOMEM;
		if (!vma)
			goto out;

		/* Here start < (end|vma->vm_end). */
		if (start < vma->vm_start) {
			unmapped_error = -ENOMEM;
			start = vma->vm_start;
			if (start >= end)
				goto out;
		}

		/* Here vma->vm_start <= start < (end|vma->vm_end) */
		tmp = vma->vm_end;
		if (end < tmp)
			tmp = end;

		/* Here vma->vm_start <= start < tmp <= (end|vma->vm_end). */
		error = madvise_vma(vma, &prev, start, tmp, behavior);
		if (error)
			goto out;
		start = tmp;
		if (prev && start < prev->vm_end)
			start = prev->vm_end;
		error = unmapped_error;
		if (start >= end)
			goto out;
		if (prev)
			vma = prev->vm_next;
		else	/* madvise_remove dropped mmap_sem */
			vma = find_vma(current->mm, start);
	}
out:
	blk_finish_plug(&plug);
	if (write)
		up_write(&current->mm->mmap_sem);
	else
		up_read(&current->mm->mmap_sem);

	return error;
}