/*
 *  linux/mm/madvise.c
 *
 * Copyright (C) 1999  Linus Torvalds
 * Copyright (C) 2002  Christoph Hellwig
 */

#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/syscalls.h>
#include <linux/mempolicy.h>
#include <linux/page-isolation.h>
#include <linux/hugetlb.h>
#include <linux/falloc.h>
#include <linux/sched.h>
#include <linux/ksm.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/swap.h>
#include <linux/swapops.h>

/*
 * Any behaviour which results in changes to the vma->vm_flags needs to
 * take mmap_sem for writing. Others, which simply traverse vmas, need
 * to only take it for reading.
 */
static int madvise_need_mmap_write(int behavior)
{
	switch (behavior) {
	case MADV_REMOVE:
	case MADV_WILLNEED:
	case MADV_DONTNEED:
		return 0;
	default:
		/* be safe, default to 1. list exceptions explicitly */
		return 1;
	}
}

/*
 * We can potentially split a vm area into separate
 * areas, each area with its own behavior.
 */
static long madvise_behavior(struct vm_area_struct *vma,
		     struct vm_area_struct **prev,
		     unsigned long start, unsigned long end, int behavior)
{
	struct mm_struct *mm = vma->vm_mm;
	int error = 0;
	pgoff_t pgoff;
	unsigned long new_flags = vma->vm_flags;

	switch (behavior) {
	case MADV_NORMAL:
		new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
		break;
	case MADV_SEQUENTIAL:
		new_flags = (new_flags & ~VM_RAND_READ) | VM_SEQ_READ;
		break;
	case MADV_RANDOM:
		new_flags = (new_flags & ~VM_SEQ_READ) | VM_RAND_READ;
		break;
	case MADV_DONTFORK:
		new_flags |= VM_DONTCOPY;
		break;
	case MADV_DOFORK:
		if (vma->vm_flags & VM_IO) {
			error = -EINVAL;
			goto out;
		}
		new_flags &= ~VM_DONTCOPY;
		break;
	case MADV_DONTDUMP:
		new_flags |= VM_DONTDUMP;
		break;
	case MADV_DODUMP:
		if (new_flags & VM_SPECIAL) {
			error = -EINVAL;
			goto out;
		}
		new_flags &= ~VM_DONTDUMP;
		break;
	case MADV_MERGEABLE:
	case MADV_UNMERGEABLE:
		error = ksm_madvise(vma, start, end, behavior, &new_flags);
		if (error)
			goto out;
		break;
	case MADV_HUGEPAGE:
	case MADV_NOHUGEPAGE:
		error = hugepage_madvise(vma, &new_flags, behavior);
		if (error)
			goto out;
		break;
	}

	if (new_flags == vma->vm_flags) {
		*prev = vma;
		goto out;
	}

	pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
	*prev = vma_merge(mm, *prev, start, end, new_flags, vma->anon_vma,
			  vma->vm_file, pgoff, vma_policy(vma),
			  vma->vm_userfaultfd_ctx);
	if (*prev) {
		vma = *prev;
		goto success;
	}

	*prev = vma;

	if (start != vma->vm_start) {
		error = split_vma(mm, vma, start, 1);
		if (error)
			goto out;
	}

	if (end != vma->vm_end) {
		error = split_vma(mm, vma, end, 0);
		if (error)
			goto out;
	}

success:
	/*
	 * vm_flags is protected by the mmap_sem held in write mode.
	 */
	vma->vm_flags = new_flags;

out:
	if (error == -ENOMEM)
		error = -EAGAIN;
	return error;
}

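/*
 * Example (a minimal userspace sketch, not part of this file; sizes and
 * flags below are illustrative assumptions): advising only the middle
 * page of a three-page anonymous mapping exercises the split_vma()
 * calls above, leaving three VMAs where there was one.
 *
 *	char *buf = mmap(NULL, 3 * 4096, PROT_READ | PROT_WRITE,
 *			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	madvise(buf + 4096, 4096, MADV_DONTFORK);
 *	// /proc/self/maps now shows three adjacent VMAs; a later
 *	// madvise(buf, 3 * 4096, MADV_DOFORK) makes the flags equal
 *	// again, letting vma_merge() above fuse them back together.
 */
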
#ifdef CONFIG_SWAP
static int swapin_walk_pmd_entry(pmd_t *pmd, unsigned long start,
	unsigned long end, struct mm_walk *walk)
{
	pte_t *orig_pte;
	struct vm_area_struct *vma = walk->private;
	unsigned long index;

	if (pmd_none_or_trans_huge_or_clear_bad(pmd))
		return 0;

	for (index = start; index != end; index += PAGE_SIZE) {
		pte_t pte;
		swp_entry_t entry;
		struct page *page;
		spinlock_t *ptl;

		orig_pte = pte_offset_map_lock(vma->vm_mm, pmd, start, &ptl);
		pte = *(orig_pte + ((index - start) / PAGE_SIZE));
		pte_unmap_unlock(orig_pte, ptl);

		if (pte_present(pte) || pte_none(pte))
			continue;
		entry = pte_to_swp_entry(pte);
		if (unlikely(non_swap_entry(entry)))
			continue;

		page = read_swap_cache_async(entry, GFP_HIGHUSER_MOVABLE,
								vma, index);
		if (page)
			page_cache_release(page);
	}

	return 0;
}

static void force_swapin_readahead(struct vm_area_struct *vma,
		unsigned long start, unsigned long end)
{
	struct mm_walk walk = {
		.mm = vma->vm_mm,
		.pmd_entry = swapin_walk_pmd_entry,
		.private = vma,
	};

	walk_page_range(start, end, &walk);

	lru_add_drain();	/* Push any new pages onto the LRU now */
}

static void force_shm_swapin_readahead(struct vm_area_struct *vma,
		unsigned long start, unsigned long end,
		struct address_space *mapping)
{
	pgoff_t index;
	struct page *page;
	swp_entry_t swap;

	for (; start < end; start += PAGE_SIZE) {
		index = ((start - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;

		page = find_get_entry(mapping, index);
		if (!radix_tree_exceptional_entry(page)) {
			if (page)
				page_cache_release(page);
			continue;
		}
		swap = radix_to_swp_entry(page);
		page = read_swap_cache_async(swap, GFP_HIGHUSER_MOVABLE,
								NULL, 0);
		if (page)
			page_cache_release(page);
	}

	lru_add_drain();	/* Push any new pages onto the LRU now */
}
#endif		/* CONFIG_SWAP */

/*
 * Schedule all required I/O operations.  Do not wait for completion.
 */
static long madvise_willneed(struct vm_area_struct *vma,
			     struct vm_area_struct **prev,
			     unsigned long start, unsigned long end)
{
	struct file *file = vma->vm_file;

#ifdef CONFIG_SWAP
	if (!file) {
		*prev = vma;
		force_swapin_readahead(vma, start, end);
		return 0;
	}

	if (shmem_mapping(file->f_mapping)) {
		*prev = vma;
		force_shm_swapin_readahead(vma, start, end,
						file->f_mapping);
		return 0;
	}
#else
	if (!file)
		return -EBADF;
#endif

	if (IS_DAX(file_inode(file))) {
		/* no bad return value, but ignore advice */
		return 0;
	}

	*prev = vma;
	start = ((start - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
	if (end > vma->vm_end)
		end = vma->vm_end;
	end = ((end - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;

	force_page_cache_readahead(file->f_mapping, file, start, end - start);
	return 0;
}

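/*
 * Example (a hedged userspace sketch; file name, size, and error
 * handling are assumptions): an application that is about to stream
 * through a large mapped file can request the readahead scheduled by
 * madvise_willneed() before touching any page. The call only queues
 * the I/O and returns immediately.
 *
 *	int fd = open("data.bin", O_RDONLY);
 *	size_t sz = 64UL << 20;
 *	void *p = mmap(NULL, sz, PROT_READ, MAP_PRIVATE, fd, 0);
 *	madvise(p, sz, MADV_WILLNEED);	// schedules I/O, returns at once
 *	// ... later reads of p[] are likely to hit the page cache
 */
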
/*
 * Application no longer needs these pages.  If the pages are dirty,
 * it's OK to just throw them away.  The app will be more careful about
 * data it wants to keep.  Be sure to free swap resources too.  The
 * zap_page_range call sets things up for shrink_active_list to actually free
 * these pages later if no one else has touched them in the meantime,
 * although we could add these pages to a global reuse list for
 * shrink_active_list to pick up before reclaiming other pages.
 *
 * NB: This interface discards data rather than pushes it out to swap,
 * as some implementations do.  This has performance implications for
 * applications like large transactional databases which want to discard
 * pages in anonymous maps after committing to backing store the data
 * that was kept in them.  There is no reason to write this data out to
 * the swap area if the application is discarding it.
 *
 * An interface that causes the system to free clean pages and flush
 * dirty pages is already available as msync(MS_INVALIDATE).
 */
static long madvise_dontneed(struct vm_area_struct *vma,
			     struct vm_area_struct **prev,
			     unsigned long start, unsigned long end)
{
	*prev = vma;
	if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
		return -EINVAL;

	zap_page_range(vma, start, end - start, NULL);
	return 0;
}

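/*
 * Example (a minimal userspace sketch, page size assumed 4096): after
 * MADV_DONTNEED, anonymous pages are discarded by the zap_page_range()
 * above, so the next touch faults in a fresh zero-filled page and the
 * earlier write is lost.
 *
 *	char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	p[0] = 42;
 *	madvise(p, 4096, MADV_DONTNEED);
 *	assert(p[0] == 0);	// zero-filled on next touch
 */
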
/*
 * Application wants to free up the pages and associated backing store.
 * This is effectively punching a hole into the middle of a file.
 */
static long madvise_remove(struct vm_area_struct *vma,
				struct vm_area_struct **prev,
				unsigned long start, unsigned long end)
{
	loff_t offset;
	int error;
	struct file *f;

	*prev = NULL;	/* tell sys_madvise we drop mmap_sem */

	if (vma->vm_flags & VM_LOCKED)
		return -EINVAL;

	f = vma->vm_file;

	if (!f || !f->f_mapping || !f->f_mapping->host) {
		return -EINVAL;
	}

	if ((vma->vm_flags & (VM_SHARED|VM_WRITE)) != (VM_SHARED|VM_WRITE))
		return -EACCES;

	offset = (loff_t)(start - vma->vm_start)
			+ ((loff_t)vma->vm_pgoff << PAGE_SHIFT);

	/*
	 * Filesystem's fallocate may need to take i_mutex.  We need to
	 * explicitly grab a reference because the vma (and hence the
	 * vma's reference to the file) can go away as soon as we drop
	 * mmap_sem.
	 */
	get_file(f);
	up_read(&current->mm->mmap_sem);
	error = vfs_fallocate(f,
				FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
				offset, end - start);
	fput(f);
	down_read(&current->mm->mmap_sem);
	return error;
}

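/*
 * Example (a hedged sketch; the tmpfs path is an assumption): on a
 * shared, writable file mapping, MADV_REMOVE acts like
 * fallocate(FALLOC_FL_PUNCH_HOLE) on the backing file, as the
 * vfs_fallocate() call above shows. The filesystem must support hole
 * punching, or vfs_fallocate() reports -EOPNOTSUPP.
 *
 *	int fd = open("/dev/shm/blob", O_RDWR | O_CREAT, 0600);
 *	ftruncate(fd, 8192);
 *	char *p = mmap(NULL, 8192, PROT_READ | PROT_WRITE,
 *		       MAP_SHARED, fd, 0);
 *	madvise(p, 4096, MADV_REMOVE);	// punches a hole in the first page
 */
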
#ifdef CONFIG_MEMORY_FAILURE
/*
 * Error injection support for memory error handling.
 */
static int madvise_hwpoison(int bhv, unsigned long start, unsigned long end)
{
	struct page *p;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	for (; start < end; start += PAGE_SIZE <<
				compound_order(compound_head(p))) {
		int ret;

		ret = get_user_pages_fast(start, 1, 0, &p);
		if (ret != 1)
			return ret;

		if (PageHWPoison(p)) {
			put_page(p);
			continue;
		}
		if (bhv == MADV_SOFT_OFFLINE) {
			pr_info("Soft offlining page %#lx at %#lx\n",
				page_to_pfn(p), start);
			ret = soft_offline_page(p, MF_COUNT_INCREASED);
			if (ret)
				return ret;
			continue;
		}
		pr_info("Injecting memory failure for page %#lx at %#lx\n",
			page_to_pfn(p), start);
		/* Ignore return value for now */
		memory_failure(page_to_pfn(p), 0, MF_COUNT_INCREASED);
	}
	return 0;
}
#endif

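/*
 * Example (an illustrative sketch; requires CAP_SYS_ADMIN and a kernel
 * built with CONFIG_MEMORY_FAILURE): hwpoison test tools inject an
 * error into one of their own pages roughly like this.
 *
 *	char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	p[0] = 1;				// populate the page first
 *	madvise(p, 4096, MADV_HWPOISON);	// as root: simulate an error
 */
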
static long
madvise_vma(struct vm_area_struct *vma, struct vm_area_struct **prev,
		unsigned long start, unsigned long end, int behavior)
{
	switch (behavior) {
	case MADV_REMOVE:
		return madvise_remove(vma, prev, start, end);
	case MADV_WILLNEED:
		return madvise_willneed(vma, prev, start, end);
	case MADV_DONTNEED:
		return madvise_dontneed(vma, prev, start, end);
	default:
		return madvise_behavior(vma, prev, start, end, behavior);
	}
}

static bool
madvise_behavior_valid(int behavior)
{
	switch (behavior) {
	case MADV_DOFORK:
	case MADV_DONTFORK:
	case MADV_NORMAL:
	case MADV_SEQUENTIAL:
	case MADV_RANDOM:
	case MADV_REMOVE:
	case MADV_WILLNEED:
	case MADV_DONTNEED:
#ifdef CONFIG_KSM
	case MADV_MERGEABLE:
	case MADV_UNMERGEABLE:
#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	case MADV_HUGEPAGE:
	case MADV_NOHUGEPAGE:
#endif
	case MADV_DONTDUMP:
	case MADV_DODUMP:
		return true;

	default:
		return false;
	}
}

/*
 * The madvise(2) system call.
 *
 * Applications can use madvise() to advise the kernel how it should
 * handle paging I/O in this VM area.  The idea is to help the kernel
 * use appropriate read-ahead and caching techniques.  The information
 * provided is advisory only, and can be safely disregarded by the
 * kernel without affecting the correct operation of the application.
 *
 * behavior values:
 *  MADV_NORMAL - the default behavior is to read clusters.  This
 *		results in some read-ahead and read-behind.
 *  MADV_RANDOM - the system should read the minimum amount of data
 *		on any access, since it is unlikely that the appli-
 *		cation will need more than what it asks for.
 *  MADV_SEQUENTIAL - pages in the given range will probably be accessed
 *		once, so they can be aggressively read ahead, and
 *		can be freed soon after they are accessed.
 *  MADV_WILLNEED - the application is notifying the system to read
 *		some pages ahead.
 *  MADV_DONTNEED - the application is finished with the given range,
 *		so the kernel can free resources associated with it.
 *  MADV_REMOVE - the application wants to free up the given range of
 *		pages and associated backing store.
 *  MADV_DONTFORK - omit this area from child's address space when forking:
 *		typically, to avoid COWing pages pinned by get_user_pages().
 *  MADV_DOFORK - cancel MADV_DONTFORK: no longer omit this area when forking.
 *  MADV_MERGEABLE - the application recommends that KSM try to merge pages in
 *		this area with pages of identical content from other such areas.
 *  MADV_UNMERGEABLE- cancel MADV_MERGEABLE: no longer merge pages with others.
 *
 * return values:
 *  zero    - success
 *  -EINVAL - start + len < 0, start is not page-aligned,
 *		"behavior" is not a valid value, or application
 *		is attempting to release locked or shared pages.
 *  -ENOMEM - addresses in the specified range are not currently
 *		mapped, or are outside the AS of the process.
 *  -EIO    - an I/O error occurred while paging in data.
 *  -EBADF  - map exists, but area maps something that isn't a file.
 *  -EAGAIN - a kernel resource was temporarily unavailable.
 */
SYSCALL_DEFINE3(madvise, unsigned long, start, size_t, len_in, int, behavior)
{
	unsigned long end, tmp;
	struct vm_area_struct *vma, *prev;
	int unmapped_error = 0;
	int error = -EINVAL;
	int write;
	size_t len;
	struct blk_plug plug;

#ifdef CONFIG_MEMORY_FAILURE
	if (behavior == MADV_HWPOISON || behavior == MADV_SOFT_OFFLINE)
		return madvise_hwpoison(behavior, start, start+len_in);
#endif
	if (!madvise_behavior_valid(behavior))
		return error;

	if (start & ~PAGE_MASK)
		return error;
	len = (len_in + ~PAGE_MASK) & PAGE_MASK;

	/* Check to see whether len was rounded up from small -ve to zero */
	if (len_in && !len)
		return error;

	end = start + len;
	if (end < start)
		return error;

	error = 0;
	if (end == start)
		return error;

	write = madvise_need_mmap_write(behavior);
	if (write)
		down_write(&current->mm->mmap_sem);
	else
		down_read(&current->mm->mmap_sem);

	/*
	 * If the interval [start,end) covers some unmapped address
	 * ranges, just ignore them, but return -ENOMEM at the end.
	 * - different from the way of handling in mlock etc.
	 */
	vma = find_vma_prev(current->mm, start, &prev);
	if (vma && start > vma->vm_start)
		prev = vma;

	blk_start_plug(&plug);
	for (;;) {
		/* Still start < end. */
		error = -ENOMEM;
		if (!vma)
			goto out;

		/* Here start < (end|vma->vm_end). */
		if (start < vma->vm_start) {
			unmapped_error = -ENOMEM;
			start = vma->vm_start;
			if (start >= end)
				goto out;
		}

		/* Here vma->vm_start <= start < (end|vma->vm_end) */
		tmp = vma->vm_end;
		if (end < tmp)
			tmp = end;

		/* Here vma->vm_start <= start < tmp <= (end|vma->vm_end). */
		error = madvise_vma(vma, &prev, start, tmp, behavior);
		if (error)
			goto out;
		start = tmp;
		if (prev && start < prev->vm_end)
			start = prev->vm_end;
		error = unmapped_error;
		if (start >= end)
			goto out;
		if (prev)
			vma = prev->vm_next;
		else	/* madvise_remove dropped mmap_sem */
			vma = find_vma(current->mm, start);
	}
out:
	blk_finish_plug(&plug);
	if (write)
		up_write(&current->mm->mmap_sem);
	else
		up_read(&current->mm->mmap_sem);

	return error;
}
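
/*
 * End-to-end example (a minimal userspace sketch, not part of this
 * file; error handling omitted): exercises the advisory behaviors
 * documented above and the -EINVAL path through
 * madvise_behavior_valid(). Compiles as a standalone program.
 *
 *	#include <assert.h>
 *	#include <sys/mman.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		long pg = sysconf(_SC_PAGESIZE);
 *		char *p = mmap(NULL, 4 * pg, PROT_READ | PROT_WRITE,
 *			       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *
 *		madvise(p, 4 * pg, MADV_SEQUENTIAL); // hint access pattern
 *		p[0] = 'x';
 *		madvise(p, 4 * pg, MADV_DONTNEED);   // discard contents
 *		assert(p[0] == 0);                   // refaulted as zeroes
 *
 *		// An unknown advice value is rejected with EINVAL.
 *		assert(madvise(p, pg, 12345) == -1);
 *		return 0;
 *	}
 */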