/*
 * mm/truncate.c - code for taking down pages from address_spaces
 *
 * Copyright (C) 2002, Linus Torvalds
 *
 * 10Sep2002	Andrew Morton
 *		Initial version.
 */

#include <linux/kernel.h>
#include <linux/backing-dev.h>
#include <linux/dax.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/export.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/pagevec.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/buffer_head.h>	/* grr. try_to_release_page,
				   do_invalidatepage */
#include <linux/shmem_fs.h>
#include <linux/cleancache.h>
#include <linux/rmap.h>
#include "internal.h"

/*
 * Regular page slots are stabilized by the page lock even without the tree
 * itself locked.  These unlocked entries need verification under the tree
 * lock.
 */
static inline void __clear_shadow_entry(struct address_space *mapping,
				pgoff_t index, void *entry)
{
	struct radix_tree_node *node;
	void **slot;

	if (!__radix_tree_lookup(&mapping->i_pages, index, &node, &slot))
		return;
	if (*slot != entry)
		return;
	__radix_tree_replace(&mapping->i_pages, node, slot, NULL,
			     workingset_update_node);
	mapping->nrexceptional--;
}
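
/* Like __clear_shadow_entry(), but takes the tree lock itself. */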
static void clear_shadow_entry(struct address_space *mapping, pgoff_t index,
			       void *entry)
{
	xa_lock_irq(&mapping->i_pages);
	__clear_shadow_entry(mapping, index, entry);
	xa_unlock_irq(&mapping->i_pages);
}

/*
 * Unconditionally remove exceptional entries.  Usually called from the
 * truncate path.  Note that the pagevec may be altered by this function by
 * removing exceptional entries similar to what pagevec_remove_exceptionals
 * does.
 */
static void truncate_exceptional_pvec_entries(struct address_space *mapping,
				struct pagevec *pvec, pgoff_t *indices,
				pgoff_t end)
{
	int i, j;
	bool dax, lock;

	/* Handled by shmem itself */
	if (shmem_mapping(mapping))
		return;

	for (j = 0; j < pagevec_count(pvec); j++)
		if (radix_tree_exceptional_entry(pvec->pages[j]))
			break;

	if (j == pagevec_count(pvec))
		return;

	dax = dax_mapping(mapping);
	lock = !dax && indices[j] < end;
	if (lock)
		xa_lock_irq(&mapping->i_pages);

	for (i = j; i < pagevec_count(pvec); i++) {
		struct page *page = pvec->pages[i];
		pgoff_t index = indices[i];

		if (!radix_tree_exceptional_entry(page)) {
			pvec->pages[j++] = page;
			continue;
		}

		if (index >= end)
			continue;

		if (unlikely(dax)) {
			dax_delete_mapping_entry(mapping, index);
			continue;
		}

		__clear_shadow_entry(mapping, index, page);
	}

	if (lock)
		xa_unlock_irq(&mapping->i_pages);
	pvec->nr = j;
}

/*
 * Invalidate exceptional entry if easily possible.  This handles exceptional
 * entries for invalidate_inode_pages().
 */
static int invalidate_exceptional_entry(struct address_space *mapping,
					pgoff_t index, void *entry)
{
	/* Handled by shmem itself, or for DAX we do nothing. */
	if (shmem_mapping(mapping) || dax_mapping(mapping))
		return 1;
	clear_shadow_entry(mapping, index, entry);
	return 1;
}

/*
 * Invalidate exceptional entry if clean.  This handles exceptional entries
 * for invalidate_inode_pages2(), so for DAX it evicts only clean entries.
 */
static int invalidate_exceptional_entry2(struct address_space *mapping,
					 pgoff_t index, void *entry)
{
	/* Handled by shmem itself */
	if (shmem_mapping(mapping))
		return 1;
	if (dax_mapping(mapping))
		return dax_invalidate_mapping_entry_sync(mapping, index);
	clear_shadow_entry(mapping, index, entry);
	return 1;
}

/**
 * do_invalidatepage - invalidate part or all of a page
 * @page: the page which is affected
 * @offset: start of the range to invalidate
 * @length: length of the range to invalidate
 *
 * do_invalidatepage() is called when all or part of the page has become
 * invalidated by a truncate operation.
 *
 * do_invalidatepage() does not have to release all buffers, but it must
 * ensure that no dirty buffer is left outside @offset and that no I/O
 * is underway against any of the blocks which are outside the truncation
 * point, because the caller is about to free (and possibly reuse) those
 * blocks on-disk.
 */
void do_invalidatepage(struct page *page, unsigned int offset,
		       unsigned int length)
{
	void (*invalidatepage)(struct page *, unsigned int, unsigned int);

	invalidatepage = page->mapping->a_ops->invalidatepage;
#ifdef CONFIG_BLOCK
	if (!invalidatepage)
		invalidatepage = block_invalidatepage;
#endif
	if (invalidatepage)
		(*invalidatepage)(page, offset, length);
}

/*
 * If truncate cannot remove the fs-private metadata from the page, the page
 * becomes orphaned.  It will be left on the LRU and may even be mapped into
 * user pagetables if we're racing with filemap_fault().
 *
 * We need to bail out if page->mapping is no longer equal to the original
 * mapping.  This happens a) when the VM reclaimed the page while we waited on
 * its lock, b) when a concurrent invalidate_mapping_pages got there first and
 * c) when tmpfs swizzles a page between a tmpfs inode and swapper_space.
 */
static void
truncate_cleanup_page(struct address_space *mapping, struct page *page)
{
	if (page_mapped(page)) {
		pgoff_t nr = PageTransHuge(page) ? HPAGE_PMD_NR : 1;

		unmap_mapping_pages(mapping, page->index, nr, false);
	}

	if (page_has_private(page))
		do_invalidatepage(page, 0, PAGE_SIZE);

	/*
	 * Some filesystems seem to re-dirty the page even after
	 * the VM has canceled the dirty bit (eg ext3 journaling).
	 * Hence dirty accounting check is placed after invalidation.
	 */
	cancel_dirty_page(page);
	ClearPageMappedToDisk(page);
}

/*
 * This is for invalidate_mapping_pages().  That function can be called at
 * any time, and is not supposed to throw away dirty pages.  But pages can
 * be marked dirty at any time too, so use remove_mapping which safely
 * discards clean, unused pages.
 *
 * Returns non-zero if the page was successfully invalidated.
 */
static int
invalidate_complete_page(struct address_space *mapping, struct page *page)
{
	int ret;

	if (page->mapping != mapping)
		return 0;

	if (page_has_private(page) && !try_to_release_page(page, 0))
		return 0;

	ret = remove_mapping(mapping, page);

	return ret;
}
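
/*
 * Remove @page from the page cache as part of truncation.  The page must be
 * locked; returns -EIO if it no longer belongs to @mapping.
 */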
int truncate_inode_page(struct address_space *mapping, struct page *page)
{
	VM_BUG_ON_PAGE(PageTail(page), page);

	if (page->mapping != mapping)
		return -EIO;

	truncate_cleanup_page(mapping, page);
	delete_from_page_cache(page);
	return 0;
}

/*
 * Used to get rid of pages on hardware memory corruption.
 */
int generic_error_remove_page(struct address_space *mapping, struct page *page)
{
	if (!mapping)
		return -EINVAL;
	/*
	 * Only punch for normal data pages for now.
	 * Handling other types like directories would need more auditing.
	 */
	if (!S_ISREG(mapping->host->i_mode))
		return -EIO;
	return truncate_inode_page(mapping, page);
}
EXPORT_SYMBOL(generic_error_remove_page);

/*
 * Safely invalidate one page from its pagecache mapping.
 * It only drops clean, unused pages.  The page must be locked.
 *
 * Returns 1 if the page is successfully invalidated, otherwise 0.
 */
int invalidate_inode_page(struct page *page)
{
	struct address_space *mapping = page_mapping(page);

	if (!mapping)
		return 0;
	if (PageDirty(page) || PageWriteback(page))
		return 0;
	if (page_mapped(page))
		return 0;
	return invalidate_complete_page(mapping, page);
}

/**
 * truncate_inode_pages_range - truncate range of pages specified by start & end byte offsets
 * @mapping: mapping to truncate
 * @lstart: offset from which to truncate
 * @lend: offset to which to truncate (inclusive)
 *
 * Truncate the page cache, removing the pages that are between the
 * specified offsets (and zeroing out partial pages
 * if lstart or lend + 1 is not page aligned).
 *
 * Truncate takes two passes - the first pass is nonblocking.  It will not
 * block on page locks and it will not block on writeback.  The second pass
 * will wait.  This is to prevent as much IO as possible in the affected
 * region.  The first pass will remove most pages, so the search cost of the
 * second pass is low.
 *
 * We pass down the cache-hot hint to the page freeing code.  Even if the
 * mapping is large, it is probably the case that the final pages are the most
 * recently touched, and freeing happens in ascending file offset order.
 *
 * Note that since ->invalidatepage() accepts a range to invalidate,
 * truncate_inode_pages_range is able to handle cases where lend + 1 is not
 * properly page aligned.
 */
void truncate_inode_pages_range(struct address_space *mapping,
				loff_t lstart, loff_t lend)
{
	pgoff_t		start;		/* inclusive */
	pgoff_t		end;		/* exclusive */
	unsigned int	partial_start;	/* inclusive */
	unsigned int	partial_end;	/* exclusive */
	struct pagevec	pvec;
	pgoff_t		indices[PAGEVEC_SIZE];
	pgoff_t		index;
	int		i;

	if (mapping->nrpages == 0 && mapping->nrexceptional == 0)
		goto out;

	/* Offsets within partial pages */
	partial_start = lstart & (PAGE_SIZE - 1);
	partial_end = (lend + 1) & (PAGE_SIZE - 1);

	/*
	 * 'start' and 'end' always covers the range of pages to be fully
	 * truncated. Partial pages are covered with 'partial_start' at the
	 * start of the range and 'partial_end' at the end of the range.
	 * Note that 'end' is exclusive while 'lend' is inclusive.
	 */
	start = (lstart + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (lend == -1)
		/*
		 * lend == -1 indicates end-of-file so we have to set 'end'
		 * to the highest possible pgoff_t and since the type is
		 * unsigned we're using -1.
		 */
		end = -1;
	else
		end = (lend + 1) >> PAGE_SHIFT;

	pagevec_init(&pvec);
	index = start;
	while (index < end && pagevec_lookup_entries(&pvec, mapping, index,
			min(end - index, (pgoff_t)PAGEVEC_SIZE),
			indices)) {
		/*
		 * Pagevec array has exceptional entries and we may also fail
		 * to lock some pages. So we store pages that can be deleted
		 * in a new pagevec.
		 */
		struct pagevec locked_pvec;

		pagevec_init(&locked_pvec);
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			/* We rely upon deletion not changing page->index */
			index = indices[i];
			if (index >= end)
				break;

			if (radix_tree_exceptional_entry(page))
				continue;

			if (!trylock_page(page))
				continue;
			WARN_ON(page_to_index(page) != index);
			if (PageWriteback(page)) {
				unlock_page(page);
				continue;
			}
			if (page->mapping != mapping) {
				unlock_page(page);
				continue;
			}
			pagevec_add(&locked_pvec, page);
		}
		for (i = 0; i < pagevec_count(&locked_pvec); i++)
			truncate_cleanup_page(mapping, locked_pvec.pages[i]);
		delete_from_page_cache_batch(mapping, &locked_pvec);
		for (i = 0; i < pagevec_count(&locked_pvec); i++)
			unlock_page(locked_pvec.pages[i]);
		truncate_exceptional_pvec_entries(mapping, &pvec, indices, end);
		pagevec_release(&pvec);
		cond_resched();
		index++;
	}

	if (partial_start) {
		struct page *page = find_lock_page(mapping, start - 1);
		if (page) {
			unsigned int top = PAGE_SIZE;
			if (start > end) {
				/* Truncation within a single page */
				top = partial_end;
				partial_end = 0;
			}
			wait_on_page_writeback(page);
			zero_user_segment(page, partial_start, top);
			cleancache_invalidate_page(mapping, page);
			if (page_has_private(page))
				do_invalidatepage(page, partial_start,
						  top - partial_start);
			unlock_page(page);
			put_page(page);
		}
	}
	if (partial_end) {
		struct page *page = find_lock_page(mapping, end);
		if (page) {
			wait_on_page_writeback(page);
			zero_user_segment(page, 0, partial_end);
			cleancache_invalidate_page(mapping, page);
			if (page_has_private(page))
				do_invalidatepage(page, 0,
						  partial_end);
			unlock_page(page);
			put_page(page);
		}
	}
	/*
	 * If the truncation happened within a single page no pages
	 * will be released, just zeroed, so we can bail out now.
	 */
	if (start >= end)
		goto out;

	index = start;
	for ( ; ; ) {
		cond_resched();
		if (!pagevec_lookup_entries(&pvec, mapping, index,
			min(end - index, (pgoff_t)PAGEVEC_SIZE), indices)) {
			/* If all gone from start onwards, we're done */
			if (index == start)
				break;
			/* Otherwise restart to make sure all gone */
			index = start;
			continue;
		}
		if (index == start && indices[0] >= end) {
			/* All gone out of hole to be punched, we're done */
			pagevec_remove_exceptionals(&pvec);
			pagevec_release(&pvec);
			break;
		}
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			/* We rely upon deletion not changing page->index */
			index = indices[i];
			if (index >= end) {
				/* Restart punch to make sure all gone */
				index = start - 1;
				break;
			}

			if (radix_tree_exceptional_entry(page))
				continue;

			lock_page(page);
			WARN_ON(page_to_index(page) != index);
			wait_on_page_writeback(page);
			truncate_inode_page(mapping, page);
			unlock_page(page);
		}
		truncate_exceptional_pvec_entries(mapping, &pvec, indices, end);
		pagevec_release(&pvec);
		index++;
	}

out:
	cleancache_invalidate_inode(mapping);
}
EXPORT_SYMBOL(truncate_inode_pages_range);

/**
 * truncate_inode_pages - truncate *all* the pages from an offset
 * @mapping: mapping to truncate
 * @lstart: offset from which to truncate
 *
 * Called under (and serialised by) inode->i_mutex.
 *
 * Note: When this function returns, there can be a page in the process of
 * deletion (inside __delete_from_page_cache()) in the specified range.  Thus
 * mapping->nrpages can be non-zero when this function returns even after
 * truncation of the whole mapping.
 */
void truncate_inode_pages(struct address_space *mapping, loff_t lstart)
{
	truncate_inode_pages_range(mapping, lstart, (loff_t)-1);
}
EXPORT_SYMBOL(truncate_inode_pages);

/**
 * truncate_inode_pages_final - truncate *all* pages before inode dies
 * @mapping: mapping to truncate
 *
 * Called under (and serialized by) inode->i_mutex.
 *
 * Filesystems have to use this in the .evict_inode path to inform the
 * VM that this is the final truncate and the inode is going away.
 */
void truncate_inode_pages_final(struct address_space *mapping)
{
	unsigned long nrexceptional;
	unsigned long nrpages;

	/*
	 * Page reclaim cannot participate in regular inode lifetime
	 * management (can't call iput()) and thus can race with the
	 * inode teardown.  Tell it when the address space is exiting,
	 * so that it does not install eviction information after the
	 * final truncate has begun.
	 */
	mapping_set_exiting(mapping);

	/*
	 * When reclaim installs eviction entries, it increases
	 * nrexceptional first, then decreases nrpages.  Make sure we see
	 * this in the right order or we might miss an entry.
	 */
	nrpages = mapping->nrpages;
	smp_rmb();
	nrexceptional = mapping->nrexceptional;

	if (nrpages || nrexceptional) {
		/*
		 * As truncation uses a lockless tree lookup, cycle
		 * the tree lock to make sure any ongoing tree
		 * modification that does not see AS_EXITING is
		 * completed before starting the final truncate.
		 */
		xa_lock_irq(&mapping->i_pages);
		xa_unlock_irq(&mapping->i_pages);

		truncate_inode_pages(mapping, 0);
	}
}
EXPORT_SYMBOL(truncate_inode_pages_final);
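
/*
 * Sketch of typical use from a filesystem's ->evict_inode (hypothetical
 * minimal example; a real implementation would also release its own
 * fs-private state):
 *
 *	static void foo_evict_inode(struct inode *inode)
 *	{
 *		truncate_inode_pages_final(&inode->i_data);
 *		clear_inode(inode);
 *	}
 */
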
/**
 * invalidate_mapping_pages - Invalidate all the unlocked pages of one inode
 * @mapping: the address_space which holds the pages to invalidate
 * @start: the offset 'from' which to invalidate
 * @end: the offset 'to' which to invalidate (inclusive)
 *
 * This function only removes the unlocked pages; if you want to
 * remove all the pages of one inode, you must call truncate_inode_pages.
 *
 * invalidate_mapping_pages() will not block on IO activity.  It will not
 * invalidate pages which are dirty, locked, under writeback or mapped into
 * pagetables.
 */
unsigned long invalidate_mapping_pages(struct address_space *mapping,
		pgoff_t start, pgoff_t end)
{
	pgoff_t indices[PAGEVEC_SIZE];
	struct pagevec pvec;
	pgoff_t index = start;
	unsigned long ret;
	unsigned long count = 0;
	int i;

	pagevec_init(&pvec);
	while (index <= end && pagevec_lookup_entries(&pvec, mapping, index,
			min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1,
			indices)) {
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			/* We rely upon deletion not changing page->index */
			index = indices[i];
			if (index > end)
				break;

			if (radix_tree_exceptional_entry(page)) {
				invalidate_exceptional_entry(mapping, index,
							     page);
				continue;
			}

			if (!trylock_page(page))
				continue;

			WARN_ON(page_to_index(page) != index);

			/* Middle of THP: skip */
			if (PageTransTail(page)) {
				unlock_page(page);
				continue;
			} else if (PageTransHuge(page)) {
				index += HPAGE_PMD_NR - 1;
				i += HPAGE_PMD_NR - 1;
				/*
				 * 'end' is in the middle of THP. Don't
				 * invalidate the page as the part outside of
				 * 'end' could be still useful.
				 */
				if (index > end) {
					unlock_page(page);
					continue;
				}
			}

			ret = invalidate_inode_page(page);
			unlock_page(page);
			/*
			 * Invalidation is a hint that the page is no longer
			 * of interest, so try to speed up its reclaim.
			 */
			if (!ret)
				deactivate_file_page(page);
			count += ret;
		}
		pagevec_remove_exceptionals(&pvec);
		pagevec_release(&pvec);
		cond_resched();
		index++;
	}
	return count;
}
EXPORT_SYMBOL(invalidate_mapping_pages);
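
/*
 * Example (sketch, hypothetical caller): dropping clean cached pages for a
 * byte range in the style of POSIX_FADV_DONTNEED, assuming the caller has
 * already rounded the range to full pages:
 *
 *	invalidate_mapping_pages(file->f_mapping,
 *				 offset >> PAGE_SHIFT,
 *				 (offset + len - 1) >> PAGE_SHIFT);
 */
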
/*
 * This is like invalidate_complete_page(), except it ignores the page's
 * refcount.  We do this because invalidate_inode_pages2() needs stronger
 * invalidation guarantees, and cannot afford to leave pages behind because
 * shrink_page_list() has a temp ref on them, or because they're transiently
 * sitting in the lru_cache_add() pagevecs.
 */
static int
invalidate_complete_page2(struct address_space *mapping, struct page *page)
{
	unsigned long flags;

	if (page->mapping != mapping)
		return 0;

	if (page_has_private(page) && !try_to_release_page(page, GFP_KERNEL))
		return 0;

	xa_lock_irqsave(&mapping->i_pages, flags);
	if (PageDirty(page))
		goto failed;

	BUG_ON(page_has_private(page));
	__delete_from_page_cache(page, NULL);
	xa_unlock_irqrestore(&mapping->i_pages, flags);

	if (mapping->a_ops->freepage)
		mapping->a_ops->freepage(page);

	put_page(page);	/* pagecache ref */
	return 1;
failed:
	xa_unlock_irqrestore(&mapping->i_pages, flags);
	return 0;
}
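
/*
 * Give the filesystem a chance to write back a dirty page before it is
 * invalidated; returns 0 when there is nothing to launder.
 */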
static int do_launder_page(struct address_space *mapping, struct page *page)
{
	if (!PageDirty(page))
		return 0;
	if (page->mapping != mapping || mapping->a_ops->launder_page == NULL)
		return 0;
	return mapping->a_ops->launder_page(page);
}

/**
 * invalidate_inode_pages2_range - remove range of pages from an address_space
 * @mapping: the address_space
 * @start: the page offset 'from' which to invalidate
 * @end: the page offset 'to' which to invalidate (inclusive)
 *
 * Any pages which are found to be mapped into pagetables are unmapped prior to
 * invalidation.
 *
 * Returns -EBUSY if any pages could not be invalidated.
 */
int invalidate_inode_pages2_range(struct address_space *mapping,
				  pgoff_t start, pgoff_t end)
{
	pgoff_t indices[PAGEVEC_SIZE];
	struct pagevec pvec;
	pgoff_t index;
	int i;
	int ret = 0;
	int ret2 = 0;
	int did_range_unmap = 0;

	if (mapping->nrpages == 0 && mapping->nrexceptional == 0)
		goto out;

	pagevec_init(&pvec);
	index = start;
	while (index <= end && pagevec_lookup_entries(&pvec, mapping, index,
			min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1,
			indices)) {
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			/* We rely upon deletion not changing page->index */
			index = indices[i];
			if (index > end)
				break;

			if (radix_tree_exceptional_entry(page)) {
				if (!invalidate_exceptional_entry2(mapping,
								   index, page))
					ret = -EBUSY;
				continue;
			}

			lock_page(page);
			WARN_ON(page_to_index(page) != index);
			if (page->mapping != mapping) {
				unlock_page(page);
				continue;
			}
			wait_on_page_writeback(page);
			if (page_mapped(page)) {
				if (!did_range_unmap) {
					/*
					 * Zap the rest of the file in one hit.
					 */
					unmap_mapping_pages(mapping, index,
						(1 + end - index), false);
					did_range_unmap = 1;
				} else {
					/*
					 * Just zap this page
					 */
					unmap_mapping_pages(mapping, index,
								1, false);
				}
			}
			BUG_ON(page_mapped(page));
			ret2 = do_launder_page(mapping, page);
			if (ret2 == 0) {
				if (!invalidate_complete_page2(mapping, page))
					ret2 = -EBUSY;
			}
			if (ret2 < 0)
				ret = ret2;
			unlock_page(page);
		}
		pagevec_remove_exceptionals(&pvec);
		pagevec_release(&pvec);
		cond_resched();
		index++;
	}
	/*
	 * For DAX we invalidate page tables after invalidating the radix tree.
	 * We could invalidate page tables while invalidating each entry,
	 * however that would be expensive.  And doing range unmapping
	 * beforehand doesn't work, as we have no cheap way to tell whether a
	 * radix tree entry got remapped later.
	 */
	if (dax_mapping(mapping)) {
		unmap_mapping_pages(mapping, start, end - start + 1, false);
	}
out:
	cleancache_invalidate_inode(mapping);
	return ret;
}
EXPORT_SYMBOL_GPL(invalidate_inode_pages2_range);

/**
 * invalidate_inode_pages2 - remove all pages from an address_space
 * @mapping: the address_space
 *
 * Any pages which are found to be mapped into pagetables are unmapped prior to
 * invalidation.
 *
 * Returns -EBUSY if any pages could not be invalidated.
 */
int invalidate_inode_pages2(struct address_space *mapping)
{
	return invalidate_inode_pages2_range(mapping, 0, -1);
}
EXPORT_SYMBOL_GPL(invalidate_inode_pages2);

/**
 * truncate_pagecache - unmap and remove pagecache that has been truncated
 * @inode: inode
 * @newsize: new file size
 *
 * inode's new i_size must already be written before truncate_pagecache
 * is called.
 *
 * This function should typically be called before the filesystem
 * releases resources associated with the freed range (eg. deallocates
 * blocks).  This way, pagecache will always stay logically coherent
 * with on-disk format, and the filesystem would not have to deal with
 * situations such as writepage being called for a page that has already
 * had its underlying blocks deallocated.
 */
void truncate_pagecache(struct inode *inode, loff_t newsize)
{
	struct address_space *mapping = inode->i_mapping;
	loff_t holebegin = round_up(newsize, PAGE_SIZE);

	/*
	 * unmap_mapping_range is called twice, first simply for
	 * efficiency so that truncate_inode_pages does fewer
	 * single-page unmaps.  However after this first call, and
	 * before truncate_inode_pages finishes, it is possible for
	 * private pages to be COWed, which remain after
	 * truncate_inode_pages finishes, hence the second
	 * unmap_mapping_range call must be made for correctness.
	 */
	unmap_mapping_range(mapping, holebegin, 0, 1);
	truncate_inode_pages(mapping, newsize);
	unmap_mapping_range(mapping, holebegin, 0, 1);
}
EXPORT_SYMBOL(truncate_pagecache);

/**
 * truncate_setsize - update inode and pagecache for a new file size
 * @inode: inode
 * @newsize: new file size
 *
 * truncate_setsize updates i_size and performs pagecache truncation (if
 * necessary) to @newsize.  It will typically be called from the filesystem's
 * setattr function when ATTR_SIZE is passed in.
 *
 * Must be called with a lock serializing truncates and writes (generally
 * i_mutex but e.g. xfs uses a different lock) and before all filesystem
 * specific block truncation has been performed.
 */
void truncate_setsize(struct inode *inode, loff_t newsize)
{
	loff_t oldsize = inode->i_size;

	i_size_write(inode, newsize);
	if (newsize > oldsize)
		pagecache_isize_extended(inode, oldsize, newsize);
	truncate_pagecache(inode, newsize);
}
EXPORT_SYMBOL(truncate_setsize);
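
/*
 * Sketch of typical use from a filesystem's ->setattr (hypothetical,
 * simplified; the fs-specific block truncation would follow):
 *
 *	if (attr->ia_valid & ATTR_SIZE)
 *		truncate_setsize(inode, attr->ia_size);
 */
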
/**
 * pagecache_isize_extended - update pagecache after extension of i_size
 * @inode: inode for which i_size was extended
 * @from: original inode size
 * @to: new inode size
 *
 * Handle extension of inode size either caused by extending truncate or by
 * write starting after current i_size.  We mark the page straddling current
 * i_size RO so that page_mkwrite() is called on the nearest write access to
 * the page.  This way filesystem can be sure that page_mkwrite() is called on
 * the page before user writes to the page via mmap after the i_size has been
 * changed.
 *
 * The function must be called after i_size is updated so that page fault
 * coming after we unlock the page will already see the new i_size.
 * The function must be called while we still hold i_mutex - this not only
 * makes sure i_size is stable but also that userspace cannot observe new
 * i_size value before we are prepared to store mmap writes at new inode size.
 */
void pagecache_isize_extended(struct inode *inode, loff_t from, loff_t to)
{
	int bsize = i_blocksize(inode);
	loff_t rounded_from;
	struct page *page;
	pgoff_t index;

	WARN_ON(to > inode->i_size);

	if (from >= to || bsize == PAGE_SIZE)
		return;
	/* Page straddling @from will not have any hole block created? */
	rounded_from = round_up(from, bsize);
	if (to <= rounded_from || !(rounded_from & (PAGE_SIZE - 1)))
		return;

	index = from >> PAGE_SHIFT;
	page = find_lock_page(inode->i_mapping, index);
	/* Page not cached? Nothing to do */
	if (!page)
		return;
	/*
	 * See clear_page_dirty_for_io() for details why set_page_dirty()
	 * is needed.
	 */
	if (page_mkclean(page))
		set_page_dirty(page);
	unlock_page(page);
	put_page(page);
}
EXPORT_SYMBOL(pagecache_isize_extended);

/**
 * truncate_pagecache_range - unmap and remove pagecache that is hole-punched
 * @inode: inode
 * @lstart: offset of beginning of hole
 * @lend: offset of last byte of hole
 *
 * This function should typically be called before the filesystem
 * releases resources associated with the freed range (eg. deallocates
 * blocks).  This way, pagecache will always stay logically coherent
 * with on-disk format, and the filesystem would not have to deal with
 * situations such as writepage being called for a page that has already
 * had its underlying blocks deallocated.
 */
void truncate_pagecache_range(struct inode *inode, loff_t lstart, loff_t lend)
{
	struct address_space *mapping = inode->i_mapping;
	loff_t unmap_start = round_up(lstart, PAGE_SIZE);
	loff_t unmap_end = round_down(1 + lend, PAGE_SIZE) - 1;
	/*
	 * This rounding is currently just for example: unmap_mapping_range
	 * expands its hole outwards, whereas we want it to contract the hole
	 * inwards.  However, existing callers of truncate_pagecache_range are
	 * doing their own page rounding first.  Note that unmap_mapping_range
	 * allows holelen 0 for all, and we allow lend -1 for end of file.
	 */

	/*
	 * Unlike in truncate_pagecache, unmap_mapping_range is called only
	 * once (before truncating pagecache), and without "even_cows" flag:
	 * hole-punching should not remove private COWed pages from the hole.
	 */
	if ((u64)unmap_end > (u64)unmap_start)
		unmap_mapping_range(mapping, unmap_start,
				    1 + unmap_end - unmap_start, 0);
	truncate_inode_pages_range(mapping, lstart, lend);
}
EXPORT_SYMBOL(truncate_pagecache_range);