/*
 * mm/truncate.c - code for taking down pages from address_spaces
 *
 * Copyright (C) 2002, Linus Torvalds
 *
 * 10Sep2002    Andrew Morton
 *              Initial version.
 */

#include <linux/kernel.h>
#include <linux/backing-dev.h>
#include <linux/dax.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/export.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/pagevec.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/buffer_head.h>  /* grr. try_to_release_page,
                                   do_invalidatepage */
#include <linux/shmem_fs.h>
#include <linux/cleancache.h>
#include <linux/rmap.h>
#include "internal.h"
/*
 * Regular page slots are stabilized by the page lock even without the tree
 * itself locked.  These unlocked entries need verification under the tree
 * lock.
 */
static inline void __clear_shadow_entry(struct address_space *mapping,
                                        pgoff_t index, void *entry)
{
        XA_STATE(xas, &mapping->i_pages, index);

        xas_set_update(&xas, workingset_update_node);
        if (xas_load(&xas) != entry)
                return;
        xas_store(&xas, NULL);
        mapping->nrexceptional--;
}

static void clear_shadow_entry(struct address_space *mapping, pgoff_t index,
                               void *entry)
{
        xa_lock_irq(&mapping->i_pages);
        __clear_shadow_entry(mapping, index, entry);
        xa_unlock_irq(&mapping->i_pages);
}
/*
 * Unconditionally remove exceptional entries. Usually called from truncate
 * path. Note that the pagevec may be altered by this function by removing
 * exceptional entries similar to what pagevec_remove_exceptionals does.
 */
static void truncate_exceptional_pvec_entries(struct address_space *mapping,
                                              struct pagevec *pvec,
                                              pgoff_t *indices, pgoff_t end)
{
        int i, j;
        bool dax, lock;

        /* Handled by shmem itself */
        if (shmem_mapping(mapping))
                return;

        for (j = 0; j < pagevec_count(pvec); j++)
                if (xa_is_value(pvec->pages[j]))
                        break;

        if (j == pagevec_count(pvec))
                return;

        dax = dax_mapping(mapping);
        lock = !dax && indices[j] < end;
        if (lock)
                xa_lock_irq(&mapping->i_pages);

        for (i = j; i < pagevec_count(pvec); i++) {
                struct page *page = pvec->pages[i];
                pgoff_t index = indices[i];

                if (!xa_is_value(page)) {
                        pvec->pages[j++] = page;
                        continue;
                }

                if (index >= end)
                        continue;

                if (unlikely(dax)) {
                        dax_delete_mapping_entry(mapping, index);
                        continue;
                }

                __clear_shadow_entry(mapping, index, page);
        }

        if (lock)
                xa_unlock_irq(&mapping->i_pages);
        pvec->nr = j;
}
/*
 * Invalidate exceptional entry if easily possible. This handles exceptional
 * entries for invalidate_inode_pages().
 */
static int invalidate_exceptional_entry(struct address_space *mapping,
                                        pgoff_t index, void *entry)
{
        /* Handled by shmem itself, or for DAX we do nothing. */
        if (shmem_mapping(mapping) || dax_mapping(mapping))
                return 1;
        clear_shadow_entry(mapping, index, entry);
        return 1;
}

/*
 * Invalidate exceptional entry if clean. This handles exceptional entries for
 * invalidate_inode_pages2() so for DAX it evicts only clean entries.
 */
static int invalidate_exceptional_entry2(struct address_space *mapping,
                                         pgoff_t index, void *entry)
{
        /* Handled by shmem itself */
        if (shmem_mapping(mapping))
                return 1;
        if (dax_mapping(mapping))
                return dax_invalidate_mapping_entry_sync(mapping, index);
        clear_shadow_entry(mapping, index, entry);
        return 1;
}
/**
 * do_invalidatepage - invalidate part or all of a page
 * @page: the page which is affected
 * @offset: start of the range to invalidate
 * @length: length of the range to invalidate
 *
 * do_invalidatepage() is called when all or part of the page has become
 * invalidated by a truncate operation.
 *
 * do_invalidatepage() does not have to release all buffers, but it must
 * ensure that no dirty buffer is left outside @offset and that no I/O
 * is underway against any of the blocks which are outside the truncation
 * point.  Because the caller is about to free (and possibly reuse) those
 * blocks on-disk.
 */
void do_invalidatepage(struct page *page, unsigned int offset,
                       unsigned int length)
{
        void (*invalidatepage)(struct page *, unsigned int, unsigned int);

        invalidatepage = page->mapping->a_ops->invalidatepage;
#ifdef CONFIG_BLOCK
        if (!invalidatepage)
                invalidatepage = block_invalidatepage;
#endif
        if (invalidatepage)
                (*invalidatepage)(page, offset, length);
}
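
/*
 * Illustrative sketch (not part of the original file): a filesystem that
 * keeps private per-page state can supply its own ->invalidatepage, which
 * do_invalidatepage() above will call instead of falling back to
 * block_invalidatepage().  The names example_invalidatepage and
 * example_aops are hypothetical.
 */
static void example_invalidatepage(struct page *page, unsigned int offset,
                                   unsigned int length)
{
        /* tear down fs-private state attached to the invalidated range,
         * then let the generic buffer-head code drop the buffers */
        block_invalidatepage(page, offset, length);
}

static const struct address_space_operations example_aops = {
        .invalidatepage = example_invalidatepage,
        /* other methods omitted in this sketch */
};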
/*
 * If truncate cannot remove the fs-private metadata from the page, the page
 * becomes orphaned.  It will be left on the LRU and may even be mapped into
 * user pagetables if we're racing with filemap_fault().
 *
 * We need to bail out if page->mapping is no longer equal to the original
 * mapping.  This happens a) when the VM reclaimed the page while we waited on
 * its lock, b) when a concurrent invalidate_mapping_pages got there first and
 * c) when tmpfs swizzles a page between a tmpfs inode and swapper_space.
 */
static void
truncate_cleanup_page(struct address_space *mapping, struct page *page)
{
        if (page_mapped(page)) {
                pgoff_t nr = PageTransHuge(page) ? HPAGE_PMD_NR : 1;
                unmap_mapping_pages(mapping, page->index, nr, false);
        }

        if (page_has_private(page))
                do_invalidatepage(page, 0, PAGE_SIZE);

        /*
         * Some filesystems seem to re-dirty the page even after
         * the VM has canceled the dirty bit (eg ext3 journaling).
         * Hence dirty accounting check is placed after invalidation.
         */
        cancel_dirty_page(page);
        ClearPageMappedToDisk(page);
}
/*
 * This is for invalidate_mapping_pages().  That function can be called at
 * any time, and is not supposed to throw away dirty pages.  But pages can
 * be marked dirty at any time too, so use remove_mapping which safely
 * discards clean, unused pages.
 *
 * Returns non-zero if the page was successfully invalidated.
 */
static int
invalidate_complete_page(struct address_space *mapping, struct page *page)
{
        int ret;

        if (page->mapping != mapping)
                return 0;

        if (page_has_private(page) && !try_to_release_page(page, 0))
                return 0;

        ret = remove_mapping(mapping, page);

        return ret;
}

int truncate_inode_page(struct address_space *mapping, struct page *page)
{
        VM_BUG_ON_PAGE(PageTail(page), page);

        if (page->mapping != mapping)
                return -EIO;

        truncate_cleanup_page(mapping, page);
        delete_from_page_cache(page);
        return 0;
}
/*
 * Used to get rid of pages on hardware memory corruption.
 */
int generic_error_remove_page(struct address_space *mapping, struct page *page)
{
        if (!mapping)
                return -EINVAL;
        /*
         * Only punch for normal data pages for now.
         * Handling other types like directories would need more auditing.
         */
        if (!S_ISREG(mapping->host->i_mode))
                return -EIO;
        return truncate_inode_page(mapping, page);
}
EXPORT_SYMBOL(generic_error_remove_page);
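
/*
 * Illustrative sketch (not part of the original file): memory-failure
 * handling reaches this helper through ->error_remove_page, so a filesystem
 * that is happy with the generic behaviour simply wires it up in its
 * address_space_operations.  example_error_aops is a hypothetical name.
 */
static const struct address_space_operations example_error_aops = {
        .error_remove_page = generic_error_remove_page,
        /* other methods omitted in this sketch */
};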
/*
 * Safely invalidate one page from its pagecache mapping.
 * It only drops clean, unused pages. The page must be locked.
 *
 * Returns 1 if the page is successfully invalidated, otherwise 0.
 */
int invalidate_inode_page(struct page *page)
{
        struct address_space *mapping = page_mapping(page);
        if (!mapping)
                return 0;
        if (PageDirty(page) || PageWriteback(page))
                return 0;
        if (page_mapped(page))
                return 0;
        return invalidate_complete_page(mapping, page);
}
/**
 * truncate_inode_pages_range - truncate range of pages specified by start & end byte offsets
 * @mapping: mapping to truncate
 * @lstart: offset from which to truncate
 * @lend: offset to which to truncate (inclusive)
 *
 * Truncate the page cache, removing the pages that are between
 * specified offsets (and zeroing out partial pages
 * if lstart or lend + 1 is not page aligned).
 *
 * Truncate takes two passes - the first pass is nonblocking.  It will not
 * block on page locks and it will not block on writeback.  The second pass
 * will wait.  This is to prevent as much IO as possible in the affected region.
 * The first pass will remove most pages, so the search cost of the second pass
 * is low.
 *
 * We pass down the cache-hot hint to the page freeing code.  Even if the
 * mapping is large, it is probably the case that the final pages are the most
 * recently touched, and freeing happens in ascending file offset order.
 *
 * Note that since ->invalidatepage() accepts a range to invalidate,
 * truncate_inode_pages_range is able to handle cases where lend + 1 is not
 * page aligned.
 */
void truncate_inode_pages_range(struct address_space *mapping,
                                loff_t lstart, loff_t lend)
{
        pgoff_t         start;          /* inclusive */
        pgoff_t         end;            /* exclusive */
        unsigned int    partial_start;  /* inclusive */
        unsigned int    partial_end;    /* exclusive */
        struct pagevec  pvec;
        pgoff_t         indices[PAGEVEC_SIZE];
        pgoff_t         index;
        int             i;

        if (mapping->nrpages == 0 && mapping->nrexceptional == 0)
                goto out;

        /* Offsets within partial pages */
        partial_start = lstart & (PAGE_SIZE - 1);
        partial_end = (lend + 1) & (PAGE_SIZE - 1);

        /*
         * 'start' and 'end' always covers the range of pages to be fully
         * truncated. Partial pages are covered with 'partial_start' at the
         * start of the range and 'partial_end' at the end of the range.
         * Note that 'end' is exclusive while 'lend' is inclusive.
         */
        start = (lstart + PAGE_SIZE - 1) >> PAGE_SHIFT;
        if (lend == -1)
                /*
                 * lend == -1 indicates end-of-file so we have to set 'end'
                 * to the highest possible pgoff_t and since the type is
                 * unsigned we're using -1.
                 */
                end = -1;
        else
                end = (lend + 1) >> PAGE_SHIFT;

        pagevec_init(&pvec);
        index = start;
        while (index < end && pagevec_lookup_entries(&pvec, mapping, index,
                        min(end - index, (pgoff_t)PAGEVEC_SIZE),
                        indices)) {
                /*
                 * Pagevec array has exceptional entries and we may also fail
                 * to lock some pages. So we store pages that can be deleted
                 * in a new pagevec.
                 */
                struct pagevec locked_pvec;

                pagevec_init(&locked_pvec);
                for (i = 0; i < pagevec_count(&pvec); i++) {
                        struct page *page = pvec.pages[i];

                        /* We rely upon deletion not changing page->index */
                        index = indices[i];
                        if (index >= end)
                                break;

                        if (xa_is_value(page))
                                continue;

                        if (!trylock_page(page))
                                continue;
                        WARN_ON(page_to_index(page) != index);
                        if (PageWriteback(page)) {
                                unlock_page(page);
                                continue;
                        }
                        if (page->mapping != mapping) {
                                unlock_page(page);
                                continue;
                        }
                        pagevec_add(&locked_pvec, page);
                }
                for (i = 0; i < pagevec_count(&locked_pvec); i++)
                        truncate_cleanup_page(mapping, locked_pvec.pages[i]);
                delete_from_page_cache_batch(mapping, &locked_pvec);
                for (i = 0; i < pagevec_count(&locked_pvec); i++)
                        unlock_page(locked_pvec.pages[i]);
                truncate_exceptional_pvec_entries(mapping, &pvec, indices, end);
                pagevec_release(&pvec);
                cond_resched();
                index++;
        }

        if (partial_start) {
                struct page *page = find_lock_page(mapping, start - 1);
                if (page) {
                        unsigned int top = PAGE_SIZE;
                        if (start > end) {
                                /* Truncation within a single page */
                                top = partial_end;
                                partial_end = 0;
                        }
                        wait_on_page_writeback(page);
                        zero_user_segment(page, partial_start, top);
                        cleancache_invalidate_page(mapping, page);
                        if (page_has_private(page))
                                do_invalidatepage(page, partial_start,
                                                  top - partial_start);
                        unlock_page(page);
                        put_page(page);
                }
        }
        if (partial_end) {
                struct page *page = find_lock_page(mapping, end);
                if (page) {
                        wait_on_page_writeback(page);
                        zero_user_segment(page, 0, partial_end);
                        cleancache_invalidate_page(mapping, page);
                        if (page_has_private(page))
                                do_invalidatepage(page, 0,
                                                  partial_end);
                        unlock_page(page);
                        put_page(page);
                }
        }
        /*
         * If the truncation happened within a single page no pages
         * will be released, just zeroed, so we can bail out now.
         */
        if (start >= end)
                goto out;

        index = start;
        for ( ; ; ) {
                cond_resched();
                if (!pagevec_lookup_entries(&pvec, mapping, index,
                        min(end - index, (pgoff_t)PAGEVEC_SIZE), indices)) {
                        /* If all gone from start onwards, we're done */
                        if (index == start)
                                break;
                        /* Otherwise restart to make sure all gone */
                        index = start;
                        continue;
                }
                if (index == start && indices[0] >= end) {
                        /* All gone out of hole to be punched, we're done */
                        pagevec_remove_exceptionals(&pvec);
                        pagevec_release(&pvec);
                        break;
                }

                for (i = 0; i < pagevec_count(&pvec); i++) {
                        struct page *page = pvec.pages[i];

                        /* We rely upon deletion not changing page->index */
                        index = indices[i];
                        if (index >= end) {
                                /* Restart punch to make sure all gone */
                                index = start - 1;
                                break;
                        }

                        if (xa_is_value(page))
                                continue;

                        lock_page(page);
                        WARN_ON(page_to_index(page) != index);
                        wait_on_page_writeback(page);
                        truncate_inode_page(mapping, page);
                        unlock_page(page);
                }
                truncate_exceptional_pvec_entries(mapping, &pvec, indices, end);
                pagevec_release(&pvec);
                index++;
        }

out:
        cleancache_invalidate_inode(mapping);
}
EXPORT_SYMBOL(truncate_inode_pages_range);
/**
 * truncate_inode_pages - truncate *all* the pages from an offset
 * @mapping: mapping to truncate
 * @lstart: offset from which to truncate
 *
 * Called under (and serialised by) inode->i_mutex.
 *
 * Note: When this function returns, there can be a page in the process of
 * deletion (inside __delete_from_page_cache()) in the specified range.  Thus
 * mapping->nrpages can be non-zero when this function returns even after
 * truncation of the whole mapping.
 */
void truncate_inode_pages(struct address_space *mapping, loff_t lstart)
{
        truncate_inode_pages_range(mapping, lstart, (loff_t)-1);
}
EXPORT_SYMBOL(truncate_inode_pages);
/**
 * truncate_inode_pages_final - truncate *all* pages before inode dies
 * @mapping: mapping to truncate
 *
 * Called under (and serialized by) inode->i_mutex.
 *
 * Filesystems have to use this in the .evict_inode path to inform the
 * VM that this is the final truncate and the inode is going away.
 */
void truncate_inode_pages_final(struct address_space *mapping)
{
        unsigned long nrexceptional;
        unsigned long nrpages;

        /*
         * Page reclaim can not participate in regular inode lifetime
         * management (can't call iput()) and thus can race with the
         * inode teardown.  Tell it when the address space is exiting,
         * so that it does not install eviction information after the
         * final truncate has begun.
         */
        mapping_set_exiting(mapping);

        /*
         * When reclaim installs eviction entries, it increases
         * nrexceptional first, then decreases nrpages.  Make sure we see
         * this in the right order or we might miss an entry.
         */
        nrpages = mapping->nrpages;
        smp_rmb();
        nrexceptional = mapping->nrexceptional;

        if (nrpages || nrexceptional) {
                /*
                 * As truncation uses a lockless tree lookup, cycle
                 * the tree lock to make sure any ongoing tree
                 * modification that does not see AS_EXITING is
                 * completed before starting the final truncate.
                 */
                xa_lock_irq(&mapping->i_pages);
                xa_unlock_irq(&mapping->i_pages);

                truncate_inode_pages(mapping, 0);
        }
}
EXPORT_SYMBOL(truncate_inode_pages_final);
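
/*
 * Illustrative sketch (not part of the original file): the typical
 * .evict_inode shape the kernel-doc above describes.  The final truncate
 * must happen before clear_inode().  example_evict_inode is a hypothetical
 * name.
 */
static void example_evict_inode(struct inode *inode)
{
        truncate_inode_pages_final(&inode->i_data);
        /* fs-specific teardown (freeing blocks, dropping private state)
         * would go here */
        clear_inode(inode);
}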
/**
 * invalidate_mapping_pages - Invalidate all the unlocked pages of one inode
 * @mapping: the address_space which holds the pages to invalidate
 * @start: the offset 'from' which to invalidate
 * @end: the offset 'to' which to invalidate (inclusive)
 *
 * This function only removes the unlocked pages; if you want to
 * remove all the pages of one inode, you must call truncate_inode_pages.
 *
 * invalidate_mapping_pages() will not block on IO activity. It will not
 * invalidate pages which are dirty, locked, under writeback or mapped into
 * pagetables.
 */
unsigned long invalidate_mapping_pages(struct address_space *mapping,
                pgoff_t start, pgoff_t end)
{
        pgoff_t indices[PAGEVEC_SIZE];
        struct pagevec pvec;
        pgoff_t index = start;
        unsigned long ret;
        unsigned long count = 0;
        int i;

        pagevec_init(&pvec);
        while (index <= end && pagevec_lookup_entries(&pvec, mapping, index,
                        min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1,
                        indices)) {
                for (i = 0; i < pagevec_count(&pvec); i++) {
                        struct page *page = pvec.pages[i];

                        /* We rely upon deletion not changing page->index */
                        index = indices[i];
                        if (index > end)
                                break;

                        if (xa_is_value(page)) {
                                invalidate_exceptional_entry(mapping, index,
                                                             page);
                                continue;
                        }

                        if (!trylock_page(page))
                                continue;

                        WARN_ON(page_to_index(page) != index);

                        /* Middle of THP: skip */
                        if (PageTransTail(page)) {
                                unlock_page(page);
                                continue;
                        } else if (PageTransHuge(page)) {
                                index += HPAGE_PMD_NR - 1;
                                i += HPAGE_PMD_NR - 1;
                                /*
                                 * 'end' is in the middle of THP. Don't
                                 * invalidate the page as the part outside of
                                 * 'end' could be still useful.
                                 */
                                if (index > end) {
                                        unlock_page(page);
                                        continue;
                                }
                        }

                        ret = invalidate_inode_page(page);
                        unlock_page(page);
                        /*
                         * Invalidation is a hint that the page is no longer
                         * of interest; try to speed up its reclaim.
                         */
                        if (!ret)
                                deactivate_file_page(page);
                        count += ret;
                }
                pagevec_remove_exceptionals(&pvec);
                pagevec_release(&pvec);
                cond_resched();
                index++;
        }
        return count;
}
EXPORT_SYMBOL(invalidate_mapping_pages);
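
/*
 * Illustrative sketch (not part of the original file): dropping the clean,
 * unused cache of a whole file, in the style of fadvise(POSIX_FADV_DONTNEED).
 * Passing -1 as the (pgoff_t) end covers the entire mapping.  The function
 * name is hypothetical.
 */
static void example_drop_clean_cache(struct inode *inode)
{
        unsigned long nr;

        /* only clean, unlocked, unmapped pages are dropped */
        nr = invalidate_mapping_pages(inode->i_mapping, 0, (pgoff_t)-1);
        pr_debug("invalidated %lu pages\n", nr);
}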
/*
 * This is like invalidate_complete_page(), except it ignores the page's
 * refcount.  We do this because invalidate_inode_pages2() needs stronger
 * invalidation guarantees, and cannot afford to leave pages behind because
 * shrink_page_list() has a temp ref on them, or because they're transiently
 * sitting in the lru_cache_add() pagevecs.
 */
static int
invalidate_complete_page2(struct address_space *mapping, struct page *page)
{
        unsigned long flags;

        if (page->mapping != mapping)
                return 0;

        if (page_has_private(page) && !try_to_release_page(page, GFP_KERNEL))
                return 0;

        xa_lock_irqsave(&mapping->i_pages, flags);
        if (PageDirty(page))
                goto failed;

        BUG_ON(page_has_private(page));
        __delete_from_page_cache(page, NULL);
        xa_unlock_irqrestore(&mapping->i_pages, flags);

        if (mapping->a_ops->freepage)
                mapping->a_ops->freepage(page);

        put_page(page); /* pagecache ref */
        return 1;
failed:
        xa_unlock_irqrestore(&mapping->i_pages, flags);
        return 0;
}

static int do_launder_page(struct address_space *mapping, struct page *page)
{
        if (!PageDirty(page))
                return 0;
        if (page->mapping != mapping || mapping->a_ops->launder_page == NULL)
                return 0;
        return mapping->a_ops->launder_page(page);
}
/**
 * invalidate_inode_pages2_range - remove range of pages from an address_space
 * @mapping: the address_space
 * @start: the page offset 'from' which to invalidate
 * @end: the page offset 'to' which to invalidate (inclusive)
 *
 * Any pages which are found to be mapped into pagetables are unmapped prior to
 * invalidation.
 *
 * Returns -EBUSY if any pages could not be invalidated.
 */
int invalidate_inode_pages2_range(struct address_space *mapping,
                                  pgoff_t start, pgoff_t end)
{
        pgoff_t indices[PAGEVEC_SIZE];
        struct pagevec pvec;
        pgoff_t index;
        int i;
        int ret = 0;
        int ret2 = 0;
        int did_range_unmap = 0;

        if (mapping->nrpages == 0 && mapping->nrexceptional == 0)
                goto out;

        pagevec_init(&pvec);
        index = start;
        while (index <= end && pagevec_lookup_entries(&pvec, mapping, index,
                        min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1,
                        indices)) {
                for (i = 0; i < pagevec_count(&pvec); i++) {
                        struct page *page = pvec.pages[i];

                        /* We rely upon deletion not changing page->index */
                        index = indices[i];
                        if (index > end)
                                break;

                        if (xa_is_value(page)) {
                                if (!invalidate_exceptional_entry2(mapping,
                                                                   index, page))
                                        ret = -EBUSY;
                                continue;
                        }

                        lock_page(page);
                        WARN_ON(page_to_index(page) != index);
                        if (page->mapping != mapping) {
                                unlock_page(page);
                                continue;
                        }
                        wait_on_page_writeback(page);
                        if (page_mapped(page)) {
                                if (!did_range_unmap) {
                                        /*
                                         * Zap the rest of the file in one hit.
                                         */
                                        unmap_mapping_pages(mapping, index,
                                                        (1 + end - index), false);
                                        did_range_unmap = 1;
                                } else {
                                        /*
                                         * Just zap this page
                                         */
                                        unmap_mapping_pages(mapping, index,
                                                        1, false);
                                }
                        }
                        BUG_ON(page_mapped(page));
                        ret2 = do_launder_page(mapping, page);
                        if (ret2 == 0) {
                                if (!invalidate_complete_page2(mapping, page))
                                        ret2 = -EBUSY;
                        }
                        if (ret2 < 0)
                                ret = ret2;
                        unlock_page(page);
                }
                pagevec_remove_exceptionals(&pvec);
                pagevec_release(&pvec);
                cond_resched();
                index++;
        }
        /*
         * For DAX we invalidate page tables after invalidating page cache.  We
         * could invalidate page tables while invalidating each entry however
         * that would be expensive.  And doing range unmapping before doesn't
         * work as we have no cheap way to tell whether a page cache entry got
         * remapped later.
         */
        if (dax_mapping(mapping)) {
                unmap_mapping_pages(mapping, start, end - start + 1, false);
        }
out:
        cleancache_invalidate_inode(mapping);
        return ret;
}
EXPORT_SYMBOL_GPL(invalidate_inode_pages2_range);
/**
 * invalidate_inode_pages2 - remove all pages from an address_space
 * @mapping: the address_space
 *
 * Any pages which are found to be mapped into pagetables are unmapped prior to
 * invalidation.
 *
 * Returns -EBUSY if any pages could not be invalidated.
 */
int invalidate_inode_pages2(struct address_space *mapping)
{
        return invalidate_inode_pages2_range(mapping, 0, -1);
}
EXPORT_SYMBOL_GPL(invalidate_inode_pages2);
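
/*
 * Illustrative sketch (not part of the original file): callers that need the
 * stronger guarantee (e.g. before direct IO reads the backing store) usually
 * write back dirty data first, then demand that every cached page goes away.
 * The function name is hypothetical.
 */
static int example_sync_and_invalidate(struct inode *inode)
{
        struct address_space *mapping = inode->i_mapping;
        int ret;

        ret = filemap_write_and_wait(mapping);
        if (ret)
                return ret;
        /* returns -EBUSY if any page could not be invalidated */
        return invalidate_inode_pages2(mapping);
}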
/**
 * truncate_pagecache - unmap and remove pagecache that has been truncated
 * @inode: inode
 * @newsize: new file size
 *
 * inode's new i_size must already be written before truncate_pagecache
 * is called.
 *
 * This function should typically be called before the filesystem
 * releases resources associated with the freed range (eg. deallocates
 * blocks).  This way, pagecache will always stay logically coherent
 * with on-disk format, and the filesystem would not have to deal with
 * situations such as writepage being called for a page that has already
 * had its underlying blocks deallocated.
 */
void truncate_pagecache(struct inode *inode, loff_t newsize)
{
        struct address_space *mapping = inode->i_mapping;
        loff_t holebegin = round_up(newsize, PAGE_SIZE);

        /*
         * unmap_mapping_range is called twice, first simply for
         * efficiency so that truncate_inode_pages does fewer
         * single-page unmaps.  However after this first call, and
         * before truncate_inode_pages finishes, it is possible for
         * private pages to be COWed, which remain after
         * truncate_inode_pages finishes, hence the second
         * unmap_mapping_range call must be made for correctness.
         */
        unmap_mapping_range(mapping, holebegin, 0, 1);
        truncate_inode_pages(mapping, newsize);
        unmap_mapping_range(mapping, holebegin, 0, 1);
}
EXPORT_SYMBOL(truncate_pagecache);
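
/*
 * Illustrative sketch (not part of the original file): the calling order the
 * kernel-doc above prescribes: publish the new i_size, shoot down the
 * pagecache, and only then free the on-disk blocks.  example_shrink_file is
 * a hypothetical helper.
 */
static void example_shrink_file(struct inode *inode, loff_t newsize)
{
        i_size_write(inode, newsize);
        truncate_pagecache(inode, newsize);
        /* now the filesystem may deallocate blocks beyond newsize */
}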
/**
 * truncate_setsize - update inode and pagecache for a new file size
 * @inode: inode
 * @newsize: new file size
 *
 * truncate_setsize updates i_size and performs pagecache truncation (if
 * necessary) to @newsize.  It will typically be called from the filesystem's
 * setattr function when ATTR_SIZE is passed in.
 *
 * Must be called with a lock serializing truncates and writes (generally
 * i_mutex but e.g. xfs uses a different lock) and before all filesystem
 * specific block truncation has been performed.
 */
void truncate_setsize(struct inode *inode, loff_t newsize)
{
        loff_t oldsize = inode->i_size;

        i_size_write(inode, newsize);
        if (newsize > oldsize)
                pagecache_isize_extended(inode, oldsize, newsize);
        truncate_pagecache(inode, newsize);
}
EXPORT_SYMBOL(truncate_setsize);
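
/*
 * Illustrative sketch (not part of the original file): the setattr shape the
 * kernel-doc above refers to, running under i_mutex (taken by the VFS for
 * setattr).  example_setattr is a hypothetical method; the VFS helpers used
 * here (setattr_prepare, setattr_copy) are real.
 */
static int example_setattr(struct dentry *dentry, struct iattr *attr)
{
        struct inode *inode = d_inode(dentry);
        int error;

        error = setattr_prepare(dentry, attr);
        if (error)
                return error;

        if ((attr->ia_valid & ATTR_SIZE) && attr->ia_size != i_size_read(inode))
                truncate_setsize(inode, attr->ia_size);

        setattr_copy(inode, attr);
        mark_inode_dirty(inode);
        return 0;
}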
/**
 * pagecache_isize_extended - update pagecache after extension of i_size
 * @inode: inode for which i_size was extended
 * @from: original inode size
 * @to: new inode size
 *
 * Handle extension of inode size either caused by extending truncate or by
 * write starting after current i_size.  We mark the page straddling current
 * i_size RO so that page_mkwrite() is called on the first write access to
 * the page.  This way the filesystem can be sure that page_mkwrite() is
 * called on the page before user writes to the page via mmap after the
 * i_size has been changed.
 *
 * The function must be called after i_size is updated so that a page fault
 * coming after we unlock the page will already see the new i_size.
 * The function must be called while we still hold i_mutex - this not only
 * makes sure i_size is stable but also that userspace cannot observe the new
 * i_size value before we are prepared to store mmap writes at the new inode
 * size.
 */
void pagecache_isize_extended(struct inode *inode, loff_t from, loff_t to)
{
        int bsize = i_blocksize(inode);
        loff_t rounded_from;
        struct page *page;
        pgoff_t index;

        WARN_ON(to > inode->i_size);

        if (from >= to || bsize == PAGE_SIZE)
                return;
        /* Page straddling @from will not have any hole block created? */
        rounded_from = round_up(from, bsize);
        if (to <= rounded_from || !(rounded_from & (PAGE_SIZE - 1)))
                return;

        index = from >> PAGE_SHIFT;
        page = find_lock_page(inode->i_mapping, index);
        /* Page not cached? Nothing to do */
        if (!page)
                return;
        /*
         * See clear_page_dirty_for_io() for details why set_page_dirty()
         * is needed.
         */
        if (page_mkclean(page))
                set_page_dirty(page);
        unlock_page(page);
        put_page(page);
}
EXPORT_SYMBOL(pagecache_isize_extended);
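
/*
 * Illustrative sketch (not part of the original file): a write path that
 * extended the file past the old i_size and must publish the new size before
 * calling pagecache_isize_extended(), as the kernel-doc above requires.  The
 * helper name is hypothetical.
 */
static void example_write_extended(struct inode *inode, loff_t old_size,
                                   loff_t new_size)
{
        i_size_write(inode, new_size);
        /* mark the page straddling old_size read-only so page_mkwrite()
         * fires on the next mmap write to it */
        pagecache_isize_extended(inode, old_size, new_size);
}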
/**
 * truncate_pagecache_range - unmap and remove pagecache that is hole-punched
 * @inode: inode
 * @lstart: offset of beginning of hole
 * @lend: offset of last byte of hole
 *
 * This function should typically be called before the filesystem
 * releases resources associated with the freed range (eg. deallocates
 * blocks).  This way, pagecache will always stay logically coherent
 * with on-disk format, and the filesystem would not have to deal with
 * situations such as writepage being called for a page that has already
 * had its underlying blocks deallocated.
 */
void truncate_pagecache_range(struct inode *inode, loff_t lstart, loff_t lend)
{
        struct address_space *mapping = inode->i_mapping;
        loff_t unmap_start = round_up(lstart, PAGE_SIZE);
        loff_t unmap_end = round_down(1 + lend, PAGE_SIZE) - 1;
        /*
         * This rounding is currently just for example: unmap_mapping_range
         * expands its hole outwards, whereas we want it to contract the hole
         * inwards.  However, existing callers of truncate_pagecache_range are
         * doing their own page rounding first.  Note that unmap_mapping_range
         * allows holelen 0 for all, and we allow lend -1 for end of file.
         */

        /*
         * Unlike in truncate_pagecache, unmap_mapping_range is called only
         * once (before truncating pagecache), and without "even_cows" flag:
         * hole-punching should not remove private COWed pages from the hole.
         */
        if ((u64)unmap_end > (u64)unmap_start)
                unmap_mapping_range(mapping, unmap_start,
                                    1 + unmap_end - unmap_start, 0);
        truncate_inode_pages_range(mapping, lstart, lend);
}
EXPORT_SYMBOL(truncate_pagecache_range);
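
/*
 * Illustrative sketch (not part of the original file): the hole-punch shape
 * (e.g. fallocate(FALLOC_FL_PUNCH_HOLE)) the kernel-doc above describes.
 * The pagecache is removed before the blocks are freed; locking against
 * concurrent writes and faults is the filesystem's job and is omitted here.
 * example_punch_hole is a hypothetical helper.
 */
static long example_punch_hole(struct inode *inode, loff_t offset, loff_t len)
{
        truncate_pagecache_range(inode, offset, offset + len - 1);
        /* now deallocate the underlying blocks of [offset, offset + len) */
        return 0;
}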