/*
 * fs/dax.c - Direct Access filesystem code
 * Copyright (c) 2013-2014 Intel Corporation
 * Author: Matthew Wilcox <matthew.r.wilcox@intel.com>
 * Author: Ross Zwisler <ross.zwisler@linux.intel.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 */

#include <linux/atomic.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/dax.h>
#include <linux/fs.h>
#include <linux/genhd.h>
#include <linux/highmem.h>
#include <linux/memcontrol.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/pagevec.h>
#include <linux/pmem.h>
#include <linux/sched.h>
#include <linux/uio.h>
#include <linux/vmstat.h>
#include <linux/pfn_t.h>
#include <linux/sizes.h>

#define RADIX_DAX_MASK	0xf
#define RADIX_DAX_SHIFT	4
#define RADIX_DAX_PTE	(0x4 | RADIX_TREE_EXCEPTIONAL_ENTRY)
#define RADIX_DAX_PMD	(0x8 | RADIX_TREE_EXCEPTIONAL_ENTRY)
#define RADIX_DAX_TYPE(entry)	((unsigned long)entry & RADIX_DAX_MASK)
#define RADIX_DAX_SECTOR(entry)	(((unsigned long)entry >> RADIX_DAX_SHIFT))
#define RADIX_DAX_ENTRY(sector, pmd) ((void *)((unsigned long)sector << \
		RADIX_DAX_SHIFT | (pmd ? RADIX_DAX_PMD : RADIX_DAX_PTE)))
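
/*
 * Illustrative sketch (not a helper in this file): how a PTE-sized entry for
 * sector 0x1234 round-trips through the macros above.
 *
 *	void *entry = RADIX_DAX_ENTRY(0x1234, false);
 *
 *	RADIX_DAX_TYPE(entry) == RADIX_DAX_PTE
 *	RADIX_DAX_SECTOR(entry) == 0x1234
 *
 * The low bits include RADIX_TREE_EXCEPTIONAL_ENTRY, so the page cache code
 * never mistakes such an entry for a struct page pointer.
 */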

static long dax_map_atomic(struct block_device *bdev, struct blk_dax_ctl *dax)
{
        struct request_queue *q = bdev->bd_queue;
        long rc = -EIO;

        dax->addr = (void __pmem *) ERR_PTR(-EIO);
        if (blk_queue_enter(q, true) != 0)
                return rc;

        rc = bdev_direct_access(bdev, dax);
        if (rc < 0) {
                dax->addr = (void __pmem *) ERR_PTR(rc);
                blk_queue_exit(q);
                return rc;
        }
        return rc;
}

static void dax_unmap_atomic(struct block_device *bdev,
                const struct blk_dax_ctl *dax)
{
        if (IS_ERR(dax->addr))
                return;
        blk_queue_exit(bdev->bd_queue);
}

struct page *read_dax_sector(struct block_device *bdev, sector_t n)
{
        struct page *page = alloc_pages(GFP_KERNEL, 0);
        struct blk_dax_ctl dax = {
                .size = PAGE_SIZE,
                .sector = n & ~((((int) PAGE_SIZE) / 512) - 1),
        };
        long rc;

        if (!page)
                return ERR_PTR(-ENOMEM);

        rc = dax_map_atomic(bdev, &dax);
        if (rc < 0)
                return ERR_PTR(rc);
        memcpy_from_pmem(page_address(page), dax.addr, PAGE_SIZE);
        dax_unmap_atomic(bdev, &dax);
        return page;
}

/*
 * dax_clear_sectors() is called from within transaction context from XFS,
 * and hence this means the stack from this point must follow GFP_NOFS
 * semantics for all operations.
 */
int dax_clear_sectors(struct block_device *bdev, sector_t _sector, long _size)
{
        struct blk_dax_ctl dax = {
                .sector = _sector,
                .size = _size,
        };

        might_sleep();
        do {
                long count, sz;

                count = dax_map_atomic(bdev, &dax);
                if (count < 0)
                        return count;
                sz = min_t(long, count, SZ_128K);
                clear_pmem(dax.addr, sz);
                dax.size -= sz;
                dax.sector += sz / 512;
                dax_unmap_atomic(bdev, &dax);
                cond_resched();
        } while (dax.size);

        wmb_pmem();
        return 0;
}
EXPORT_SYMBOL_GPL(dax_clear_sectors);
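
/*
 * Illustrative sketch (hypothetical helper, not part of this file): a
 * filesystem that has just allocated blocks for a DAX file can zero them
 * through dax_clear_sectors() before exposing them to readers.  The sector is
 * given in 512-byte units and the size in bytes, matching the loop above.
 *
 *	static int example_zero_extent(struct inode *inode, sector_t sector,
 *				       long size_in_bytes)
 *	{
 *		return dax_clear_sectors(inode->i_sb->s_bdev, sector,
 *					 size_in_bytes);
 *	}
 */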

static bool buffer_written(struct buffer_head *bh)
{
        return buffer_mapped(bh) && !buffer_unwritten(bh);
}

/*
 * When ext4 encounters a hole, it returns without modifying the buffer_head
 * which means that we can't trust b_size. To cope with this, we set b_state
 * to 0 before calling get_block and, if any bit is set, we know we can trust
 * b_size. Unfortunate, really, since ext4 knows precisely how long a hole is
 * and would save us time calling get_block repeatedly.
 */
static bool buffer_size_valid(struct buffer_head *bh)
{
        return bh->b_state != 0;
}

static sector_t to_sector(const struct buffer_head *bh,
                const struct inode *inode)
{
        sector_t sector = bh->b_blocknr << (inode->i_blkbits - 9);

        return sector;
}

static ssize_t dax_io(struct inode *inode, struct iov_iter *iter,
                      loff_t start, loff_t end, get_block_t get_block,
                      struct buffer_head *bh)
{
        loff_t pos = start, max = start, bh_max = start;
        bool hole = false, need_wmb = false;
        struct block_device *bdev = NULL;
        int rw = iov_iter_rw(iter), rc;
        long map_len = 0;
        struct blk_dax_ctl dax = {
                .addr = (void __pmem *) ERR_PTR(-EIO),
        };
        unsigned blkbits = inode->i_blkbits;
        sector_t file_blks = (i_size_read(inode) + (1 << blkbits) - 1)
                                                                >> blkbits;

        if (rw == READ)
                end = min(end, i_size_read(inode));

        while (pos < end) {
                size_t len;
                if (pos == max) {
                        long page = pos >> PAGE_SHIFT;
                        sector_t block = page << (PAGE_SHIFT - blkbits);
                        unsigned first = pos - (block << blkbits);
                        long size;

                        if (pos == bh_max) {
                                bh->b_size = PAGE_ALIGN(end - pos);
                                bh->b_state = 0;
                                rc = get_block(inode, block, bh, rw == WRITE);
                                if (rc)
                                        break;
                                if (!buffer_size_valid(bh))
                                        bh->b_size = 1 << blkbits;
                                bh_max = pos - first + bh->b_size;
                                bdev = bh->b_bdev;
                                /*
                                 * We allow uninitialized buffers for writes
                                 * beyond EOF as those cannot race with faults
                                 */
                                WARN_ON_ONCE(
                                        (buffer_new(bh) && block < file_blks) ||
                                        (rw == WRITE && buffer_unwritten(bh)));
                        } else {
                                unsigned done = bh->b_size -
                                                (bh_max - (pos - first));
                                bh->b_blocknr += done >> blkbits;
                                bh->b_size -= done;
                        }

                        hole = rw == READ && !buffer_written(bh);
                        if (hole) {
                                size = bh->b_size - first;
                        } else {
                                dax_unmap_atomic(bdev, &dax);
                                dax.sector = to_sector(bh, inode);
                                dax.size = bh->b_size;
                                map_len = dax_map_atomic(bdev, &dax);
                                if (map_len < 0) {
                                        rc = map_len;
                                        break;
                                }
                                dax.addr += first;
                                size = map_len - first;
                        }
                        max = min(pos + size, end);
                }

                if (iov_iter_rw(iter) == WRITE) {
                        len = copy_from_iter_pmem(dax.addr, max - pos, iter);
                        need_wmb = true;
                } else if (!hole)
                        len = copy_to_iter((void __force *) dax.addr, max - pos,
                                        iter);
                else
                        len = iov_iter_zero(max - pos, iter);

                if (!len) {
                        rc = -EFAULT;
                        break;
                }

                pos += len;
                if (!IS_ERR(dax.addr))
                        dax.addr += len;
        }

        if (need_wmb)
                wmb_pmem();
        dax_unmap_atomic(bdev, &dax);

        return (pos == start) ? rc : pos - start;
}

/**
 * dax_do_io - Perform I/O to a DAX file
 * @iocb: The control block for this I/O
 * @inode: The file which the I/O is directed at
 * @iter: The addresses to do I/O from or to
 * @pos: The file offset where the I/O starts
 * @get_block: The filesystem method used to translate file offsets to blocks
 * @end_io: A filesystem callback for I/O completion
 * @flags: See below
 *
 * This function uses the same locking scheme as do_blockdev_direct_IO:
 * If @flags has DIO_LOCKING set, we assume that the i_mutex is held by the
 * caller for writes. For reads, we take and release the i_mutex ourselves.
 * If DIO_LOCKING is not set, the filesystem takes care of its own locking.
 * As with do_blockdev_direct_IO(), we increment i_dio_count while the I/O
 * is in progress.
 */
ssize_t dax_do_io(struct kiocb *iocb, struct inode *inode,
                  struct iov_iter *iter, loff_t pos, get_block_t get_block,
                  dio_iodone_t end_io, int flags)
{
        struct buffer_head bh;
        ssize_t retval = -EINVAL;
        loff_t end = pos + iov_iter_count(iter);

        memset(&bh, 0, sizeof(bh));
        bh.b_bdev = inode->i_sb->s_bdev;

        if ((flags & DIO_LOCKING) && iov_iter_rw(iter) == READ) {
                struct address_space *mapping = inode->i_mapping;
                inode_lock(inode);
                retval = filemap_write_and_wait_range(mapping, pos, end - 1);
                if (retval) {
                        inode_unlock(inode);
                        goto out;
                }
        }

        /* Protects against truncate */
        if (!(flags & DIO_SKIP_DIO_COUNT))
                inode_dio_begin(inode);

        retval = dax_io(inode, iter, pos, end, get_block, &bh);

        if ((flags & DIO_LOCKING) && iov_iter_rw(iter) == READ)
                inode_unlock(inode);

        if (end_io) {
                int err;

                err = end_io(iocb, pos, retval, bh.b_private);
                if (err)
                        retval = err;
        }

        if (!(flags & DIO_SKIP_DIO_COUNT))
                inode_dio_end(inode);
 out:
        return retval;
}
EXPORT_SYMBOL_GPL(dax_do_io);
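
/*
 * Illustrative sketch (hypothetical filesystem code, not part of this file):
 * a filesystem's ->direct_IO method can route DAX inodes through dax_do_io()
 * instead of the block-layer direct I/O path.  example_get_block() stands in
 * for that filesystem's get_block_t implementation, and the ->direct_IO
 * prototype shown here may differ between kernel versions.
 *
 *	static ssize_t example_direct_IO(struct kiocb *iocb,
 *			struct iov_iter *iter, loff_t offset)
 *	{
 *		struct inode *inode = file_inode(iocb->ki_filp);
 *
 *		WARN_ON(!IS_DAX(inode));
 *		return dax_do_io(iocb, inode, iter, offset,
 *				example_get_block, NULL, DIO_LOCKING);
 *	}
 */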

/*
 * The user has performed a load from a hole in the file. Allocating
 * a new page in the file would cause excessive storage usage for
 * workloads with sparse files. We allocate a page cache page instead.
 * We'll kick it out of the page cache if it's ever written to,
 * otherwise it will simply fall out of the page cache under memory
 * pressure without ever having been dirtied.
 */
static int dax_load_hole(struct address_space *mapping, struct page *page,
                         struct vm_fault *vmf)
{
        unsigned long size;
        struct inode *inode = mapping->host;

        if (!page)
                page = find_or_create_page(mapping, vmf->pgoff,
                                GFP_KERNEL | __GFP_ZERO);
        if (!page)
                return VM_FAULT_OOM;
        /* Recheck i_size under page lock to avoid truncate race */
        size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
        if (vmf->pgoff >= size) {
                unlock_page(page);
                put_page(page);
                return VM_FAULT_SIGBUS;
        }

        vmf->page = page;
        return VM_FAULT_LOCKED;
}

static int copy_user_bh(struct page *to, struct inode *inode,
                struct buffer_head *bh, unsigned long vaddr)
{
        struct blk_dax_ctl dax = {
                .sector = to_sector(bh, inode),
                .size = bh->b_size,
        };
        struct block_device *bdev = bh->b_bdev;
        void *vto;

        if (dax_map_atomic(bdev, &dax) < 0)
                return PTR_ERR(dax.addr);
        vto = kmap_atomic(to);
        copy_user_page(vto, (void __force *)dax.addr, vaddr, to);
        kunmap_atomic(vto);
        dax_unmap_atomic(bdev, &dax);
        return 0;
}

#define NO_SECTOR -1
#define DAX_PMD_INDEX(page_index) (page_index & (PMD_MASK >> PAGE_SHIFT))

static int dax_radix_entry(struct address_space *mapping, pgoff_t index,
                sector_t sector, bool pmd_entry, bool dirty)
{
        struct radix_tree_root *page_tree = &mapping->page_tree;
        pgoff_t pmd_index = DAX_PMD_INDEX(index);
        int type, error = 0;
        void *entry;

        WARN_ON_ONCE(pmd_entry && !dirty);

        if (dirty)
                __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);

        spin_lock_irq(&mapping->tree_lock);

        entry = radix_tree_lookup(page_tree, pmd_index);
        if (entry && RADIX_DAX_TYPE(entry) == RADIX_DAX_PMD) {
                index = pmd_index;
                goto dirty;
        }

        entry = radix_tree_lookup(page_tree, index);
        if (entry) {
                type = RADIX_DAX_TYPE(entry);
                if (WARN_ON_ONCE(type != RADIX_DAX_PTE &&
                                        type != RADIX_DAX_PMD)) {
                        error = -EIO;
                        goto unlock;
                }

                if (!pmd_entry || type == RADIX_DAX_PMD)
                        goto dirty;

                /*
                 * We only insert dirty PMD entries into the radix tree. This
                 * means we don't need to worry about removing a dirty PTE
                 * entry and inserting a clean PMD entry, thus reducing the
                 * range we would flush with a follow-up fsync/msync call.
                 */
                radix_tree_delete(&mapping->page_tree, index);
                mapping->nrexceptional--;
        }

        if (sector == NO_SECTOR) {
                /*
                 * This can happen during correct operation if our pfn_mkwrite
                 * fault raced against a hole punch operation. If this
                 * happens the pte that was hole punched will have been
                 * unmapped and the radix tree entry will have been removed by
                 * the time we are called, but the call will still happen. We
                 * will return all the way up to wp_pfn_shared(), where the
                 * pte_same() check will fail, eventually causing page fault
                 * to be retried by the CPU.
                 */
                goto unlock;
        }

        error = radix_tree_insert(page_tree, index,
                        RADIX_DAX_ENTRY(sector, pmd_entry));
        if (error)
                goto unlock;

        mapping->nrexceptional++;
 dirty:
        if (dirty)
                radix_tree_tag_set(page_tree, index, PAGECACHE_TAG_DIRTY);
 unlock:
        spin_unlock_irq(&mapping->tree_lock);
        return error;
}

static int dax_writeback_one(struct block_device *bdev,
                struct address_space *mapping, pgoff_t index, void *entry)
{
        struct radix_tree_root *page_tree = &mapping->page_tree;
        int type = RADIX_DAX_TYPE(entry);
        struct radix_tree_node *node;
        struct blk_dax_ctl dax;
        void **slot;
        int ret = 0;

        spin_lock_irq(&mapping->tree_lock);
        /*
         * Regular page slots are stabilized by the page lock even
         * without the tree itself locked. These unlocked entries
         * need verification under the tree lock.
         */
        if (!__radix_tree_lookup(page_tree, index, &node, &slot))
                goto unlock;
        if (*slot != entry)
                goto unlock;

        /* another fsync thread may have already written back this entry */
        if (!radix_tree_tag_get(page_tree, index, PAGECACHE_TAG_TOWRITE))
                goto unlock;

        if (WARN_ON_ONCE(type != RADIX_DAX_PTE && type != RADIX_DAX_PMD)) {
                ret = -EIO;
                goto unlock;
        }

        dax.sector = RADIX_DAX_SECTOR(entry);
        dax.size = (type == RADIX_DAX_PMD ? PMD_SIZE : PAGE_SIZE);
        spin_unlock_irq(&mapping->tree_lock);

        /*
         * We cannot hold tree_lock while calling dax_map_atomic() because it
         * eventually calls cond_resched().
         */
        ret = dax_map_atomic(bdev, &dax);
        if (ret < 0)
                return ret;

        if (WARN_ON_ONCE(ret < dax.size)) {
                ret = -EIO;
                goto unmap;
        }

        wb_cache_pmem(dax.addr, dax.size);

        spin_lock_irq(&mapping->tree_lock);
        radix_tree_tag_clear(page_tree, index, PAGECACHE_TAG_TOWRITE);
        spin_unlock_irq(&mapping->tree_lock);
 unmap:
        dax_unmap_atomic(bdev, &dax);
        return ret;

 unlock:
        spin_unlock_irq(&mapping->tree_lock);
        return ret;
}

/*
 * Flush the mapping to the persistent domain within the byte range of [start,
 * end]. This is required by data integrity operations to ensure file data is
 * on persistent storage prior to completion of the operation.
 */
int dax_writeback_mapping_range(struct address_space *mapping,
                struct block_device *bdev, struct writeback_control *wbc)
{
        struct inode *inode = mapping->host;
        pgoff_t start_index, end_index, pmd_index;
        pgoff_t indices[PAGEVEC_SIZE];
        struct pagevec pvec;
        bool done = false;
        int i, ret = 0;
        void *entry;

        if (WARN_ON_ONCE(inode->i_blkbits != PAGE_SHIFT))
                return -EIO;

        if (!mapping->nrexceptional || wbc->sync_mode != WB_SYNC_ALL)
                return 0;

        start_index = wbc->range_start >> PAGE_SHIFT;
        end_index = wbc->range_end >> PAGE_SHIFT;
        pmd_index = DAX_PMD_INDEX(start_index);

        rcu_read_lock();
        entry = radix_tree_lookup(&mapping->page_tree, pmd_index);
        rcu_read_unlock();

        /* see if the start of our range is covered by a PMD entry */
        if (entry && RADIX_DAX_TYPE(entry) == RADIX_DAX_PMD)
                start_index = pmd_index;

        tag_pages_for_writeback(mapping, start_index, end_index);

        pagevec_init(&pvec, 0);
        while (!done) {
                pvec.nr = find_get_entries_tag(mapping, start_index,
                                PAGECACHE_TAG_TOWRITE, PAGEVEC_SIZE,
                                pvec.pages, indices);

                if (pvec.nr == 0)
                        break;

                for (i = 0; i < pvec.nr; i++) {
                        if (indices[i] > end_index) {
                                done = true;
                                break;
                        }

                        ret = dax_writeback_one(bdev, mapping, indices[i],
                                        pvec.pages[i]);
                        if (ret < 0)
                                return ret;
                }
        }
        wmb_pmem();
        return 0;
}
EXPORT_SYMBOL_GPL(dax_writeback_mapping_range);
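
/*
 * Illustrative sketch (hypothetical filesystem code, not part of this file):
 * a filesystem typically calls this from its ->writepages method so that
 * fsync()/msync() reach it via filemap_write_and_wait_range() and dirty DAX
 * mappings get flushed to the persistent domain, e.g.:
 *
 *	static int example_writepages(struct address_space *mapping,
 *				      struct writeback_control *wbc)
 *	{
 *		return dax_writeback_mapping_range(mapping,
 *				mapping->host->i_sb->s_bdev, wbc);
 *	}
 */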

static int dax_insert_mapping(struct inode *inode, struct buffer_head *bh,
                        struct vm_area_struct *vma, struct vm_fault *vmf)
{
        unsigned long vaddr = (unsigned long)vmf->virtual_address;
        struct address_space *mapping = inode->i_mapping;
        struct block_device *bdev = bh->b_bdev;
        struct blk_dax_ctl dax = {
                .sector = to_sector(bh, inode),
                .size = bh->b_size,
        };
        pgoff_t size;
        int error;

        i_mmap_lock_read(mapping);

        /*
         * Check truncate didn't happen while we were allocating a block.
         * If it did, this block may or may not be still allocated to the
         * file. We can't tell the filesystem to free it because we can't
         * take i_mutex here. In the worst case, the file still has blocks
         * allocated past the end of the file.
         */
        size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
        if (unlikely(vmf->pgoff >= size)) {
                error = -EIO;
                goto out;
        }

        if (dax_map_atomic(bdev, &dax) < 0) {
                error = PTR_ERR(dax.addr);
                goto out;
        }
        dax_unmap_atomic(bdev, &dax);

        error = dax_radix_entry(mapping, vmf->pgoff, dax.sector, false,
                        vmf->flags & FAULT_FLAG_WRITE);
        if (error)
                goto out;

        error = vm_insert_mixed(vma, vaddr, dax.pfn);

 out:
        i_mmap_unlock_read(mapping);

        return error;
}

/**
 * __dax_fault - handle a page fault on a DAX file
 * @vma: The virtual memory area where the fault occurred
 * @vmf: The description of the fault
 * @get_block: The filesystem method used to translate file offsets to blocks
 *
 * When a page fault occurs, filesystems may call this helper in their
 * fault handler for DAX files. __dax_fault() assumes the caller has done all
 * the necessary locking for the page fault to proceed successfully.
 */
int __dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
                get_block_t get_block)
{
        struct file *file = vma->vm_file;
        struct address_space *mapping = file->f_mapping;
        struct inode *inode = mapping->host;
        struct page *page;
        struct buffer_head bh;
        unsigned long vaddr = (unsigned long)vmf->virtual_address;
        unsigned blkbits = inode->i_blkbits;
        sector_t block;
        pgoff_t size;
        int error;
        int major = 0;

        size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
        if (vmf->pgoff >= size)
                return VM_FAULT_SIGBUS;

        memset(&bh, 0, sizeof(bh));
        block = (sector_t)vmf->pgoff << (PAGE_SHIFT - blkbits);
        bh.b_bdev = inode->i_sb->s_bdev;
        bh.b_size = PAGE_SIZE;

 repeat:
        page = find_get_page(mapping, vmf->pgoff);
        if (page) {
                if (!lock_page_or_retry(page, vma->vm_mm, vmf->flags)) {
                        put_page(page);
                        return VM_FAULT_RETRY;
                }
                if (unlikely(page->mapping != mapping)) {
                        unlock_page(page);
                        put_page(page);
                        goto repeat;
                }
                size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
                if (unlikely(vmf->pgoff >= size)) {
                        /*
                         * We have a struct page covering a hole in the file
                         * from a read fault and we've raced with a truncate
                         */
                        error = -EIO;
                        goto unlock_page;
                }
        }

        error = get_block(inode, block, &bh, 0);
        if (!error && (bh.b_size < PAGE_SIZE))
                error = -EIO;           /* fs corruption? */
        if (error)
                goto unlock_page;

        if (!buffer_mapped(&bh) && !vmf->cow_page) {
                if (vmf->flags & FAULT_FLAG_WRITE) {
                        error = get_block(inode, block, &bh, 1);
                        count_vm_event(PGMAJFAULT);
                        mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT);
                        major = VM_FAULT_MAJOR;
                        if (!error && (bh.b_size < PAGE_SIZE))
                                error = -EIO;
                        if (error)
                                goto unlock_page;
                } else {
                        return dax_load_hole(mapping, page, vmf);
                }
        }

        if (vmf->cow_page) {
                struct page *new_page = vmf->cow_page;
                if (buffer_written(&bh))
                        error = copy_user_bh(new_page, inode, &bh, vaddr);
                else
                        clear_user_highpage(new_page, vaddr);
                if (error)
                        goto unlock_page;
                vmf->page = page;
                if (!page) {
                        i_mmap_lock_read(mapping);
                        /* Check we didn't race with truncate */
                        size = (i_size_read(inode) + PAGE_SIZE - 1) >>
                                                                PAGE_SHIFT;
                        if (vmf->pgoff >= size) {
                                i_mmap_unlock_read(mapping);
                                error = -EIO;
                                goto out;
                        }
                }
                return VM_FAULT_LOCKED;
        }

        /* Check we didn't race with a read fault installing a new page */
        if (!page && major)
                page = find_lock_page(mapping, vmf->pgoff);

        if (page) {
                unmap_mapping_range(mapping, vmf->pgoff << PAGE_SHIFT,
                                                        PAGE_SIZE, 0);
                delete_from_page_cache(page);
                unlock_page(page);
                put_page(page);
                page = NULL;
        }

        /* Filesystem should not return unwritten buffers to us! */
        WARN_ON_ONCE(buffer_unwritten(&bh) || buffer_new(&bh));
        error = dax_insert_mapping(inode, &bh, vma, vmf);

 out:
        if (error == -ENOMEM)
                return VM_FAULT_OOM | major;
        /* -EBUSY is fine, somebody else faulted on the same PTE */
        if ((error < 0) && (error != -EBUSY))
                return VM_FAULT_SIGBUS | major;
        return VM_FAULT_NOPAGE | major;

 unlock_page:
        if (page) {
                unlock_page(page);
                put_page(page);
        }
        goto out;
}
EXPORT_SYMBOL(__dax_fault);

/**
 * dax_fault - handle a page fault on a DAX file
 * @vma: The virtual memory area where the fault occurred
 * @vmf: The description of the fault
 * @get_block: The filesystem method used to translate file offsets to blocks
 *
 * When a page fault occurs, filesystems may call this helper in their
 * fault handler for DAX files.
 */
int dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
              get_block_t get_block)
{
        int result;
        struct super_block *sb = file_inode(vma->vm_file)->i_sb;

        if (vmf->flags & FAULT_FLAG_WRITE) {
                sb_start_pagefault(sb);
                file_update_time(vma->vm_file);
        }
        result = __dax_fault(vma, vmf, get_block);
        if (vmf->flags & FAULT_FLAG_WRITE)
                sb_end_pagefault(sb);

        return result;
}
EXPORT_SYMBOL_GPL(dax_fault);
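
/*
 * Illustrative sketch (hypothetical filesystem code, not part of this file):
 * dax_fault() is meant to be called from a filesystem's
 * vm_operations_struct ->fault handler, passing that filesystem's
 * get_block_t implementation (example_get_block() here is hypothetical).
 *
 *	static int example_dax_fault(struct vm_area_struct *vma,
 *				     struct vm_fault *vmf)
 *	{
 *		return dax_fault(vma, vmf, example_get_block);
 *	}
 *
 * Filesystems that take their own locks around the fault and do their own
 * sb_start_pagefault()/file_update_time() handling call __dax_fault()
 * directly instead, as the kernel-doc above describes.
 */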

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
 * The 'colour' (ie low bits) within a PMD of a page offset. This comes up
 * more often than one might expect in the below function.
 */
#define PG_PMD_COLOUR	((PMD_SIZE >> PAGE_SHIFT) - 1)

static void __dax_dbg(struct buffer_head *bh, unsigned long address,
                const char *reason, const char *fn)
{
        if (bh) {
                char bname[BDEVNAME_SIZE];
                bdevname(bh->b_bdev, bname);
                pr_debug("%s: %s addr: %lx dev %s state %lx start %lld "
                        "length %zd fallback: %s\n", fn, current->comm,
                        address, bname, bh->b_state, (u64)bh->b_blocknr,
                        bh->b_size, reason);
        } else {
                pr_debug("%s: %s addr: %lx fallback: %s\n", fn,
                        current->comm, address, reason);
        }
}

#define dax_pmd_dbg(bh, address, reason)	__dax_dbg(bh, address, reason, "dax_pmd")

int __dax_pmd_fault(struct vm_area_struct *vma, unsigned long address,
                pmd_t *pmd, unsigned int flags, get_block_t get_block)
{
        struct file *file = vma->vm_file;
        struct address_space *mapping = file->f_mapping;
        struct inode *inode = mapping->host;
        struct buffer_head bh;
        unsigned blkbits = inode->i_blkbits;
        unsigned long pmd_addr = address & PMD_MASK;
        bool write = flags & FAULT_FLAG_WRITE;
        struct block_device *bdev;
        pgoff_t size, pgoff;
        sector_t block;
        int error, result = 0;
        bool alloc = false;

        /* dax pmd mappings require pfn_t_devmap() */
        if (!IS_ENABLED(CONFIG_FS_DAX_PMD))
                return VM_FAULT_FALLBACK;

        /* Fall back to PTEs if we're going to COW */
        if (write && !(vma->vm_flags & VM_SHARED)) {
                split_huge_pmd(vma, pmd, address);
                dax_pmd_dbg(NULL, address, "cow write");
                return VM_FAULT_FALLBACK;
        }
        /* If the PMD would extend outside the VMA */
        if (pmd_addr < vma->vm_start) {
                dax_pmd_dbg(NULL, address, "vma start unaligned");
                return VM_FAULT_FALLBACK;
        }
        if ((pmd_addr + PMD_SIZE) > vma->vm_end) {
                dax_pmd_dbg(NULL, address, "vma end unaligned");
                return VM_FAULT_FALLBACK;
        }

        pgoff = linear_page_index(vma, pmd_addr);
        size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
        if (pgoff >= size)
                return VM_FAULT_SIGBUS;
        /* If the PMD would cover blocks out of the file */
        if ((pgoff | PG_PMD_COLOUR) >= size) {
                dax_pmd_dbg(NULL, address,
                                "offset + huge page size > file size");
                return VM_FAULT_FALLBACK;
        }

        memset(&bh, 0, sizeof(bh));
        bh.b_bdev = inode->i_sb->s_bdev;
        block = (sector_t)pgoff << (PAGE_SHIFT - blkbits);

        bh.b_size = PMD_SIZE;

        if (get_block(inode, block, &bh, 0) != 0)
                return VM_FAULT_SIGBUS;

        if (!buffer_mapped(&bh) && write) {
                if (get_block(inode, block, &bh, 1) != 0)
                        return VM_FAULT_SIGBUS;
                alloc = true;
                WARN_ON_ONCE(buffer_unwritten(&bh) || buffer_new(&bh));
        }

        bdev = bh.b_bdev;

        /*
         * If the filesystem isn't willing to tell us the length of a hole,
         * just fall back to PTEs. Calling get_block 512 times in a loop
         * would be silly.
         */
        if (!buffer_size_valid(&bh) || bh.b_size < PMD_SIZE) {
                dax_pmd_dbg(&bh, address, "allocated block too small");
                return VM_FAULT_FALLBACK;
        }

        /*
         * If we allocated new storage, make sure no process has any
         * zero pages covering this hole
         */
        if (alloc) {
                loff_t lstart = pgoff << PAGE_SHIFT;
                loff_t lend = lstart + PMD_SIZE - 1; /* inclusive */

                truncate_pagecache_range(inode, lstart, lend);
        }

        i_mmap_lock_read(mapping);

        /*
         * If a truncate happened while we were allocating blocks, we may
         * leave blocks allocated to the file that are beyond EOF. We can't
         * take i_mutex here, so just leave them hanging; they'll be freed
         * when the file is deleted.
         */
        size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
        if (pgoff >= size) {
                result = VM_FAULT_SIGBUS;
                goto out;
        }
        if ((pgoff | PG_PMD_COLOUR) >= size) {
                dax_pmd_dbg(&bh, address,
                                "offset + huge page size > file size");
                goto fallback;
        }

        if (!write && !buffer_mapped(&bh) && buffer_uptodate(&bh)) {
                spinlock_t *ptl;
                pmd_t entry;
                struct page *zero_page = get_huge_zero_page();

                if (unlikely(!zero_page)) {
                        dax_pmd_dbg(&bh, address, "no zero page");
                        goto fallback;
                }

                ptl = pmd_lock(vma->vm_mm, pmd);
                if (!pmd_none(*pmd)) {
                        spin_unlock(ptl);
                        dax_pmd_dbg(&bh, address, "pmd already present");
                        goto fallback;
                }

                dev_dbg(part_to_dev(bdev->bd_part),
                                "%s: %s addr: %lx pfn: <zero> sect: %llx\n",
                                __func__, current->comm, address,
                                (unsigned long long) to_sector(&bh, inode));

                entry = mk_pmd(zero_page, vma->vm_page_prot);
                entry = pmd_mkhuge(entry);
                set_pmd_at(vma->vm_mm, pmd_addr, pmd, entry);
                result = VM_FAULT_NOPAGE;
                spin_unlock(ptl);
        } else {
                struct blk_dax_ctl dax = {
                        .sector = to_sector(&bh, inode),
                        .size = PMD_SIZE,
                };
                long length = dax_map_atomic(bdev, &dax);

                if (length < 0) {
                        result = VM_FAULT_SIGBUS;
                        goto out;
                }
                if (length < PMD_SIZE) {
                        dax_pmd_dbg(&bh, address, "dax-length too small");
                        dax_unmap_atomic(bdev, &dax);
                        goto fallback;
                }
                if (pfn_t_to_pfn(dax.pfn) & PG_PMD_COLOUR) {
                        dax_pmd_dbg(&bh, address, "pfn unaligned");
                        dax_unmap_atomic(bdev, &dax);
                        goto fallback;
                }

                if (!pfn_t_devmap(dax.pfn)) {
                        dax_unmap_atomic(bdev, &dax);
                        dax_pmd_dbg(&bh, address, "pfn not in memmap");
                        goto fallback;
                }
                dax_unmap_atomic(bdev, &dax);

                /*
                 * For PTE faults we insert a radix tree entry for reads, and
                 * leave it clean. Then on the first write we dirty the radix
                 * tree entry via the dax_pfn_mkwrite() path. This sequence
                 * allows the dax_pfn_mkwrite() call to be simpler and avoid a
                 * call into get_block() to translate the pgoff to a sector in
                 * order to be able to create a new radix tree entry.
                 *
                 * The PMD path doesn't have an equivalent to
                 * dax_pfn_mkwrite(), though, so for a read followed by a
                 * write we traverse all the way through __dax_pmd_fault()
                 * twice. This means we can just skip inserting a radix tree
                 * entry completely on the initial read and just wait until
                 * the write to insert a dirty entry.
                 */
                if (write) {
                        error = dax_radix_entry(mapping, pgoff, dax.sector,
                                        true, true);
                        if (error) {
                                dax_pmd_dbg(&bh, address,
                                                "PMD radix insertion failed");
                                goto fallback;
                        }
                }

                dev_dbg(part_to_dev(bdev->bd_part),
                                "%s: %s addr: %lx pfn: %lx sect: %llx\n",
                                __func__, current->comm, address,
                                pfn_t_to_pfn(dax.pfn),
                                (unsigned long long) dax.sector);
                result |= vmf_insert_pfn_pmd(vma, address, pmd,
                                dax.pfn, write);
        }

 out:
        i_mmap_unlock_read(mapping);

        return result;

 fallback:
        count_vm_event(THP_FAULT_FALLBACK);
        result = VM_FAULT_FALLBACK;
        goto out;
}
EXPORT_SYMBOL_GPL(__dax_pmd_fault);

/**
 * dax_pmd_fault - handle a PMD fault on a DAX file
 * @vma: The virtual memory area where the fault occurred
 * @address: The virtual address of the fault
 * @pmd: Pointer to the PMD entry covering @address
 * @flags: The fault flags (FAULT_FLAG_*)
 * @get_block: The filesystem method used to translate file offsets to blocks
 *
 * When a page fault occurs, filesystems may call this helper in their
 * pmd_fault handler for DAX files.
 */
int dax_pmd_fault(struct vm_area_struct *vma, unsigned long address,
                        pmd_t *pmd, unsigned int flags, get_block_t get_block)
{
        int result;
        struct super_block *sb = file_inode(vma->vm_file)->i_sb;

        if (flags & FAULT_FLAG_WRITE) {
                sb_start_pagefault(sb);
                file_update_time(vma->vm_file);
        }
        result = __dax_pmd_fault(vma, address, pmd, flags, get_block);
        if (flags & FAULT_FLAG_WRITE)
                sb_end_pagefault(sb);

        return result;
}
EXPORT_SYMBOL_GPL(dax_pmd_fault);
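
/*
 * Illustrative sketch (hypothetical filesystem code, not part of this file):
 * the PMD entry point pairs with the PTE one via the vm_operations_struct
 * ->pmd_fault hook of this kernel generation, e.g.:
 *
 *	static int example_dax_pmd_fault(struct vm_area_struct *vma,
 *			unsigned long addr, pmd_t *pmd, unsigned int flags)
 *	{
 *		return dax_pmd_fault(vma, addr, pmd, flags, example_get_block);
 *	}
 *
 * When the PMD path cannot be used it returns VM_FAULT_FALLBACK and the core
 * mm retries the fault with PTEs.
 */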
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

/**
 * dax_pfn_mkwrite - handle first write to DAX page
 * @vma: The virtual memory area where the fault occurred
 * @vmf: The description of the fault
 */
int dax_pfn_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        struct file *file = vma->vm_file;
        int error;

        /*
         * We pass NO_SECTOR to dax_radix_entry() because we expect that a
         * RADIX_DAX_PTE entry already exists in the radix tree from a
         * previous call to __dax_fault(). We just want to look up that PTE
         * entry using vmf->pgoff and make sure the dirty tag is set. This
         * saves us from having to make a call to get_block() here to look
         * up the sector.
         */
        error = dax_radix_entry(file->f_mapping, vmf->pgoff, NO_SECTOR, false,
                        true);

        if (error == -ENOMEM)
                return VM_FAULT_OOM;
        if (error)
                return VM_FAULT_SIGBUS;
        return VM_FAULT_NOPAGE;
}
EXPORT_SYMBOL_GPL(dax_pfn_mkwrite);
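
/*
 * Illustrative sketch (hypothetical filesystem code, not part of this file):
 * how the fault helpers above could be wired into a vm_operations_struct for
 * a DAX-capable file.  All example_* names are placeholders; ->page_mkwrite
 * and ->pfn_mkwrite point at thin wrappers around dax_fault() and
 * dax_pfn_mkwrite(), possibly taking filesystem locks first, similar to what
 * in-tree DAX filesystems of this era do.
 *
 *	static const struct vm_operations_struct example_dax_vm_ops = {
 *		.fault		= example_dax_fault,
 *		.pmd_fault	= example_dax_pmd_fault,
 *		.page_mkwrite	= example_dax_fault,
 *		.pfn_mkwrite	= example_dax_pfn_mkwrite,
 *	};
 */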

/**
 * dax_zero_page_range - zero a range within a page of a DAX file
 * @inode: The file being truncated
 * @from: The file offset that is being truncated to
 * @length: The number of bytes to zero
 * @get_block: The filesystem method used to translate file offsets to blocks
 *
 * This function can be called by a filesystem when it is zeroing part of a
 * page in a DAX file. This is intended for hole-punch operations. If
 * you are truncating a file, the helper function dax_truncate_page() may be
 * more convenient.
 *
 * We work in terms of PAGE_SIZE here for commonality with
 * block_truncate_page(), but we could go down to the filesystem block size
 * if the filesystem took care of disposing of the unnecessary blocks. Even
 * if the filesystem block size is smaller than PAGE_SIZE, we have to zero
 * the rest of the page since the file might be mmapped.
 */
int dax_zero_page_range(struct inode *inode, loff_t from, unsigned length,
                                                        get_block_t get_block)
{
        struct buffer_head bh;
        pgoff_t index = from >> PAGE_SHIFT;
        unsigned offset = from & (PAGE_SIZE-1);
        int err;

        /* Block boundary? Nothing to do */
        if (!length)
                return 0;
        BUG_ON((offset + length) > PAGE_SIZE);

        memset(&bh, 0, sizeof(bh));
        bh.b_bdev = inode->i_sb->s_bdev;
        bh.b_size = PAGE_SIZE;
        err = get_block(inode, index, &bh, 0);
        if (err < 0)
                return err;
        if (buffer_written(&bh)) {
                struct block_device *bdev = bh.b_bdev;
                struct blk_dax_ctl dax = {
                        .sector = to_sector(&bh, inode),
                        .size = PAGE_SIZE,
                };

                if (dax_map_atomic(bdev, &dax) < 0)
                        return PTR_ERR(dax.addr);
                clear_pmem(dax.addr + offset, length);
                wmb_pmem();
                dax_unmap_atomic(bdev, &dax);
        }

        return 0;
}
EXPORT_SYMBOL_GPL(dax_zero_page_range);

/**
 * dax_truncate_page - handle a partial page being truncated in a DAX file
 * @inode: The file being truncated
 * @from: The file offset that is being truncated to
 * @get_block: The filesystem method used to translate file offsets to blocks
 *
 * Similar to block_truncate_page(), this function can be called by a
 * filesystem when it is truncating a DAX file to handle the partial page.
 *
 * We work in terms of PAGE_SIZE here for commonality with
 * block_truncate_page(), but we could go down to the filesystem block size
 * if the filesystem took care of disposing of the unnecessary blocks. Even
 * if the filesystem block size is smaller than PAGE_SIZE, we have to zero
 * the rest of the page since the file might be mmapped.
 */
int dax_truncate_page(struct inode *inode, loff_t from, get_block_t get_block)
{
        unsigned length = PAGE_ALIGN(from) - from;

        return dax_zero_page_range(inode, from, length, get_block);
}
EXPORT_SYMBOL_GPL(dax_truncate_page);
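
/*
 * Illustrative sketch (hypothetical filesystem code, not part of this file):
 * a filesystem's truncate path can zero the tail of the last partial page
 * with dax_truncate_page() before shrinking i_size, so a later mmap read of
 * that page does not see stale data.  example_setsize() and
 * example_get_block() are placeholders.
 *
 *	static int example_setsize(struct inode *inode, loff_t newsize)
 *	{
 *		int error = dax_truncate_page(inode, newsize,
 *					      example_get_block);
 *		if (error)
 *			return error;
 *		truncate_setsize(inode, newsize);
 *		return 0;
 *	}
 *
 * For hole punching, dax_zero_page_range() is used instead to zero the
 * partial pages at each end of the punched range.
 */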