@@ -332,10 +332,7 @@ xfs_file_dax_read(
 	struct kiocb		*iocb,
 	struct iov_iter		*to)
 {
-	struct address_space	*mapping = iocb->ki_filp->f_mapping;
-	struct inode		*inode = mapping->host;
-	struct xfs_inode	*ip = XFS_I(inode);
-	struct iov_iter		data = *to;
+	struct xfs_inode	*ip = XFS_I(iocb->ki_filp->f_mapping->host);
 	size_t			count = iov_iter_count(to);
 	ssize_t			ret = 0;
 
@@ -345,11 +342,7 @@ xfs_file_dax_read(
 		return 0; /* skip atime */
 
 	xfs_rw_ilock(ip, XFS_IOLOCK_SHARED);
-	ret = dax_do_io(iocb, inode, &data, xfs_get_blocks_direct, NULL, 0);
-	if (ret > 0) {
-		iocb->ki_pos += ret;
-		iov_iter_advance(to, ret);
-	}
+	ret = iomap_dax_rw(iocb, to, &xfs_iomap_ops);
 	xfs_rw_iunlock(ip, XFS_IOLOCK_SHARED);
 
 	file_accessed(iocb->ki_filp);
@@ -711,52 +704,32 @@ xfs_file_dax_write(
 	struct kiocb		*iocb,
 	struct iov_iter		*from)
 {
-	struct address_space	*mapping = iocb->ki_filp->f_mapping;
-	struct inode		*inode = mapping->host;
+	struct inode		*inode = iocb->ki_filp->f_mapping->host;
 	struct xfs_inode	*ip = XFS_I(inode);
-	ssize_t			ret = 0;
 	int			iolock = XFS_IOLOCK_EXCL;
-	struct iov_iter		data;
+	ssize_t			ret, error = 0;
+	size_t			count;
+	loff_t			pos;
 
 	xfs_rw_ilock(ip, iolock);
 	ret = xfs_file_aio_write_checks(iocb, from, &iolock);
 	if (ret)
 		goto out;
 
-	/*
-	 * Yes, even DAX files can have page cache attached to them: A zeroed
-	 * page is inserted into the pagecache when we have to serve a write
-	 * fault on a hole.  It should never be dirtied and can simply be
-	 * dropped from the pagecache once we get real data for the page.
-	 *
-	 * XXX: This is racy against mmap, and there's nothing we can do about
-	 * it. dax_do_io() should really do this invalidation internally as
-	 * it will know if we've allocated over a hole for this specific IO and
-	 * if so it needs to update the mapping tree and invalidate existing
-	 * PTEs over the newly allocated range.  Remove this invalidation when
-	 * dax_do_io() is fixed up.
-	 */
-	if (mapping->nrpages) {
-		loff_t end = iocb->ki_pos + iov_iter_count(from) - 1;
-
-		ret = invalidate_inode_pages2_range(mapping,
-						    iocb->ki_pos >> PAGE_SHIFT,
-						    end >> PAGE_SHIFT);
-		WARN_ON_ONCE(ret);
-	}
+	pos = iocb->ki_pos;
+	count = iov_iter_count(from);
 
-	trace_xfs_file_dax_write(ip, iov_iter_count(from), iocb->ki_pos);
+	trace_xfs_file_dax_write(ip, count, pos);
 
-	data = *from;
-	ret = dax_do_io(iocb, inode, &data, xfs_get_blocks_direct,
-			xfs_end_io_direct_write, 0);
-	if (ret > 0) {
-		iocb->ki_pos += ret;
-		iov_iter_advance(from, ret);
+	ret = iomap_dax_rw(iocb, from, &xfs_iomap_ops);
+	if (ret > 0 && iocb->ki_pos > i_size_read(inode)) {
+		i_size_write(inode, iocb->ki_pos);
+		error = xfs_setfilesize(ip, pos, ret);
 	}
+
 out:
 	xfs_rw_iunlock(ip, iolock);
-	return ret;
+	return error ? error : ret;
 }
 
 STATIC ssize_t
@@ -1495,7 +1468,7 @@ xfs_filemap_page_mkwrite(
 	xfs_ilock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
 
 	if (IS_DAX(inode)) {
-		ret = dax_mkwrite(vma, vmf, xfs_get_blocks_dax_fault);
+		ret = iomap_dax_fault(vma, vmf, &xfs_iomap_ops);
 	} else {
 		ret = iomap_page_mkwrite(vma, vmf, &xfs_iomap_ops);
 		ret = block_page_mkwrite_return(ret);
@@ -1529,7 +1502,7 @@ xfs_filemap_fault(
 		 * changes to xfs_get_blocks_direct() to map unwritten extent
 		 * ioend for conversion on read-only mappings.
 		 */
-		ret = dax_fault(vma, vmf, xfs_get_blocks_dax_fault);
+		ret = iomap_dax_fault(vma, vmf, &xfs_iomap_ops);
 	} else
 		ret = filemap_fault(vma, vmf);
 	xfs_iunlock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
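
A note on the new calls above: unlike dax_do_io()/dax_fault(), the iomap_dax_rw() and iomap_dax_fault() helpers take no get_blocks_t callback; they are driven entirely by the iomap_ops table passed as &xfs_iomap_ops, whose begin/end hooks map a file range to an extent up front. As a rough sketch only, assuming the callback interface roughly as it stood around the time of this patch (prototypes recalled from memory, not taken from this diff; the authoritative definition is include/linux/iomap.h in the matching tree):

struct iomap_ops {
	/*
	 * Map the byte range [pos, pos + length) to a single extent.
	 * iomap_dax_rw() and iomap_dax_fault() call this before doing
	 * any data copy or page-table insertion.
	 */
	int (*iomap_begin)(struct inode *inode, loff_t pos, loff_t length,
			   unsigned flags, struct iomap *iomap);

	/*
	 * Finish off the range, e.g. trim a speculative allocation back
	 * to the bytes actually written; optional and may be NULL.
	 */
	int (*iomap_end)(struct inode *inode, loff_t pos, loff_t length,
			 ssize_t written, unsigned flags, struct iomap *iomap);
};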