@@ -1143,6 +1143,8 @@ __xfs_get_blocks(
 	ssize_t			size;
 	int			new = 0;
 
+	BUG_ON(create && !direct);
+
 	if (XFS_FORCED_SHUTDOWN(mp))
 		return -EIO;
 
@@ -1150,22 +1152,14 @@ __xfs_get_blocks(
 	ASSERT(bh_result->b_size >= (1 << inode->i_blkbits));
 	size = bh_result->b_size;
 
-	if (!create && direct && offset >= i_size_read(inode))
+	if (!create && offset >= i_size_read(inode))
 		return 0;
 
 	/*
 	 * Direct I/O is usually done on preallocated files, so try getting
-	 * a block mapping without an exclusive lock first. For buffered
-	 * writes we already have the exclusive iolock anyway, so avoiding
-	 * a lock roundtrip here by taking the ilock exclusive from the
-	 * beginning is a useful micro optimization.
+	 * a block mapping without an exclusive lock first.
 	 */
-	if (create && !direct) {
-		lockmode = XFS_ILOCK_EXCL;
-		xfs_ilock(ip, lockmode);
-	} else {
-		lockmode = xfs_ilock_data_map_shared(ip);
-	}
+	lockmode = xfs_ilock_data_map_shared(ip);
 
 	ASSERT(offset <= mp->m_super->s_maxbytes);
 	if (offset + size > mp->m_super->s_maxbytes)
@@ -1184,37 +1178,19 @@ __xfs_get_blocks(
 	     (imap.br_startblock == HOLESTARTBLOCK ||
 	      imap.br_startblock == DELAYSTARTBLOCK) ||
 	     (IS_DAX(inode) && ISUNWRITTEN(&imap)))) {
-		if (direct || xfs_get_extsz_hint(ip)) {
-			/*
-			 * xfs_iomap_write_direct() expects the shared lock. It
-			 * is unlocked on return.
-			 */
-			if (lockmode == XFS_ILOCK_EXCL)
-				xfs_ilock_demote(ip, lockmode);
-
-			error = xfs_iomap_write_direct(ip, offset, size,
-						       &imap, nimaps);
-			if (error)
-				return error;
-			new = 1;
+		/*
+		 * xfs_iomap_write_direct() expects the shared lock. It
+		 * is unlocked on return.
+		 */
+		if (lockmode == XFS_ILOCK_EXCL)
+			xfs_ilock_demote(ip, lockmode);
 
-		} else {
-			/*
-			 * Delalloc reservations do not require a transaction,
-			 * we can go on without dropping the lock here. If we
-			 * are allocating a new delalloc block, make sure that
-			 * we set the new flag so that we mark the buffer new so
-			 * that we know that it is newly allocated if the write
-			 * fails.
-			 */
-			if (nimaps && imap.br_startblock == HOLESTARTBLOCK)
-				new = 1;
-			error = xfs_iomap_write_delay(ip, offset, size, &imap);
-			if (error)
-				goto out_unlock;
+		error = xfs_iomap_write_direct(ip, offset, size,
+					       &imap, nimaps);
+		if (error)
+			return error;
+		new = 1;
 
-			xfs_iunlock(ip, lockmode);
-		}
 		trace_xfs_get_blocks_alloc(ip, offset, size,
 				ISUNWRITTEN(&imap) ? XFS_IO_UNWRITTEN
 						   : XFS_IO_DELALLOC, &imap);
@@ -1235,9 +1211,7 @@ __xfs_get_blocks(
 	}
 
 	/* trim mapping down to size requested */
-	if (direct || size > (1 << inode->i_blkbits))
-		xfs_map_trim_size(inode, iblock, bh_result,
-				  &imap, offset, size);
+	xfs_map_trim_size(inode, iblock, bh_result, &imap, offset, size);
 
 	/*
 	 * For unwritten extents do not report a disk address in the buffered
@@ -1250,7 +1224,7 @@ __xfs_get_blocks(
 	if (ISUNWRITTEN(&imap))
 		set_buffer_unwritten(bh_result);
 	/* direct IO needs special help */
-	if (create && direct) {
+	if (create) {
 		if (dax_fault)
 			ASSERT(!ISUNWRITTEN(&imap));
 		else
@@ -1279,14 +1253,7 @@ __xfs_get_blocks(
 	      (new || ISUNWRITTEN(&imap))))
 		set_buffer_new(bh_result);
 
-	if (imap.br_startblock == DELAYSTARTBLOCK) {
-		BUG_ON(direct);
-		if (create) {
-			set_buffer_uptodate(bh_result);
-			set_buffer_mapped(bh_result);
-			set_buffer_delay(bh_result);
-		}
-	}
+	BUG_ON(direct && imap.br_startblock == DELAYSTARTBLOCK);
 
 	return 0;
 
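
Note (not part of the patch): __xfs_get_blocks() is only reached through thin
get_block_t wrappers, and the new BUG_ON(create && !direct) relies on buffered
writes no longer coming through this path, so only the direct I/O and DAX
wrappers may pass create = 1. The sketch below shows roughly what those
wrappers look like; the wrapper names and the trailing bool arguments
(direct, dax_fault) are recalled from xfs_aops.c of this era rather than shown
in the hunks above, so treat them as illustrative only.

/* Sketch only, for orientation; see fs/xfs/xfs_aops.c for the real code. */
int
xfs_get_blocks(
	struct inode		*inode,
	sector_t		iblock,
	struct buffer_head	*bh_result,
	int			create)
{
	/* buffered path: create = 1 here would now trip the BUG_ON above */
	return __xfs_get_blocks(inode, iblock, bh_result, create, false, false);
}

int
xfs_get_blocks_direct(
	struct inode		*inode,
	sector_t		iblock,
	struct buffer_head	*bh_result,
	int			create)
{
	/* direct I/O: the only path still allowed to allocate blocks here */
	return __xfs_get_blocks(inode, iblock, bh_result, create, true, false);
}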