@@ -686,6 +686,34 @@ out_sem:
 	return retval;
 }
 
+/*
+ * Update EXT4_MAP_FLAGS in bh->b_state. For buffer heads attached to pages
+ * we have to be careful as someone else may be manipulating b_state as well.
+ */
+static void ext4_update_bh_state(struct buffer_head *bh, unsigned long flags)
+{
+	unsigned long old_state;
+	unsigned long new_state;
+
+	flags &= EXT4_MAP_FLAGS;
+
+	/* Dummy buffer_head? Set non-atomically. */
+	if (!bh->b_page) {
+		bh->b_state = (bh->b_state & ~EXT4_MAP_FLAGS) | flags;
+		return;
+	}
+	/*
+	 * Someone else may be modifying b_state. Be careful! This is ugly but
+	 * once we get rid of using bh as a container for mapping information
+	 * to pass to / from get_block functions, this can go away.
+	 */
+	do {
+		old_state = READ_ONCE(bh->b_state);
+		new_state = (old_state & ~EXT4_MAP_FLAGS) | flags;
+	} while (unlikely(
+		 cmpxchg(&bh->b_state, old_state, new_state) != old_state));
+}
+
 /* Maximum number of blocks we map for direct IO at once. */
 #define DIO_MAX_BLOCKS 4096
 
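The helper above is the classic compare-and-swap retry loop for atomically replacing a subset of bits in a word: read the current value, splice the new flag bits into it, and retry if another CPU modified b_state in the meantime, so concurrent changes to the non-mapping bits are never lost. Below is a minimal userspace sketch of the same pattern using C11 atomics in place of the kernel's READ_ONCE()/cmpxchg(); the MAP_FLAGS mask and the bit values are made up for illustration.

#include <stdatomic.h>
#include <stdio.h>

#define MAP_FLAGS 0x0fUL			/* hypothetical stand-in for EXT4_MAP_FLAGS */

static _Atomic unsigned long state = 0x31UL;	/* unrelated bits (0x30) already set */

/* Atomically replace only the MAP_FLAGS bits of state, preserving all others. */
static void update_state(unsigned long flags)
{
	unsigned long old_state, new_state;

	flags &= MAP_FLAGS;
	old_state = atomic_load(&state);
	do {
		new_state = (old_state & ~MAP_FLAGS) | flags;
		/*
		 * On failure, old_state is refreshed with the current value
		 * and the loop recomputes new_state from it.
		 */
	} while (!atomic_compare_exchange_weak(&state, &old_state, new_state));
}

int main(void)
{
	update_state(0x05UL);
	printf("%#lx\n", atomic_load(&state));	/* 0x35: 0x30 preserved, low nibble replaced */
	return 0;
}

The weak exchange may fail spuriously, which is harmless inside a retry loop; the kernel's cmpxchg() corresponds to the strong variant, with the failure check done by comparing the returned old value.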
@@ -722,7 +750,7 @@ static int _ext4_get_block(struct inode *inode, sector_t iblock,
 		ext4_io_end_t *io_end = ext4_inode_aio(inode);
 
 		map_bh(bh, inode->i_sb, map.m_pblk);
-		bh->b_state = (bh->b_state & ~EXT4_MAP_FLAGS) | map.m_flags;
+		ext4_update_bh_state(bh, map.m_flags);
 		if (io_end && io_end->flag & EXT4_IO_END_UNWRITTEN)
 			set_buffer_defer_completion(bh);
 		bh->b_size = inode->i_sb->s_blocksize * map.m_len;
@@ -1685,7 +1713,7 @@ int ext4_da_get_block_prep(struct inode *inode, sector_t iblock,
 		return ret;
 
 	map_bh(bh, inode->i_sb, map.m_pblk);
-	bh->b_state = (bh->b_state & ~EXT4_MAP_FLAGS) | map.m_flags;
+	ext4_update_bh_state(bh, map.m_flags);
 
 	if (buffer_unwritten(bh)) {
 		/* A delayed write to unwritten bh should be marked
@@ -3253,29 +3281,29 @@ static ssize_t ext4_ext_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
 	 * case, we allocate an io_end structure to hook to the iocb.
 	 */
 	iocb->private = NULL;
-	ext4_inode_aio_set(inode, NULL);
-	if (!is_sync_kiocb(iocb)) {
-		io_end = ext4_init_io_end(inode, GFP_NOFS);
-		if (!io_end) {
-			ret = -ENOMEM;
-			goto retake_lock;
-		}
-		/*
-		 * Grab reference for DIO. Will be dropped in ext4_end_io_dio()
-		 */
-		iocb->private = ext4_get_io_end(io_end);
-		/*
-		 * we save the io structure for current async direct
-		 * IO, so that later ext4_map_blocks() could flag the
-		 * io structure whether there is a unwritten extents
-		 * needs to be converted when IO is completed.
-		 */
-		ext4_inode_aio_set(inode, io_end);
-	}
-
 	if (overwrite) {
 		get_block_func = ext4_get_block_overwrite;
 	} else {
+		ext4_inode_aio_set(inode, NULL);
+		if (!is_sync_kiocb(iocb)) {
+			io_end = ext4_init_io_end(inode, GFP_NOFS);
+			if (!io_end) {
+				ret = -ENOMEM;
+				goto retake_lock;
+			}
+			/*
+			 * Grab reference for DIO. Will be dropped in
+			 * ext4_end_io_dio()
+			 */
+			iocb->private = ext4_get_io_end(io_end);
+			/*
+			 * Save the io structure for the current async direct
+			 * IO, so that ext4_map_blocks() can later flag the io
+			 * structure if there are unwritten extents that need
+			 * to be converted when the IO completes.
+			 */
+			ext4_inode_aio_set(inode, io_end);
+		}
 		get_block_func = ext4_get_block_write;
 		dio_flags = DIO_LOCKING;
 	}
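The relocated block is an instance of a common async-completion pattern: allocate a per-IO context only on the path whose completion handler actually has work to do (converting unwritten extents), and hand it to the completion side as a counted reference in iocb->private. An overwrite DIO targets blocks that are already allocated and written out, so it can never produce unwritten extents and no longer pays for an io_end allocation. A rough standalone sketch of that shape, with invented names, a plain int in place of the kernel's atomic refcount, and a global standing in for the per-inode pointer that ext4_inode_aio_set() updates:

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

/* Invented stand-ins, not the real VFS/ext4 types. */
struct io_ctx { int ref; };
struct my_iocb { void *private; bool is_sync; };

static struct io_ctx *cur_aio;	/* role of the pointer set by ext4_inode_aio_set() */

static struct io_ctx *ctx_get(struct io_ctx *c) { c->ref++; return c; }

static int dio_setup(struct my_iocb *iocb, bool overwrite)
{
	iocb->private = NULL;
	if (overwrite)
		return 0;	/* already-written blocks: nothing to convert on completion */
	cur_aio = NULL;
	if (!iocb->is_sync) {
		struct io_ctx *c = calloc(1, sizeof(*c));
		if (!c)
			return -1;	/* ~ the -ENOMEM / goto retake_lock path */
		c->ref = 1;
		iocb->private = ctx_get(c);	/* reference dropped by the completion handler */
		cur_aio = c;	/* lets the block mapper flag extents needing conversion */
	}
	return 0;
}

int main(void)
{
	struct my_iocb a = { .private = NULL, .is_sync = false };
	if (dio_setup(&a, false) == 0)
		printf("async ctx attached: %d\n", a.private != NULL);	/* prints 1 */
	return 0;
}

Synchronous DIO leaves iocb->private NULL because the submitter waits for the IO and can perform any conversion itself afterwards; only async IO needs a context that outlives the submitting call.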