@@ -686,6 +686,34 @@ out_sem:
 	return retval;
 }
 
+/*
+ * Update EXT4_MAP_FLAGS in bh->b_state. For buffer heads attached to pages
+ * we have to be careful as someone else may be manipulating b_state as well.
+ */
+static void ext4_update_bh_state(struct buffer_head *bh, unsigned long flags)
+{
+	unsigned long old_state;
+	unsigned long new_state;
+
+	flags &= EXT4_MAP_FLAGS;
+
+	/* Dummy buffer_head? Set non-atomically. */
+	if (!bh->b_page) {
+		bh->b_state = (bh->b_state & ~EXT4_MAP_FLAGS) | flags;
+		return;
+	}
+	/*
+	 * Someone else may be modifying b_state. Be careful! This is ugly but
+	 * once we get rid of using bh as a container for mapping information
+	 * to pass to / from get_block functions, this can go away.
+	 */
+	do {
+		old_state = READ_ONCE(bh->b_state);
+		new_state = (old_state & ~EXT4_MAP_FLAGS) | flags;
+	} while (unlikely(
+		 cmpxchg(&bh->b_state, old_state, new_state) != old_state));
+}
+
 /* Maximum number of blocks we map for direct IO at once. */
 #define DIO_MAX_BLOCKS 4096
 
@@ -722,7 +750,7 @@ static int _ext4_get_block(struct inode *inode, sector_t iblock,
 		ext4_io_end_t *io_end = ext4_inode_aio(inode);
 
 		map_bh(bh, inode->i_sb, map.m_pblk);
-		bh->b_state = (bh->b_state & ~EXT4_MAP_FLAGS) | map.m_flags;
+		ext4_update_bh_state(bh, map.m_flags);
 		if (io_end && io_end->flag & EXT4_IO_END_UNWRITTEN)
 			set_buffer_defer_completion(bh);
 		bh->b_size = inode->i_sb->s_blocksize * map.m_len;
@@ -1685,7 +1713,7 @@ int ext4_da_get_block_prep(struct inode *inode, sector_t iblock,
 		return ret;
 
 	map_bh(bh, inode->i_sb, map.m_pblk);
-	bh->b_state = (bh->b_state & ~EXT4_MAP_FLAGS) | map.m_flags;
+	ext4_update_bh_state(bh, map.m_flags);
 
 	if (buffer_unwritten(bh)) {
 		/* A delayed write to unwritten bh should be marked
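
The loop added above is a standard compare-and-swap retry pattern: re-read the word, recompute only the flag bits this caller owns, and retry if another writer changed the word in between, so concurrent updates to the other b_state bits are never lost. For readers outside the kernel tree, here is a minimal stand-alone sketch of that pattern using C11 atomics; it is not part of the patch, and the identifiers (MAP_FLAGS, update_flags) are illustrative rather than kernel APIs.

/* Stand-alone sketch (C11), not part of the patch: atomically replace only
 * the MAP_FLAGS bits of a word that other threads may also be updating. */
#include <stdatomic.h>
#include <stdio.h>

#define MAP_FLAGS 0x7UL		/* bits owned by this updater (illustrative) */

static void update_flags(_Atomic unsigned long *state, unsigned long flags)
{
	unsigned long old_state, new_state;

	flags &= MAP_FLAGS;
	old_state = atomic_load(state);
	do {
		/* Recompute from the latest observed value so that bits
		 * outside MAP_FLAGS set by other threads are preserved. */
		new_state = (old_state & ~MAP_FLAGS) | flags;
		/* On failure, old_state is refreshed with the current value. */
	} while (!atomic_compare_exchange_weak(state, &old_state, new_state));
}

int main(void)
{
	_Atomic unsigned long state = 0xf0UL;	/* "foreign" bits already set */

	update_flags(&state, 0x3UL);
	printf("state = 0x%lx\n", atomic_load(&state));	/* prints 0xf3 */
	return 0;
}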