@@ -251,19 +251,6 @@ int f2fs_reserve_block(struct dnode_of_data *dn, pgoff_t index)
 	return err;
 }
 
-static void f2fs_map_bh(struct super_block *sb, pgoff_t pgofs,
-			struct extent_info *ei, struct buffer_head *bh_result)
-{
-	unsigned int blkbits = sb->s_blocksize_bits;
-	size_t max_size = bh_result->b_size;
-	size_t mapped_size;
-
-	clear_buffer_new(bh_result);
-	map_bh(bh_result, sb, ei->blk + pgofs - ei->fofs);
-	mapped_size = (ei->fofs + ei->len - pgofs) << blkbits;
-	bh_result->b_size = min(max_size, mapped_size);
-}
-
 static bool lookup_extent_info(struct inode *inode, pgoff_t pgofs,
 					struct extent_info *ei)
 {
@@ -1208,18 +1195,18 @@ out:
 }
 
 /*
- * get_data_block() now supported readahead/bmap/rw direct_IO with mapped bh.
+ * f2fs_map_blocks() now supported readahead/bmap/rw direct_IO with
+ * f2fs_map_blocks structure.
  * If original data blocks are allocated, then give them to blockdev.
  * Otherwise,
  *     a. preallocate requested block addresses
  *     b. do not use extent cache for better performance
  *     c. give the block addresses to blockdev
  */
-static int __get_data_block(struct inode *inode, sector_t iblock,
-			struct buffer_head *bh_result, int create, bool fiemap)
+static int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map,
+						int create, bool fiemap)
 {
-	unsigned int blkbits = inode->i_sb->s_blocksize_bits;
-	unsigned maxblocks = bh_result->b_size >> blkbits;
+	unsigned int maxblocks = map->m_len;
 	struct dnode_of_data dn;
 	int mode = create ? ALLOC_NODE : LOOKUP_NODE_RA;
 	pgoff_t pgofs, end_offset;
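
The new entry point fills a struct f2fs_map_blocks instead of a buffer_head. The structure and its F2FS_MAP_* flags live in f2fs.h and are not part of this diff; the sketch below is only inferred from the fields used here (m_pblk, m_lblk, m_len, m_flags), so the exact types and comments are assumptions rather than the authoritative definition.

	/* Sketch of the mapping carrier, assumed from its usage below. */
	struct f2fs_map_blocks {
		block_t m_pblk;		/* out: physical start block of the mapping */
		block_t m_lblk;		/* in: logical (file) block being looked up */
		unsigned int m_len;	/* in: blocks wanted, out: blocks actually mapped */
		unsigned int m_flags;	/* out: F2FS_MAP_MAPPED / F2FS_MAP_NEW */
	};
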
@@ -1227,11 +1214,16 @@ static int __get_data_block(struct inode *inode, sector_t iblock,
 	struct extent_info ei;
 	bool allocated = false;
 
-	/* Get the page offset from the block offset(iblock) */
-	pgofs = (pgoff_t)(iblock >> (PAGE_CACHE_SHIFT - blkbits));
+	map->m_len = 0;
+	map->m_flags = 0;
+
+	/* it only supports block size == page size */
+	pgofs = (pgoff_t)map->m_lblk;
 
 	if (f2fs_lookup_extent_cache(inode, pgofs, &ei)) {
-		f2fs_map_bh(inode->i_sb, pgofs, &ei, bh_result);
+		map->m_pblk = ei.blk + pgofs - ei.fofs;
+		map->m_len = min((pgoff_t)maxblocks, ei.fofs + ei.len - pgofs);
+		map->m_flags = F2FS_MAP_MAPPED;
 		goto out;
 	}
 
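
On an extent-cache hit, the arithmetic of the removed f2fs_map_bh() survives unchanged, just rewritten against the map structure: the physical block is the extent's start block shifted by the offset into the extent, and the length is capped both by what remains of the extent and by what the caller asked for. A minimal standalone example with made-up numbers (fofs = file block where the cached extent starts, blk = its first disk block, len = its length in blocks):

	#include <stdio.h>

	int main(void)
	{
		/* cached extent: file blocks 100..107 map to disk blocks 5000..5007 */
		unsigned int fofs = 100, blk = 5000, len = 8;
		/* request: starts at file block 103, wants up to 16 blocks */
		unsigned int pgofs = 103, maxblocks = 16;

		unsigned int m_pblk = blk + pgofs - fofs;
		unsigned int left = fofs + len - pgofs;
		unsigned int m_len = maxblocks < left ? maxblocks : left;

		/* prints m_pblk=5003 m_len=5 */
		printf("m_pblk=%u m_len=%u\n", m_pblk, m_len);
		return 0;
	}
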
@@ -1250,21 +1242,21 @@ static int __get_data_block(struct inode *inode, sector_t iblock,
 		goto put_out;
 
 	if (dn.data_blkaddr != NULL_ADDR) {
-		clear_buffer_new(bh_result);
-		map_bh(bh_result, inode->i_sb, dn.data_blkaddr);
+		map->m_flags = F2FS_MAP_MAPPED;
+		map->m_pblk = dn.data_blkaddr;
 	} else if (create) {
 		err = __allocate_data_block(&dn);
 		if (err)
 			goto put_out;
 		allocated = true;
-		set_buffer_new(bh_result);
-		map_bh(bh_result, inode->i_sb, dn.data_blkaddr);
+		map->m_flags = F2FS_MAP_NEW | F2FS_MAP_MAPPED;
+		map->m_pblk = dn.data_blkaddr;
 	} else {
 		goto put_out;
 	}
 
 	end_offset = ADDRS_PER_PAGE(dn.node_page, F2FS_I(inode));
-	bh_result->b_size = (((size_t)1) << blkbits);
+	map->m_len = 1;
 	dn.ofs_in_node++;
 	pgofs++;
 
@@ -1288,22 +1280,22 @@ get_next:
 		end_offset = ADDRS_PER_PAGE(dn.node_page, F2FS_I(inode));
 	}
 
-	if (maxblocks > (bh_result->b_size >> blkbits)) {
+	if (maxblocks > map->m_len) {
 		block_t blkaddr = datablock_addr(dn.node_page, dn.ofs_in_node);
 		if (blkaddr == NULL_ADDR && create) {
 			err = __allocate_data_block(&dn);
 			if (err)
 				goto sync_out;
 			allocated = true;
-			set_buffer_new(bh_result);
+			map->m_flags |= F2FS_MAP_NEW;
 			blkaddr = dn.data_blkaddr;
 		}
 		/* Give more consecutive addresses for the readahead */
-		if (blkaddr == (bh_result->b_blocknr + ofs)) {
+		if (map->m_pblk != NEW_ADDR && blkaddr == (map->m_pblk + ofs)) {
 			ofs++;
 			dn.ofs_in_node++;
 			pgofs++;
-			bh_result->b_size += (((size_t)1) << blkbits);
+			map->m_len++;
 			goto get_next;
 		}
 	}
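
The get_next loop grows a single mapping one block at a time for as long as the next looked-up address stays physically contiguous with the run that begins at m_pblk; the added m_pblk != NEW_ADDR test skips that extension when the base is the NEW_ADDR placeholder (a reserved block with no real disk address yet). A toy illustration of the contiguity policy only, with made-up addresses standing in for successive datablock_addr() lookups:

	#include <stdio.h>

	int main(void)
	{
		/* hypothetical per-block lookups; 9000 breaks the 5003.. run */
		unsigned int blkaddr[] = { 5003, 5004, 5005, 9000, 9001 };
		unsigned int m_pblk = blkaddr[0], m_len = 1, ofs = 1;

		while (ofs < 5 && blkaddr[ofs] == m_pblk + ofs) {
			m_len++;	/* contiguous: just lengthen the mapping */
			ofs++;
		}
		/* prints m_pblk=5003 m_len=3 */
		printf("m_pblk=%u m_len=%u\n", m_pblk, m_len);
		return 0;
	}
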
@@ -1316,10 +1308,28 @@ unlock_out:
 	if (create)
 		f2fs_unlock_op(F2FS_I_SB(inode));
 out:
-	trace_f2fs_get_data_block(inode, iblock, bh_result, err);
+	trace_f2fs_map_blocks(inode, map, err);
 	return err;
 }
 
+static int __get_data_block(struct inode *inode, sector_t iblock,
+			struct buffer_head *bh, int create, bool fiemap)
+{
+	struct f2fs_map_blocks map;
+	int ret;
+
+	map.m_lblk = iblock;
+	map.m_len = bh->b_size >> inode->i_blkbits;
+
+	ret = f2fs_map_blocks(inode, &map, create, fiemap);
+	if (!ret) {
+		map_bh(bh, inode->i_sb, map.m_pblk);
+		bh->b_state = (bh->b_state & ~F2FS_MAP_FLAGS) | map.m_flags;
+		bh->b_size = map.m_len << inode->i_blkbits;
+	}
+	return ret;
+}
+
 static int get_data_block(struct inode *inode, sector_t iblock,
 			struct buffer_head *bh_result, int create)
 {