@@ -491,14 +491,12 @@ static int gfs2_iomap_alloc(struct inode *inode, struct iomap *iomap,
 {
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
-	struct super_block *sb = sdp->sd_vfs;
	struct buffer_head *dibh = mp->mp_bh[0];
	u64 bn;
	unsigned n, i, blks, alloced = 0, iblks = 0, branch_start = 0;
	unsigned dblks = 0;
	unsigned ptrs_per_blk;
	const unsigned end_of_metadata = mp->mp_fheight - 1;
-	int ret;
	enum alloc_state state;
	__be64 *ptr;
	__be64 zero_bn = 0;
@@ -607,15 +605,6 @@ static int gfs2_iomap_alloc(struct inode *inode, struct iomap *iomap,
			iomap->flags |= IOMAP_F_NEW;
			while (n-- > 0)
				*ptr++ = cpu_to_be64(bn++);
-			if (flags & IOMAP_ZERO) {
-				ret = sb_issue_zeroout(sb, iomap->addr >> inode->i_blkbits,
-						       dblks, GFP_NOFS);
-				if (ret) {
-					fs_err(sdp,
-					       "Failed to zero data buffers\n");
-					flags &= ~IOMAP_ZERO;
-				}
-			}
			break;
		}
	} while (iomap->addr == IOMAP_NULL_ADDR);
@@ -812,15 +801,22 @@ do_alloc:
 }

 /**
- * gfs2_block_map - Map a block from an inode to a disk block
+ * gfs2_block_map - Map one or more blocks of an inode to a disk block
  * @inode: The inode
  * @lblock: The logical block number
  * @bh_map: The bh to be mapped
  * @create: True if its ok to alloc blocks to satify the request
  *
- * Sets buffer_mapped() if successful, sets buffer_boundary() if a
- * read of metadata will be required before the next block can be
- * mapped. Sets buffer_new() if new blocks were allocated.
+ * The size of the requested mapping is defined in bh_map->b_size.
+ *
+ * Clears buffer_mapped(bh_map) and leaves bh_map->b_size unchanged
+ * when @lblock is not mapped. Sets buffer_mapped(bh_map) and
+ * bh_map->b_size to indicate the size of the mapping when @lblock and
+ * successive blocks are mapped, up to the requested size.
+ *
+ * Sets buffer_boundary() if a read of metadata will be required
+ * before the next block can be mapped. Sets buffer_new() if new
+ * blocks were allocated.
  *
  * Returns: errno
  */
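
As a hedged illustration of the bh_map->b_size semantics documented above (the
helper name gfs2_example_map_extent, the 16-block request size, and the
pr_debug call are illustrative assumptions, not part of this patch), a caller
might look roughly like this:

static int gfs2_example_map_extent(struct inode *inode, sector_t lblock)
{
	struct buffer_head bh = { .b_state = 0, .b_blocknr = 0 };
	int ret;

	/* Request a mapping of up to 16 filesystem blocks. */
	bh.b_size = 16 << inode->i_blkbits;
	ret = gfs2_block_map(inode, lblock, &bh, 0);	/* create == 0 */
	if (ret)
		return ret;
	if (!buffer_mapped(&bh))
		return 0;	/* hole: b_size is left unchanged */
	/* b_size now reflects the contiguous extent that was mapped. */
	pr_debug("mapped %zu bytes at disk block %llu\n",
		 bh.b_size, (unsigned long long)bh.b_blocknr);
	return 0;
}
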
@@ -839,8 +835,6 @@ int gfs2_block_map(struct inode *inode, sector_t lblock,

	if (create)
		flags |= IOMAP_WRITE;
-	if (buffer_zeronew(bh_map))
-		flags |= IOMAP_ZERO;
	ret = gfs2_iomap_begin(inode, (loff_t)lblock << inode->i_blkbits,
			       bh_map->b_size, flags, &iomap);
	if (ret) {
@@ -1344,6 +1338,7 @@ static inline bool walk_done(struct gfs2_sbd *sdp,
 static int punch_hole(struct gfs2_inode *ip, u64 offset, u64 length)
 {
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
+	u64 maxsize = sdp->sd_heightsize[ip->i_height];
	struct metapath mp = {};
	struct buffer_head *dibh, *bh;
	struct gfs2_holder rd_gh;
@@ -1359,6 +1354,14 @@ static int punch_hole(struct gfs2_inode *ip, u64 offset, u64 length)
	u64 prev_bnr = 0;
	__be64 *start, *end;

+	if (offset >= maxsize) {
+		/*
+		 * The starting point lies beyond the allocated meta-data;
+		 * there are no blocks to deallocate.
+		 */
+		return 0;
+	}
+
	/*
	 * The start position of the hole is defined by lblock, start_list, and
	 * start_aligned. The end position of the hole is defined by lend,
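
As a hedged reading of the new early return (the helper name punch_beyond_eom
is an assumption, not part of this patch): sd_heightsize[h] gives the largest
file size reachable with a metadata tree of height h, so an offset at or past
sd_heightsize[ip->i_height] cannot have any allocated blocks behind it and
there is nothing to punch. Expressed as a standalone check:

static inline bool punch_beyond_eom(struct gfs2_inode *ip, u64 offset)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);

	/* No metadata is allocated at or beyond this offset. */
	return offset >= sdp->sd_heightsize[ip->i_height];
}
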
@@ -1372,7 +1375,6 @@ static int punch_hole(struct gfs2_inode *ip, u64 offset, u64 length)
	 */

	if (length) {
-		u64 maxsize = sdp->sd_heightsize[ip->i_height];
		u64 end_offset = offset + length;
		u64 lend;
