@@ -685,12 +685,10 @@ out_unlock_iolock:
 }
 
 /*
- * dead simple method of punching delalyed allocation blocks from a range in
- * the inode. Walks a block at a time so will be slow, but is only executed in
- * rare error cases so the overhead is not critical. This will always punch out
- * both the start and end blocks, even if the ranges only partially overlap
- * them, so it is up to the caller to ensure that partial blocks are not
- * passed in.
+ * Dead simple method of punching delayed allocation blocks from a range in
+ * the inode. This will always punch out both the start and end blocks, even
+ * if the ranges only partially overlap them, so it is up to the caller to
+ * ensure that partial blocks are not passed in.
  */
 int
 xfs_bmap_punch_delalloc_range(
@@ -698,63 +696,44 @@ xfs_bmap_punch_delalloc_range(
 	xfs_fileoff_t		start_fsb,
 	xfs_fileoff_t		length)
 {
-	xfs_fileoff_t		remaining = length;
+	struct xfs_ifork	*ifp = &ip->i_df;
+	xfs_fileoff_t		end_fsb = start_fsb + length;
+	struct xfs_bmbt_irec	got, del;
+	struct xfs_iext_cursor	icur;
 	int			error = 0;
 
 	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
 
-	do {
-		int		done;
-		xfs_bmbt_irec_t	imap;
-		int		nimaps = 1;
-		xfs_fsblock_t	firstblock;
-		struct xfs_defer_ops dfops;
+	if (!(ifp->if_flags & XFS_IFEXTENTS)) {
+		error = xfs_iread_extents(NULL, ip, XFS_DATA_FORK);
+		if (error)
+			return error;
+	}
 
-		/*
-		 * Map the range first and check that it is a delalloc extent
-		 * before trying to unmap the range. Otherwise we will be
-		 * trying to remove a real extent (which requires a
-		 * transaction) or a hole, which is probably a bad idea...
-		 */
-		error = xfs_bmapi_read(ip, start_fsb, 1, &imap, &nimaps,
-				       XFS_BMAPI_ENTIRE);
+	if (!xfs_iext_lookup_extent_before(ip, ifp, &end_fsb, &icur, &got))
+		return 0;
 
-		if (error) {
-			/* something screwed, just bail */
-			if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) {
-				xfs_alert(ip->i_mount,
-			"Failed delalloc mapping lookup ino %lld fsb %lld.",
-						ip->i_ino, start_fsb);
-			}
-			break;
-		}
-		if (!nimaps) {
-			/* nothing there */
-			goto next_block;
-		}
-		if (imap.br_startblock != DELAYSTARTBLOCK) {
-			/* been converted, ignore */
-			goto next_block;
-		}
-		WARN_ON(imap.br_blockcount == 0);
+	while (got.br_startoff + got.br_blockcount > start_fsb) {
+		del = got;
+		xfs_trim_extent(&del, start_fsb, length);
 
 		/*
-		 * Note: while we initialise the firstblock/dfops pair, they
-		 * should never be used because blocks should never be
-		 * allocated or freed for a delalloc extent and hence we need
-		 * don't cancel or finish them after the xfs_bunmapi() call.
+		 * A delete can push the cursor forward. Step back to the
+		 * previous extent on non-delalloc or extents outside the
+		 * target range.
 		 */
-		xfs_defer_init(&dfops, &firstblock);
-		error = xfs_bunmapi(NULL, ip, start_fsb, 1, 0, 1, &firstblock,
-				&dfops, &done);
-		if (error)
-			break;
+		if (!del.br_blockcount ||
+		    !isnullstartblock(del.br_startblock)) {
+			if (!xfs_iext_prev_extent(ifp, &icur, &got))
+				break;
+			continue;
+		}
 
-		ASSERT(!xfs_defer_has_unfinished_work(&dfops));
-next_block:
-		start_fsb++;
-		remaining--;
-	} while(remaining > 0);
+		error = xfs_bmap_del_extent_delay(ip, XFS_DATA_FORK, &icur,
+						  &got, &del);
+		if (error || !xfs_iext_get_extent(ifp, &icur, &got))
+			break;
+	}
 
 	return error;
 }
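The rewrite above replaces the old one-block-at-a-time xfs_bunmapi() loop with a single backwards walk of the incore extent list: position the cursor at the last extent that starts before the end of the punch range, trim each extent to the range, and delete only the pieces that are still delalloc. The toy program below is a minimal userspace sketch of that shape, not kernel code: toy_extent, trim_extent() and the delalloc flag are invented stand-ins for xfs_bmbt_irec, xfs_trim_extent() and isnullstartblock(), and "deleting" is reduced to printing the range.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Invented stand-in for struct xfs_bmbt_irec: covers blocks [start, start + count). */
struct toy_extent {
	uint64_t start;		/* first block of the extent */
	uint64_t count;		/* number of blocks */
	bool	 delalloc;	/* stand-in for isnullstartblock() */
};

/* Clamp *ext to the punch range [start, start + length), like xfs_trim_extent(). */
static void trim_extent(struct toy_extent *ext, uint64_t start, uint64_t length)
{
	uint64_t end = start + length;
	uint64_t ext_end = ext->start + ext->count;

	if (ext->start < start)
		ext->start = start;
	if (ext_end > end)
		ext_end = end;
	ext->count = ext_end > ext->start ? ext_end - ext->start : 0;
}

/*
 * Walk the extent list backwards from the last extent that starts before the
 * end of the punch range and act only on the delalloc pieces -- the same
 * shape as the new xfs_bmap_punch_delalloc_range() loop, minus locking and
 * the incore extent cursor.
 */
static void punch_delalloc(const struct toy_extent *ext, int nr,
			   uint64_t start, uint64_t length)
{
	int i = nr - 1;

	/* Mimic xfs_iext_lookup_extent_before(): last extent before the range end. */
	while (i >= 0 && ext[i].start >= start + length)
		i--;

	while (i >= 0 && ext[i].start + ext[i].count > start) {
		struct toy_extent del = ext[i--];

		trim_extent(&del, start, length);
		if (!del.count || !del.delalloc)
			continue;	/* no overlap, or a real extent: skip it */

		/* The kernel would call xfs_bmap_del_extent_delay() here. */
		printf("punch delalloc blocks [%llu, %llu)\n",
		       (unsigned long long)del.start,
		       (unsigned long long)(del.start + del.count));
	}
}

int main(void)
{
	const struct toy_extent ext[] = {
		{ .start =  0, .count = 8, .delalloc = false },
		{ .start =  8, .count = 8, .delalloc = true  },
		{ .start = 16, .count = 8, .delalloc = true  },
		{ .start = 24, .count = 8, .delalloc = false },
	};

	/* Punch blocks [10, 22): overlaps both delalloc extents. */
	punch_delalloc(ext, 4, 10, 12);
	return 0;
}

Because the walk runs from high offsets to low and only inspects the current extent (stepping back when it skips one), no per-block mapping lookups are needed any more.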
@@ -1208,7 +1187,22 @@ xfs_free_file_space(
 		return 0;
 	if (offset + len > XFS_ISIZE(ip))
 		len = XFS_ISIZE(ip) - offset;
-	return iomap_zero_range(VFS_I(ip), offset, len, NULL, &xfs_iomap_ops);
+	error = iomap_zero_range(VFS_I(ip), offset, len, NULL, &xfs_iomap_ops);
+	if (error)
+		return error;
+
+	/*
+	 * If we zeroed right up to EOF and EOF straddles a page boundary we
+	 * must make sure that the post-EOF area is also zeroed because the
+	 * page could be mmap'd and iomap_zero_range doesn't do that for us.
+	 * Writeback of the eof page will do this, albeit clumsily.
+	 */
+	if (offset + len >= XFS_ISIZE(ip) && ((offset + len) & ~PAGE_MASK)) {
+		error = filemap_write_and_wait_range(VFS_I(ip)->i_mapping,
+				(offset + len) & PAGE_MASK, LLONG_MAX);
+	}
+
+	return error;
 }
 
 /*
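The new tail of xfs_free_file_space() only resorts to writeback when the zeroed range reaches EOF and EOF does not sit on a page boundary; in that case it writes back from the start of the page containing EOF so the post-EOF part of that page is zeroed as well. A small userspace sketch of just that arithmetic, assuming 4096-byte pages and using a local mask in place of the kernel's PAGE_MASK helper:

#include <stdint.h>
#include <stdio.h>

#define TOY_PAGE_SIZE	4096ULL
#define TOY_PAGE_MASK	(~(TOY_PAGE_SIZE - 1))	/* like the kernel's PAGE_MASK */

int main(void)
{
	uint64_t isize = 10000;			/* pretend EOF */
	uint64_t offset = 8192, len = 1808;	/* zeroed range runs up to EOF */
	uint64_t end = offset + len;

	/* "EOF straddles a page boundary" == end of the range is not page aligned. */
	int straddles = end >= isize && (end & ~TOY_PAGE_MASK) != 0;

	if (straddles) {
		/* Write back starting at the first byte of the page containing EOF. */
		uint64_t wb_start = end & TOY_PAGE_MASK;

		printf("EOF page starts at %llu; write it back to zero the post-EOF tail\n",
		       (unsigned long long)wb_start);
	}
	return 0;
}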
@@ -1404,6 +1398,10 @@ xfs_insert_file_space(
 
 	trace_xfs_insert_file_space(ip);
 
+	error = xfs_bmap_can_insert_extents(ip, stop_fsb, shift_fsb);
+	if (error)
+		return error;
+
 	error = xfs_prepare_shift(ip, offset);
 	if (error)
 		return error;
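The last hunk makes xfs_insert_file_space() validate the shift before anything is dirtied. The sketch below illustrates that check-before-modify pattern under an assumption: going by its name, xfs_bmap_can_insert_extents() is taken here to verify that shifting the existing extents right by shift_fsb keeps them below the filesystem's maximum file offset. can_insert_extents(), last_extent_end and max_fsb are invented names for illustration, not the kernel API.

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Hypothetical pre-check in the spirit of xfs_bmap_can_insert_extents():
 * refuse the insert-range operation up front if shifting the last extent
 * right would push it past the largest supported file offset.
 */
static int can_insert_extents(uint64_t last_extent_end, uint64_t shift_fsb,
			      uint64_t max_fsb)
{
	if (last_extent_end > max_fsb ||
	    shift_fsb > max_fsb - last_extent_end)
		return -EFBIG;		/* would shift past the supported maximum */
	return 0;
}

int main(void)
{
	uint64_t max_fsb = 1ULL << 32;	/* pretend per-filesystem limit */
	int error;

	/* Validate first, like the new call made before xfs_prepare_shift(). */
	error = can_insert_extents(/* last_extent_end */ (1ULL << 32) - 10,
				   /* shift_fsb */ 100, max_fsb);
	if (error)
		fprintf(stderr, "insert range rejected: %d\n", error);
	return 0;
}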