@@ -38,6 +38,7 @@
#include "xfs_bmap_util.h"
#include "xfs_bmap_btree.h"
#include "xfs_rtalloc.h"
+#include "xfs_errortag.h"
#include "xfs_error.h"
#include "xfs_quota.h"
#include "xfs_trans_space.h"
@@ -112,28 +113,21 @@ xfs_bmap_compute_maxlevels(
STATIC int				/* error */
xfs_bmbt_lookup_eq(
	struct xfs_btree_cur	*cur,
-	xfs_fileoff_t		off,
-	xfs_fsblock_t		bno,
-	xfs_filblks_t		len,
+	struct xfs_bmbt_irec	*irec,
	int			*stat)	/* success/failure */
{
-	cur->bc_rec.b.br_startoff = off;
-	cur->bc_rec.b.br_startblock = bno;
-	cur->bc_rec.b.br_blockcount = len;
+	cur->bc_rec.b = *irec;
	return xfs_btree_lookup(cur, XFS_LOOKUP_EQ, stat);
}
STATIC int				/* error */
-xfs_bmbt_lookup_ge(
+xfs_bmbt_lookup_first(
	struct xfs_btree_cur	*cur,
-	xfs_fileoff_t		off,
-	xfs_fsblock_t		bno,
-	xfs_filblks_t		len,
	int			*stat)	/* success/failure */
{
-	cur->bc_rec.b.br_startoff = off;
-	cur->bc_rec.b.br_startblock = bno;
-	cur->bc_rec.b.br_blockcount = len;
+	cur->bc_rec.b.br_startoff = 0;
+	cur->bc_rec.b.br_startblock = 0;
+	cur->bc_rec.b.br_blockcount = 0;
	return xfs_btree_lookup(cur, XFS_LOOKUP_GE, stat);
}
@@ -160,21 +154,17 @@ static inline bool xfs_bmap_wants_extents(struct xfs_inode *ip, int whichfork)
}
/*
- * Update the record referred to by cur to the value given
- * by [off, bno, len, state].
+ * Update the record referred to by cur to the value given by irec
 * This either works (return 0) or gets an EFSCORRUPTED error.
 */
STATIC int
xfs_bmbt_update(
	struct xfs_btree_cur	*cur,
-	xfs_fileoff_t		off,
-	xfs_fsblock_t		bno,
-	xfs_filblks_t		len,
-	xfs_exntst_t		state)
+	struct xfs_bmbt_irec	*irec)
{
	union xfs_btree_rec	rec;
-	xfs_bmbt_disk_set_allf(&rec.bmbt, off, bno, len, state);
+	xfs_bmbt_disk_set_all(&rec.bmbt, irec);
	return xfs_btree_update(cur, &rec);
}
@@ -242,7 +232,6 @@ xfs_bmap_forkoff_reset(
{
	if (whichfork == XFS_ATTR_FORK &&
	    ip->i_d.di_format != XFS_DINODE_FMT_DEV &&
-	    ip->i_d.di_format != XFS_DINODE_FMT_UUID &&
	    ip->i_d.di_format != XFS_DINODE_FMT_BTREE) {
		uint	dfl_forkoff = xfs_default_attroffset(ip) >> 3;
@@ -498,31 +487,6 @@ error_norelse:
	return;
}
-/*
- * Add bmap trace insert entries for all the contents of the extent records.
- */
-void
-xfs_bmap_trace_exlist(
-	xfs_inode_t	*ip,		/* incore inode pointer */
-	xfs_extnum_t	cnt,		/* count of entries in the list */
-	int		whichfork,	/* data or attr or cow fork */
-	unsigned long	caller_ip)
-{
-	xfs_extnum_t	idx;		/* extent record index */
-	xfs_ifork_t	*ifp;		/* inode fork pointer */
-	int		state = 0;
-
-	if (whichfork == XFS_ATTR_FORK)
-		state |= BMAP_ATTRFORK;
-	else if (whichfork == XFS_COW_FORK)
-		state |= BMAP_COWFORK;
-
-	ifp = XFS_IFORK_PTR(ip, whichfork);
-	ASSERT(cnt == xfs_iext_count(ifp));
-	for (idx = 0; idx < cnt; idx++)
-		trace_xfs_extlist(ip, idx, state, caller_ip);
-}
-
/*
 * Validate that the bmbt_irecs being returned from bmapi are valid
 * given the caller's original parameters.  Specifically check the
@@ -657,8 +621,8 @@ xfs_bmap_btree_to_extents(
	cbno = be64_to_cpu(*pp);
	*logflagsp = 0;
#ifdef DEBUG
-	if ((error = xfs_btree_check_lptr(cur, cbno, 1)))
-		return error;
+	XFS_WANT_CORRUPTED_RETURN(cur->bc_mp,
+			xfs_btree_check_lptr(cur, cbno, 1));
#endif
	error = xfs_btree_read_bufl(mp, tp, cbno, 0, &cbp, XFS_BMAP_BTREE_REF,
				&xfs_bmbt_buf_ops);
@@ -703,14 +667,14 @@ xfs_bmap_extents_to_btree(
	xfs_bmbt_rec_t		*arp;		/* child record pointer */
	struct xfs_btree_block	*block;		/* btree root block */
	xfs_btree_cur_t		*cur;		/* bmap btree cursor */
-	xfs_bmbt_rec_host_t	*ep;		/* extent record pointer */
	int			error;		/* error return value */
-	xfs_extnum_t		i, cnt;		/* extent record index */
	xfs_ifork_t		*ifp;		/* inode fork pointer */
	xfs_bmbt_key_t		*kp;		/* root block key pointer */
	xfs_mount_t		*mp;		/* mount structure */
-	xfs_extnum_t		nextents;	/* number of file extents */
	xfs_bmbt_ptr_t		*pp;		/* root block address pointer */
+	struct xfs_iext_cursor	icur;
+	struct xfs_bmbt_irec	rec;
+	xfs_extnum_t		cnt = 0;
	mp = ip->i_mount;
	ASSERT(whichfork != XFS_COW_FORK);
@@ -789,15 +753,12 @@ xfs_bmap_extents_to_btree(
				XFS_BTNUM_BMAP, 0, 0, ip->i_ino,
				XFS_BTREE_LONG_PTRS);
-	arp = XFS_BMBT_REC_ADDR(mp, ablock, 1);
-	nextents = xfs_iext_count(ifp);
-	for (cnt = i = 0; i < nextents; i++) {
-		ep = xfs_iext_get_ext(ifp, i);
-		if (!isnullstartblock(xfs_bmbt_get_startblock(ep))) {
-			arp->l0 = cpu_to_be64(ep->l0);
-			arp->l1 = cpu_to_be64(ep->l1);
-			arp++; cnt++;
-		}
+	for_each_xfs_iext(ifp, &icur, &rec) {
+		if (isnullstartblock(rec.br_startblock))
+			continue;
+		arp = XFS_BMBT_REC_ADDR(mp, ablock, 1 + cnt);
+		xfs_bmbt_disk_set_all(arp, &rec);
+		cnt++;
	}
	ASSERT(cnt == XFS_IFORK_NEXTENTS(ip, whichfork));
	xfs_btree_set_numrecs(ablock, cnt);
@@ -845,6 +806,8 @@ xfs_bmap_local_to_extents_empty(
	xfs_bmap_forkoff_reset(ip, whichfork);
	ifp->if_flags &= ~XFS_IFINLINE;
	ifp->if_flags |= XFS_IFEXTENTS;
+	ifp->if_u1.if_root = NULL;
+	ifp->if_height = 0;
	XFS_IFORK_FMT_SET(ip, whichfork, XFS_DINODE_FMT_EXTENTS);
}
@@ -868,6 +831,7 @@ xfs_bmap_local_to_extents(
	xfs_alloc_arg_t	args;		/* allocation arguments */
	xfs_buf_t	*bp;		/* buffer for extent block */
	struct xfs_bmbt_irec rec;
+	struct xfs_iext_cursor icur;
	/*
	 * We don't want to deal with the case of keeping inode data inline yet.
@@ -885,8 +849,7 @@ xfs_bmap_local_to_extents(
	flags = 0;
	error = 0;
-	ASSERT((ifp->if_flags & (XFS_IFINLINE|XFS_IFEXTENTS|XFS_IFEXTIREC)) ==
-								XFS_IFINLINE);
+	ASSERT((ifp->if_flags & (XFS_IFINLINE|XFS_IFEXTENTS)) == XFS_IFINLINE);
	memset(&args, 0, sizeof(args));
	args.tp = tp;
	args.mp = ip->i_mount;
@@ -930,15 +893,16 @@ xfs_bmap_local_to_extents(
	xfs_bmap_local_to_extents_empty(ip, whichfork);
	flags |= XFS_ILOG_CORE;
+	ifp->if_u1.if_root = NULL;
+	ifp->if_height = 0;
+
	rec.br_startoff = 0;
	rec.br_startblock = args.fsbno;
	rec.br_blockcount = 1;
	rec.br_state = XFS_EXT_NORM;
-	xfs_iext_insert(ip, 0, 1, &rec, 0);
+	xfs_iext_first(ifp, &icur);
+	xfs_iext_insert(ip, &icur, &rec, 0);
-	trace_xfs_bmap_post_update(ip, 0,
-			whichfork == XFS_ATTR_FORK ? BMAP_ATTRFORK : 0,
-			_THIS_IP_);
	XFS_IFORK_NEXT_SET(ip, whichfork, 1);
	ip->i_d.di_nblocks = 1;
	xfs_trans_mod_dquot_byino(tp, ip,
@@ -973,7 +937,8 @@ xfs_bmap_add_attrfork_btree(
		cur = xfs_bmbt_init_cursor(mp, tp, ip, XFS_DATA_FORK);
		cur->bc_private.b.dfops = dfops;
		cur->bc_private.b.firstblock = *firstblock;
-		if ((error = xfs_bmbt_lookup_ge(cur, 0, 0, 0, &stat)))
+		error = xfs_bmbt_lookup_first(cur, &stat);
+		if (error)
			goto error0;
		/* must be at least one entry */
		XFS_WANT_CORRUPTED_GOTO(mp, stat == 1, error0);
@@ -1124,9 +1089,6 @@ xfs_bmap_add_attrfork(
	case XFS_DINODE_FMT_DEV:
		ip->i_d.di_forkoff = roundup(sizeof(xfs_dev_t), 8) >> 3;
		break;
-	case XFS_DINODE_FMT_UUID:
-		ip->i_d.di_forkoff = roundup(sizeof(uuid_t), 8) >> 3;
-		break;
	case XFS_DINODE_FMT_LOCAL:
	case XFS_DINODE_FMT_EXTENTS:
	case XFS_DINODE_FMT_BTREE:
@@ -1206,32 +1168,35 @@ trans_cancel:
 */
/*
- * Read in the extents to if_extents.
- * All inode fields are set up by caller, we just traverse the btree
- * and copy the records in. If the file system cannot contain unwritten
- * extents, the records are checked for no "state" flags.
+ * Read in extents from a btree-format inode.
 */
-int					/* error */
-xfs_bmap_read_extents(
-	xfs_trans_t		*tp,	/* transaction pointer */
-	xfs_inode_t		*ip,	/* incore inode */
-	int			whichfork) /* data or attr fork */
+int
+xfs_iread_extents(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	int			whichfork)
{
-	struct xfs_btree_block	*block;	/* current btree block */
-	xfs_fsblock_t		bno;	/* block # of "block" */
-	xfs_buf_t		*bp;	/* buffer for "block" */
-	int			error;	/* error return value */
-	xfs_extnum_t		i, j;	/* index into the extents list */
-	xfs_ifork_t		*ifp;	/* fork structure */
-	int			level;	/* btree level, for checking */
-	xfs_mount_t		*mp;	/* file system mount structure */
-	__be64			*pp;	/* pointer to block address */
-	/* REFERENCED */
-	xfs_extnum_t		room;	/* number of entries there's room for */
+	struct xfs_mount	*mp = ip->i_mount;
+	int			state = xfs_bmap_fork_to_state(whichfork);
+	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, whichfork);
+	xfs_extnum_t		nextents = XFS_IFORK_NEXTENTS(ip, whichfork);
+	struct xfs_btree_block	*block = ifp->if_broot;
+	struct xfs_iext_cursor	icur;
+	struct xfs_bmbt_irec	new;
+	xfs_fsblock_t		bno;
+	struct xfs_buf		*bp;
+	xfs_extnum_t		i, j;
+	int			level;
+	__be64			*pp;
+	int			error;
+
+	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
+
+	if (unlikely(XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE)) {
+		XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, mp);
+		return -EFSCORRUPTED;
+	}
-	mp = ip->i_mount;
-	ifp = XFS_IFORK_PTR(ip, whichfork);
-	block = ifp->if_broot;
	/*
	 * Root level must use BMAP_BROOT_PTR_ADDR macro to get ptr out.
	 */
@@ -1248,21 +1213,23 @@ xfs_bmap_read_extents(
		error = xfs_btree_read_bufl(mp, tp, bno, 0, &bp,
				XFS_BMAP_BTREE_REF, &xfs_bmbt_buf_ops);
		if (error)
-			return error;
+			goto out;
		block = XFS_BUF_TO_BLOCK(bp);
		if (level == 0)
			break;
		pp = XFS_BMBT_PTR_ADDR(mp, block, 1, mp->m_bmap_dmxr[1]);
		bno = be64_to_cpu(*pp);
		XFS_WANT_CORRUPTED_GOTO(mp,
-			XFS_FSB_SANITY_CHECK(mp, bno), error0);
+			XFS_FSB_SANITY_CHECK(mp, bno), out_brelse);
		xfs_trans_brelse(tp, bp);
	}
+
	/*
	 * Here with bp and block set to the leftmost leaf node in the tree.
	 */
-	room = xfs_iext_count(ifp);
	i = 0;
+	xfs_iext_first(ifp, &icur);
+
	/*
	 * Loop over all leaf nodes.  Copy information to the extent records.
	 */
@@ -1272,14 +1239,15 @@ xfs_bmap_read_extents(
		xfs_extnum_t	num_recs;
		num_recs = xfs_btree_get_numrecs(block);
-		if (unlikely(i + num_recs > room)) {
-			ASSERT(i + num_recs <= room);
+		if (unlikely(i + num_recs > nextents)) {
+			ASSERT(i + num_recs <= nextents);
			xfs_warn(ip->i_mount,
				"corrupt dinode %Lu, (btree extents).",
				(unsigned long long) ip->i_ino);
-			XFS_CORRUPTION_ERROR("xfs_bmap_read_extents(1)",
+			XFS_CORRUPTION_ERROR(__func__,
				XFS_ERRLEVEL_LOW, ip->i_mount, block);
-			goto error0;
+			error = -EFSCORRUPTED;
+			goto out_brelse;
		}
		/*
		 * Read-ahead the next leaf block, if any.
@@ -1292,15 +1260,17 @@ xfs_bmap_read_extents(
		 * Copy records into the extent records.
		 */
		frp = XFS_BMBT_REC_ADDR(mp, block, 1);
-		for (j = 0; j < num_recs; j++, i++, frp++) {
-			xfs_bmbt_rec_host_t *trp = xfs_iext_get_ext(ifp, i);
-			trp->l0 = be64_to_cpu(frp->l0);
-			trp->l1 = be64_to_cpu(frp->l1);
-			if (!xfs_bmbt_validate_extent(mp, whichfork, trp)) {
+		for (j = 0; j < num_recs; j++, frp++, i++) {
+			xfs_bmbt_disk_get_all(frp, &new);
+			if (!xfs_bmbt_validate_extent(mp, whichfork, &new)) {
				XFS_ERROR_REPORT("xfs_bmap_read_extents(2)",
						 XFS_ERRLEVEL_LOW, mp);
-				goto error0;
+				error = -EFSCORRUPTED;
+				goto out_brelse;
			}
+			xfs_iext_insert(ip, &icur, &new, state);
+			trace_xfs_read_extent(ip, &icur, state, _THIS_IP_);
+			xfs_iext_next(ifp, &icur);
		}
		xfs_trans_brelse(tp, bp);
		bno = nextbno;
@@ -1312,71 +1282,74 @@ xfs_bmap_read_extents(
		error = xfs_btree_read_bufl(mp, tp, bno, 0, &bp,
				XFS_BMAP_BTREE_REF, &xfs_bmbt_buf_ops);
		if (error)
-			return error;
+			goto out;
		block = XFS_BUF_TO_BLOCK(bp);
	}
-	if (i != XFS_IFORK_NEXTENTS(ip, whichfork))
-		return -EFSCORRUPTED;
+
+	if (i != XFS_IFORK_NEXTENTS(ip, whichfork)) {
+		error = -EFSCORRUPTED;
+		goto out;
+	}
	ASSERT(i == xfs_iext_count(ifp));
-	XFS_BMAP_TRACE_EXLIST(ip, i, whichfork);
+
+	ifp->if_flags |= XFS_IFEXTENTS;
	return 0;
-error0:
+
+out_brelse:
	xfs_trans_brelse(tp, bp);
-	return -EFSCORRUPTED;
+out:
+	xfs_iext_destroy(ifp);
+	return error;
}
/*
- * Returns the file-relative block number of the first unused block(s)
- * in the file with at least "len" logically contiguous blocks free.
- * This is the lowest-address hole if the file has holes, else the first block
- * past the end of file.
- * Return 0 if the file is currently local (in-inode).
+ * Returns the relative block number of the first unused block(s) in the given
+ * fork with at least "len" logically contiguous blocks free. This is the
+ * lowest-address hole if the fork has holes, else the first block past the end
+ * of fork. Return 0 if the fork is currently local (in-inode).
 */
int						/* error */
xfs_bmap_first_unused(
-	xfs_trans_t	*tp,			/* transaction pointer */
-	xfs_inode_t	*ip,			/* incore inode */
-	xfs_extlen_t	len,			/* size of hole to find */
-	xfs_fileoff_t	*first_unused,		/* unused block */
-	int		whichfork)		/* data or attr fork */
+	struct xfs_trans	*tp,		/* transaction pointer */
+	struct xfs_inode	*ip,		/* incore inode */
+	xfs_extlen_t		len,		/* size of hole to find */
+	xfs_fileoff_t		*first_unused,	/* unused block */
+	int			whichfork)	/* data or attr fork */
{
-	int		error;			/* error return value */
-	int		idx;			/* extent record index */
-	xfs_ifork_t	*ifp;			/* inode fork pointer */
-	xfs_fileoff_t	lastaddr;		/* last block number seen */
-	xfs_fileoff_t	lowest;			/* lowest useful block */
-	xfs_fileoff_t	max;			/* starting useful block */
-	xfs_extnum_t	nextents;		/* number of extent entries */
+	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, whichfork);
+	struct xfs_bmbt_irec	got;
+	struct xfs_iext_cursor	icur;
+	xfs_fileoff_t		lastaddr = 0;
+	xfs_fileoff_t		lowest, max;
+	int			error;
	ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE ||
	       XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS ||
	       XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL);
+
	if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL) {
		*first_unused = 0;
		return 0;
	}
-	ifp = XFS_IFORK_PTR(ip, whichfork);
-	if (!(ifp->if_flags & XFS_IFEXTENTS) &&
-	    (error = xfs_iread_extents(tp, ip, whichfork)))
-		return error;
-	lowest = *first_unused;
-	nextents = xfs_iext_count(ifp);
-	for (idx = 0, lastaddr = 0, max = lowest; idx < nextents; idx++) {
-		struct xfs_bmbt_irec got;
-		xfs_iext_get_extent(ifp, idx, &got);
+	if (!(ifp->if_flags & XFS_IFEXTENTS)) {
+		error = xfs_iread_extents(tp, ip, whichfork);
+		if (error)
+			return error;
+	}
+	lowest = max = *first_unused;
+	for_each_xfs_iext(ifp, &icur, &got) {
		/*
		 * See if the hole before this extent will work.
		 */
		if (got.br_startoff >= lowest + len &&
-		    got.br_startoff - max >= len) {
-			*first_unused = max;
-			return 0;
-		}
+		    got.br_startoff - max >= len)
+			break;
		lastaddr = got.br_startoff + got.br_blockcount;
		max = XFS_FILEOFF_MAX(lastaddr, lowest);
	}
+
	*first_unused = max;
	return 0;
}
@@ -1396,7 +1369,7 @@ xfs_bmap_last_before(
{
	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, whichfork);
	struct xfs_bmbt_irec	got;
-	xfs_extnum_t		idx;
+	struct xfs_iext_cursor	icur;
	int			error;
	switch (XFS_IFORK_FORMAT(ip, whichfork)) {
@@ -1416,17 +1389,8 @@ xfs_bmap_last_before(
			return error;
	}
-	if (xfs_iext_lookup_extent(ip, ifp, *last_block - 1, &idx, &got)) {
-		if (got.br_startoff <= *last_block - 1)
-			return 0;
-	}
-
-	if (xfs_iext_get_extent(ifp, idx - 1, &got)) {
-		*last_block = got.br_startoff + got.br_blockcount;
-		return 0;
-	}
-
-	*last_block = 0;
+	if (!xfs_iext_lookup_extent_before(ip, ifp, last_block, &icur, &got))
+		*last_block = 0;
	return 0;
}
@@ -1439,8 +1403,8 @@ xfs_bmap_last_extent(
	int			*is_empty)
{
	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, whichfork);
+	struct xfs_iext_cursor	icur;
	int			error;
-	int			nextents;
	if (!(ifp->if_flags & XFS_IFEXTENTS)) {
		error = xfs_iread_extents(tp, ip, whichfork);
@@ -1448,14 +1412,11 @@ xfs_bmap_last_extent(
			return error;
	}
-	nextents = xfs_iext_count(ifp);
-	if (nextents == 0) {
+	xfs_iext_last(ifp, &icur);
+	if (!xfs_iext_get_extent(ifp, &icur, rec))
		*is_empty = 1;
-		return 0;
-	}
-
-	xfs_bmbt_get_all(xfs_iext_get_ext(ifp, nextents - 1), rec);
-	*is_empty = 0;
+	else
+		*is_empty = 0;
	return 0;
}
@@ -1540,10 +1501,10 @@ xfs_bmap_one_block(
	xfs_inode_t	*ip,		/* incore inode */
	int		whichfork)	/* data or attr fork */
{
-	xfs_bmbt_rec_host_t *ep;	/* ptr to fork's extent */
	xfs_ifork_t	*ifp;		/* inode fork pointer */
	int		rval;		/* return value */
	xfs_bmbt_irec_t	s;		/* internal version of extent */
+	struct xfs_iext_cursor icur;
#ifndef DEBUG
	if (whichfork == XFS_DATA_FORK)
@@ -1555,8 +1516,8 @@ xfs_bmap_one_block(
		return 0;
	ifp = XFS_IFORK_PTR(ip, whichfork);
	ASSERT(ifp->if_flags & XFS_IFEXTENTS);
-	ep = xfs_iext_get_ext(ifp, 0);
-	xfs_bmbt_get_all(ep, &s);
+	xfs_iext_first(ifp, &icur);
+	xfs_iext_get_extent(ifp, &icur, &s);
	rval = s.br_startoff == 0 && s.br_blockcount == 1;
	if (rval && whichfork == XFS_DATA_FORK)
		ASSERT(XFS_ISIZE(ip) == ip->i_mount->m_sb.sb_blocksize);
@@ -1576,8 +1537,6 @@ xfs_bmap_add_extent_delay_real(
	int			whichfork)
{
	struct xfs_bmbt_irec	*new = &bma->got;
-	int			diff;	/* temp value */
-	xfs_bmbt_rec_host_t	*ep;	/* extent entry for idx */
	int			error;	/* error return value */
	int			i;	/* temp state */
	xfs_ifork_t		*ifp;	/* inode fork pointer */
@@ -1585,14 +1544,14 @@ xfs_bmap_add_extent_delay_real(
	xfs_bmbt_irec_t		r[3];	/* neighbor extent entries */
					/* left is 0, right is 1, prev is 2 */
	int			rval=0;	/* return value (logging flags) */
-	int			state = 0;/* state bits, accessed thru macros */
+	int			state = xfs_bmap_fork_to_state(whichfork);
	xfs_filblks_t		da_new; /* new count del alloc blocks used */
	xfs_filblks_t		da_old; /* old count del alloc blocks used */
	xfs_filblks_t		temp=0;	/* value for da_new calculations */
-	xfs_filblks_t		temp2=0;/* value for da_new calculations */
	int			tmp_rval;	/* partial logging flags */
	struct xfs_mount	*mp;
	xfs_extnum_t		*nextents;
+	struct xfs_bmbt_irec	old;
	mp = bma->ip->i_mount;
	ifp = XFS_IFORK_PTR(bma->ip, whichfork);
@@ -1600,8 +1559,6 @@ xfs_bmap_add_extent_delay_real(
	nextents = (whichfork == XFS_COW_FORK ? &bma->ip->i_cnextents :
						&bma->ip->i_d.di_nextents);
-	ASSERT(bma->idx >= 0);
-	ASSERT(bma->idx <= xfs_iext_count(ifp));
	ASSERT(!isnullstartblock(new->br_startblock));
	ASSERT(!bma->cur ||
	       (bma->cur->bc_private.b.flags & XFS_BTCUR_BPRV_WASDEL));
@@ -1612,15 +1569,12 @@ xfs_bmap_add_extent_delay_real(
#define	RIGHT		r[1]
#define	PREV		r[2]
-	if (whichfork == XFS_COW_FORK)
-		state |= BMAP_COWFORK;
-
	/*
	 * Set up a bunch of variables to make the tests simpler.
	 */
-	ep = xfs_iext_get_ext(ifp, bma->idx);
-	xfs_bmbt_get_all(ep, &PREV);
+	xfs_iext_get_extent(ifp, &bma->icur, &PREV);
	new_endoff = new->br_startoff + new->br_blockcount;
+	ASSERT(isnullstartblock(PREV.br_startblock));
	ASSERT(PREV.br_startoff <= new->br_startoff);
	ASSERT(PREV.br_startoff + PREV.br_blockcount >= new_endoff);
@@ -1640,10 +1594,8 @@ xfs_bmap_add_extent_delay_real(
	 * Check and set flags if this segment has a left neighbor.
	 * Don't set contiguous if the combined extent would be too large.
	 */
-	if (bma->idx > 0) {
+	if (xfs_iext_peek_prev_extent(ifp, &bma->icur, &LEFT)) {
		state |= BMAP_LEFT_VALID;
-		xfs_bmbt_get_all(xfs_iext_get_ext(ifp, bma->idx - 1), &LEFT);
-
		if (isnullstartblock(LEFT.br_startblock))
			state |= BMAP_LEFT_DELAY;
	}
@@ -1660,10 +1612,8 @@ xfs_bmap_add_extent_delay_real(
	 * Don't set contiguous if the combined extent would be too large.
	 * Also check for all-three-contiguous being too large.
	 */
-	if (bma->idx < xfs_iext_count(ifp) - 1) {
+	if (xfs_iext_peek_next_extent(ifp, &bma->icur, &RIGHT)) {
		state |= BMAP_RIGHT_VALID;
-		xfs_bmbt_get_all(xfs_iext_get_ext(ifp, bma->idx + 1), &RIGHT);
-
		if (isnullstartblock(RIGHT.br_startblock))
			state |= BMAP_RIGHT_DELAY;
	}
@@ -1693,22 +1643,19 @@ xfs_bmap_add_extent_delay_real(
		 * Filling in all of a previously delayed allocation extent.
		 * The left and right neighbors are both contiguous with new.
		 */
-		bma->idx--;
-		trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_);
-		xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, bma->idx),
-			LEFT.br_blockcount + PREV.br_blockcount +
-			RIGHT.br_blockcount);
-		trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_);
-
-		xfs_iext_remove(bma->ip, bma->idx + 1, 2, state);
+		LEFT.br_blockcount += PREV.br_blockcount + RIGHT.br_blockcount;
+
+		xfs_iext_remove(bma->ip, &bma->icur, state);
+		xfs_iext_remove(bma->ip, &bma->icur, state);
+		xfs_iext_prev(ifp, &bma->icur);
+		xfs_iext_update_extent(bma->ip, state, &bma->icur, &LEFT);
		(*nextents)--;
+
		if (bma->cur == NULL)
			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
		else {
			rval = XFS_ILOG_CORE;
-			error = xfs_bmbt_lookup_eq(bma->cur, RIGHT.br_startoff,
-					RIGHT.br_startblock,
-					RIGHT.br_blockcount, &i);
+			error = xfs_bmbt_lookup_eq(bma->cur, &RIGHT, &i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
@@ -1720,11 +1667,7 @@ xfs_bmap_add_extent_delay_real(
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
-			error = xfs_bmbt_update(bma->cur, LEFT.br_startoff,
-					LEFT.br_startblock,
-					LEFT.br_blockcount +
-					PREV.br_blockcount +
-					RIGHT.br_blockcount, LEFT.br_state);
+			error = xfs_bmbt_update(bma->cur, &LEFT);
			if (error)
				goto done;
		}
@@ -1735,28 +1678,22 @@ xfs_bmap_add_extent_delay_real(
		 * Filling in all of a previously delayed allocation extent.
		 * The left neighbor is contiguous, the right is not.
		 */
-		bma->idx--;
+		old = LEFT;
+		LEFT.br_blockcount += PREV.br_blockcount;
-		trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_);
-		xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, bma->idx),
-			LEFT.br_blockcount + PREV.br_blockcount);
-		trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_);
+		xfs_iext_remove(bma->ip, &bma->icur, state);
+		xfs_iext_prev(ifp, &bma->icur);
+		xfs_iext_update_extent(bma->ip, state, &bma->icur, &LEFT);
-		xfs_iext_remove(bma->ip, bma->idx + 1, 1, state);
		if (bma->cur == NULL)
			rval = XFS_ILOG_DEXT;
		else {
			rval = 0;
-			error = xfs_bmbt_lookup_eq(bma->cur, LEFT.br_startoff,
-					LEFT.br_startblock, LEFT.br_blockcount,
-					&i);
+			error = xfs_bmbt_lookup_eq(bma->cur, &old, &i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
-			error = xfs_bmbt_update(bma->cur, LEFT.br_startoff,
-					LEFT.br_startblock,
-					LEFT.br_blockcount +
-					PREV.br_blockcount, LEFT.br_state);
+			error = xfs_bmbt_update(bma->cur, &LEFT);
			if (error)
				goto done;
		}
@@ -1767,27 +1704,23 @@ xfs_bmap_add_extent_delay_real(
		 * Filling in all of a previously delayed allocation extent.
		 * The right neighbor is contiguous, the left is not.
		 */
-		trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_);
-		xfs_bmbt_set_startblock(ep, new->br_startblock);
-		xfs_bmbt_set_blockcount(ep,
-			PREV.br_blockcount + RIGHT.br_blockcount);
-		trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_);
+		PREV.br_startblock = new->br_startblock;
+		PREV.br_blockcount += RIGHT.br_blockcount;
+
+		xfs_iext_next(ifp, &bma->icur);
+		xfs_iext_remove(bma->ip, &bma->icur, state);
+		xfs_iext_prev(ifp, &bma->icur);
+		xfs_iext_update_extent(bma->ip, state, &bma->icur, &PREV);
-		xfs_iext_remove(bma->ip, bma->idx + 1, 1, state);
		if (bma->cur == NULL)
			rval = XFS_ILOG_DEXT;
		else {
			rval = 0;
-			error = xfs_bmbt_lookup_eq(bma->cur, RIGHT.br_startoff,
-					RIGHT.br_startblock,
-					RIGHT.br_blockcount, &i);
+			error = xfs_bmbt_lookup_eq(bma->cur, &RIGHT, &i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
-			error = xfs_bmbt_update(bma->cur, PREV.br_startoff,
-					new->br_startblock,
-					PREV.br_blockcount +
-					RIGHT.br_blockcount, PREV.br_state);
+			error = xfs_bmbt_update(bma->cur, &PREV);
			if (error)
				goto done;
		}
@@ -1799,23 +1732,19 @@ xfs_bmap_add_extent_delay_real(
		 * Neither the left nor right neighbors are contiguous with
		 * the new one.
		 */
-		trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_);
-		xfs_bmbt_set_startblock(ep, new->br_startblock);
-		xfs_bmbt_set_state(ep, new->br_state);
-		trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_);
+		PREV.br_startblock = new->br_startblock;
+		PREV.br_state = new->br_state;
+		xfs_iext_update_extent(bma->ip, state, &bma->icur, &PREV);
		(*nextents)++;
		if (bma->cur == NULL)
			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
		else {
			rval = XFS_ILOG_CORE;
-			error = xfs_bmbt_lookup_eq(bma->cur, new->br_startoff,
-					new->br_startblock, new->br_blockcount,
-					&i);
+			error = xfs_bmbt_lookup_eq(bma->cur, new, &i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done);
-			bma->cur->bc_rec.b.br_state = XFS_EXT_NORM;
			error = xfs_btree_insert(bma->cur, &i);
			if (error)
				goto done;
@@ -1828,40 +1757,33 @@ xfs_bmap_add_extent_delay_real(
		 * Filling in the first part of a previous delayed allocation.
		 * The left neighbor is contiguous.
		 */
-		trace_xfs_bmap_pre_update(bma->ip, bma->idx - 1, state, _THIS_IP_);
-		xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, bma->idx - 1),
-			LEFT.br_blockcount + new->br_blockcount);
-		xfs_bmbt_set_startoff(ep,
-			PREV.br_startoff + new->br_blockcount);
-		trace_xfs_bmap_post_update(bma->ip, bma->idx - 1, state, _THIS_IP_);
-
+		old = LEFT;
		temp = PREV.br_blockcount - new->br_blockcount;
-		trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_);
-		xfs_bmbt_set_blockcount(ep, temp);
+		da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp),
+				startblockval(PREV.br_startblock));
+
+		LEFT.br_blockcount += new->br_blockcount;
+
+		PREV.br_blockcount = temp;
+		PREV.br_startoff += new->br_blockcount;
+		PREV.br_startblock = nullstartblock(da_new);
+
+		xfs_iext_update_extent(bma->ip, state, &bma->icur, &PREV);
+		xfs_iext_prev(ifp, &bma->icur);
+		xfs_iext_update_extent(bma->ip, state, &bma->icur, &LEFT);
+
		if (bma->cur == NULL)
			rval = XFS_ILOG_DEXT;
		else {
			rval = 0;
-			error = xfs_bmbt_lookup_eq(bma->cur, LEFT.br_startoff,
-					LEFT.br_startblock, LEFT.br_blockcount,
-					&i);
+			error = xfs_bmbt_lookup_eq(bma->cur, &old, &i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
-			error = xfs_bmbt_update(bma->cur, LEFT.br_startoff,
-					LEFT.br_startblock,
-					LEFT.br_blockcount +
-					new->br_blockcount,
-					LEFT.br_state);
+			error = xfs_bmbt_update(bma->cur, &LEFT);
			if (error)
				goto done;
		}
-		da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp),
-			startblockval(PREV.br_startblock));
-		xfs_bmbt_set_startblock(ep, nullstartblock(da_new));
-		trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_);
-
-		bma->idx--;
		break;
	case BMAP_LEFT_FILLING:
@@ -1869,23 +1791,16 @@ xfs_bmap_add_extent_delay_real(
		 * Filling in the first part of a previous delayed allocation.
		 * The left neighbor is not contiguous.
		 */
-		trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_);
-		xfs_bmbt_set_startoff(ep, new_endoff);
-		temp = PREV.br_blockcount - new->br_blockcount;
-		xfs_bmbt_set_blockcount(ep, temp);
-		xfs_iext_insert(bma->ip, bma->idx, 1, new, state);
+		xfs_iext_update_extent(bma->ip, state, &bma->icur, new);
		(*nextents)++;
		if (bma->cur == NULL)
			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
		else {
			rval = XFS_ILOG_CORE;
-			error = xfs_bmbt_lookup_eq(bma->cur, new->br_startoff,
-					new->br_startblock, new->br_blockcount,
-					&i);
+			error = xfs_bmbt_lookup_eq(bma->cur, new, &i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done);
-			bma->cur->bc_rec.b.br_state = XFS_EXT_NORM;
			error = xfs_btree_insert(bma->cur, &i);
			if (error)
				goto done;
@@ -1900,12 +1815,18 @@ xfs_bmap_add_extent_delay_real(
			if (error)
				goto done;
		}
+
+		temp = PREV.br_blockcount - new->br_blockcount;
		da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp),
			startblockval(PREV.br_startblock) -
			(bma->cur ? bma->cur->bc_private.b.allocated : 0));
-		ep = xfs_iext_get_ext(ifp, bma->idx + 1);
-		xfs_bmbt_set_startblock(ep, nullstartblock(da_new));
-		trace_xfs_bmap_post_update(bma->ip, bma->idx + 1, state, _THIS_IP_);
+
+		PREV.br_startoff = new_endoff;
+		PREV.br_blockcount = temp;
+		PREV.br_startblock = nullstartblock(da_new);
+		xfs_iext_next(ifp, &bma->icur);
+		xfs_iext_insert(bma->ip, &bma->icur, &PREV, state);
+		xfs_iext_prev(ifp, &bma->icur);
		break;
	case BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
@@ -1913,40 +1834,34 @@ xfs_bmap_add_extent_delay_real(
		 * Filling in the last part of a previous delayed allocation.
		 * The right neighbor is contiguous with the new allocation.
		 */
-		temp = PREV.br_blockcount - new->br_blockcount;
-		trace_xfs_bmap_pre_update(bma->ip, bma->idx + 1, state, _THIS_IP_);
-		xfs_bmbt_set_blockcount(ep, temp);
-		xfs_bmbt_set_allf(xfs_iext_get_ext(ifp, bma->idx + 1),
-			new->br_startoff, new->br_startblock,
-			new->br_blockcount + RIGHT.br_blockcount,
-			RIGHT.br_state);
-		trace_xfs_bmap_post_update(bma->ip, bma->idx + 1, state, _THIS_IP_);
+		old = RIGHT;
+		RIGHT.br_startoff = new->br_startoff;
+		RIGHT.br_startblock = new->br_startblock;
+		RIGHT.br_blockcount += new->br_blockcount;
+
		if (bma->cur == NULL)
			rval = XFS_ILOG_DEXT;
		else {
			rval = 0;
-			error = xfs_bmbt_lookup_eq(bma->cur, RIGHT.br_startoff,
-					RIGHT.br_startblock,
-					RIGHT.br_blockcount, &i);
+			error = xfs_bmbt_lookup_eq(bma->cur, &old, &i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
-			error = xfs_bmbt_update(bma->cur, new->br_startoff,
-					new->br_startblock,
-					new->br_blockcount +
-					RIGHT.br_blockcount,
-					RIGHT.br_state);
+			error = xfs_bmbt_update(bma->cur, &RIGHT);
			if (error)
				goto done;
		}
+		temp = PREV.br_blockcount - new->br_blockcount;
		da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp),
			startblockval(PREV.br_startblock));
-		trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_);
-		xfs_bmbt_set_startblock(ep, nullstartblock(da_new));
-		trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_);
-		bma->idx++;
+		PREV.br_blockcount = temp;
+		PREV.br_startblock = nullstartblock(da_new);
+
+		xfs_iext_update_extent(bma->ip, state, &bma->icur, &PREV);
+		xfs_iext_next(ifp, &bma->icur);
+		xfs_iext_update_extent(bma->ip, state, &bma->icur, &RIGHT);
		break;
	case BMAP_RIGHT_FILLING:
@@ -1954,22 +1869,16 @@ xfs_bmap_add_extent_delay_real(
		 * Filling in the last part of a previous delayed allocation.
		 * The right neighbor is not contiguous.
		 */
-		temp = PREV.br_blockcount - new->br_blockcount;
-		trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_);
-		xfs_bmbt_set_blockcount(ep, temp);
-		xfs_iext_insert(bma->ip, bma->idx + 1, 1, new, state);
+		xfs_iext_update_extent(bma->ip, state, &bma->icur, new);
		(*nextents)++;
		if (bma->cur == NULL)
			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
		else {
			rval = XFS_ILOG_CORE;
-			error = xfs_bmbt_lookup_eq(bma->cur, new->br_startoff,
-					new->br_startblock, new->br_blockcount,
-					&i);
+			error = xfs_bmbt_lookup_eq(bma->cur, new, &i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done);
-			bma->cur->bc_rec.b.br_state = XFS_EXT_NORM;
			error = xfs_btree_insert(bma->cur, &i);
			if (error)
				goto done;
@@ -1984,14 +1893,16 @@ xfs_bmap_add_extent_delay_real(
			if (error)
				goto done;
		}
+
+		temp = PREV.br_blockcount - new->br_blockcount;
		da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp),
			startblockval(PREV.br_startblock) -
			(bma->cur ? bma->cur->bc_private.b.allocated : 0));
-		ep = xfs_iext_get_ext(ifp, bma->idx);
-		xfs_bmbt_set_startblock(ep, nullstartblock(da_new));
-		trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_);
-		bma->idx++;
+		PREV.br_startblock = nullstartblock(da_new);
+		PREV.br_blockcount = temp;
+		xfs_iext_insert(bma->ip, &bma->icur, &PREV, state);
+		xfs_iext_next(ifp, &bma->icur);
		break;
	case 0:
@@ -2015,30 +1926,40 @@ xfs_bmap_add_extent_delay_real(
		 *  PREV @ idx	        LEFT              RIGHT
		 *                      inserted at idx + 1
		 */
-		temp = new->br_startoff - PREV.br_startoff;
-		temp2 = PREV.br_startoff + PREV.br_blockcount - new_endoff;
-		trace_xfs_bmap_pre_update(bma->ip, bma->idx, 0, _THIS_IP_);
-		xfs_bmbt_set_blockcount(ep, temp);	/* truncate PREV */
+		old = PREV;
+
+		/* LEFT is the new middle */
		LEFT = *new;
+
+		/* RIGHT is the new right */
		RIGHT.br_state = PREV.br_state;
-		RIGHT.br_startblock = nullstartblock(
-				(int)xfs_bmap_worst_indlen(bma->ip, temp2));
		RIGHT.br_startoff = new_endoff;
-		RIGHT.br_blockcount = temp2;
-		/* insert LEFT (r[0]) and RIGHT (r[1]) at the same time */
-		xfs_iext_insert(bma->ip, bma->idx + 1, 2, &LEFT, state);
+		RIGHT.br_blockcount =
+			PREV.br_startoff + PREV.br_blockcount - new_endoff;
+		RIGHT.br_startblock =
+			nullstartblock(xfs_bmap_worst_indlen(bma->ip,
+					RIGHT.br_blockcount));
+
+		/* truncate PREV */
+		PREV.br_blockcount = new->br_startoff - PREV.br_startoff;
+		PREV.br_startblock =
+			nullstartblock(xfs_bmap_worst_indlen(bma->ip,
+					PREV.br_blockcount));
+		xfs_iext_update_extent(bma->ip, state, &bma->icur, &PREV);
+
+		xfs_iext_next(ifp, &bma->icur);
+		xfs_iext_insert(bma->ip, &bma->icur, &RIGHT, state);
+		xfs_iext_insert(bma->ip, &bma->icur, &LEFT, state);
		(*nextents)++;
+
		if (bma->cur == NULL)
			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
		else {
			rval = XFS_ILOG_CORE;
-			error = xfs_bmbt_lookup_eq(bma->cur, new->br_startoff,
-					new->br_startblock, new->br_blockcount,
-					&i);
+			error = xfs_bmbt_lookup_eq(bma->cur, new, &i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done);
-			bma->cur->bc_rec.b.br_state = XFS_EXT_NORM;
			error = xfs_btree_insert(bma->cur, &i);
			if (error)
				goto done;
@@ -2053,30 +1974,9 @@ xfs_bmap_add_extent_delay_real(
			if (error)
				goto done;
		}
-		temp = xfs_bmap_worst_indlen(bma->ip, temp);
-		temp2 = xfs_bmap_worst_indlen(bma->ip, temp2);
-		diff = (int)(temp + temp2 -
-			     (startblockval(PREV.br_startblock) -
-			      (bma->cur ?
-			       bma->cur->bc_private.b.allocated : 0)));
-		if (diff > 0) {
-			error = xfs_mod_fdblocks(bma->ip->i_mount,
-						 -((int64_t)diff), false);
-			ASSERT(!error);
-			if (error)
-				goto done;
-		}
-
-		ep = xfs_iext_get_ext(ifp, bma->idx);
-		xfs_bmbt_set_startblock(ep, nullstartblock((int)temp));
-		trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_);
-		trace_xfs_bmap_pre_update(bma->ip, bma->idx + 2, state, _THIS_IP_);
-		xfs_bmbt_set_startblock(xfs_iext_get_ext(ifp, bma->idx + 2),
-			nullstartblock((int)temp2));
-		trace_xfs_bmap_post_update(bma->ip, bma->idx + 2, state, _THIS_IP_);
-		bma->idx++;
-		da_new = temp + temp2;
+		da_new = startblockval(PREV.br_startblock) +
+			 startblockval(RIGHT.br_startblock);
		break;
	case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
@@ -2110,19 +2010,17 @@ xfs_bmap_add_extent_delay_real(
			goto done;
	}
-	/* adjust for changes in reserved delayed indirect blocks */
-	if (da_old || da_new) {
-		temp = da_new;
-		if (bma->cur)
-			temp += bma->cur->bc_private.b.allocated;
-		if (temp < da_old)
-			xfs_mod_fdblocks(bma->ip->i_mount,
-					(int64_t)(da_old - temp), false);
+	if (bma->cur) {
+		da_new += bma->cur->bc_private.b.allocated;
+		bma->cur->bc_private.b.allocated = 0;
	}
-	/* clear out the allocated field, done with it now in any case. */
-	if (bma->cur)
-		bma->cur->bc_private.b.allocated = 0;
+	/* adjust for changes in reserved delayed indirect blocks */
+	if (da_new != da_old) {
+		ASSERT(state == 0 || da_new < da_old);
+		error = xfs_mod_fdblocks(mp, (int64_t)(da_old - da_new),
+				false);
+	}
	xfs_bmap_check_leaf_extents(bma->cur, bma->ip, whichfork);
done:
@@ -2142,7 +2040,7 @@ xfs_bmap_add_extent_unwritten_real(
	struct xfs_trans	*tp,
	xfs_inode_t		*ip,	/* incore inode pointer */
	int			whichfork,
-	xfs_extnum_t		*idx,	/* extent number to update/insert */
+	struct xfs_iext_cursor	*icur,
	xfs_btree_cur_t		**curp,	/* if *curp is null, not a btree */
	xfs_bmbt_irec_t		*new,	/* new data to add to file extents */
	xfs_fsblock_t		*first,	/* pointer to firstblock variable */
@@ -2150,28 +2048,22 @@ xfs_bmap_add_extent_unwritten_real(
	int			*logflagsp) /* inode logging flags */
{
	xfs_btree_cur_t		*cur;	/* btree cursor */
-	xfs_bmbt_rec_host_t	*ep;	/* extent entry for idx */
	int			error;	/* error return value */
	int			i;	/* temp state */
	xfs_ifork_t		*ifp;	/* inode fork pointer */
	xfs_fileoff_t		new_endoff;	/* end offset of new entry */
-	xfs_exntst_t		newext;	/* new extent state */
-	xfs_exntst_t		oldext;	/* old extent state */
	xfs_bmbt_irec_t		r[3];	/* neighbor extent entries */
					/* left is 0, right is 1, prev is 2 */
	int			rval=0;	/* return value (logging flags) */
-	int			state = 0;/* state bits, accessed thru macros */
+	int			state = xfs_bmap_fork_to_state(whichfork);
	struct xfs_mount	*mp = ip->i_mount;
+	struct xfs_bmbt_irec	old;
	*logflagsp = 0;
	cur = *curp;
	ifp = XFS_IFORK_PTR(ip, whichfork);
-	if (whichfork == XFS_COW_FORK)
-		state |= BMAP_COWFORK;
-	ASSERT(*idx >= 0);
-	ASSERT(*idx <= xfs_iext_count(ifp));
	ASSERT(!isnullstartblock(new->br_startblock));
	XFS_STATS_INC(mp, xs_add_exlist);
@@ -2184,12 +2076,8 @@ xfs_bmap_add_extent_unwritten_real(
	 * Set up a bunch of variables to make the tests simpler.
	 */
	error = 0;
-	ep = xfs_iext_get_ext(ifp, *idx);
-	xfs_bmbt_get_all(ep, &PREV);
-	newext = new->br_state;
-	oldext = (newext == XFS_EXT_UNWRITTEN) ?
-		XFS_EXT_NORM : XFS_EXT_UNWRITTEN;
-	ASSERT(PREV.br_state == oldext);
+	xfs_iext_get_extent(ifp, icur, &PREV);
+	ASSERT(new->br_state != PREV.br_state);
	new_endoff = new->br_startoff + new->br_blockcount;
	ASSERT(PREV.br_startoff <= new->br_startoff);
	ASSERT(PREV.br_startoff + PREV.br_blockcount >= new_endoff);
@@ -2207,10 +2095,8 @@ xfs_bmap_add_extent_unwritten_real(
	 * Check and set flags if this segment has a left neighbor.
	 * Don't set contiguous if the combined extent would be too large.
	 */
-	if (*idx > 0) {
+	if (xfs_iext_peek_prev_extent(ifp, icur, &LEFT)) {
		state |= BMAP_LEFT_VALID;
-		xfs_bmbt_get_all(xfs_iext_get_ext(ifp, *idx - 1), &LEFT);
-
		if (isnullstartblock(LEFT.br_startblock))
			state |= BMAP_LEFT_DELAY;
	}
@@ -2218,7 +2104,7 @@ xfs_bmap_add_extent_unwritten_real(
	if ((state & BMAP_LEFT_VALID) && !(state & BMAP_LEFT_DELAY) &&
	    LEFT.br_startoff + LEFT.br_blockcount == new->br_startoff &&
	    LEFT.br_startblock + LEFT.br_blockcount == new->br_startblock &&
-	    LEFT.br_state == newext &&
+	    LEFT.br_state == new->br_state &&
	    LEFT.br_blockcount + new->br_blockcount <= MAXEXTLEN)
		state |= BMAP_LEFT_CONTIG;
@@ -2227,9 +2113,8 @@ xfs_bmap_add_extent_unwritten_real(
	 * Don't set contiguous if the combined extent would be too large.
	 * Also check for all-three-contiguous being too large.
	 */
-	if (*idx < xfs_iext_count(ifp) - 1) {
+	if (xfs_iext_peek_next_extent(ifp, icur, &RIGHT)) {
		state |= BMAP_RIGHT_VALID;
-		xfs_bmbt_get_all(xfs_iext_get_ext(ifp, *idx + 1), &RIGHT);
		if (isnullstartblock(RIGHT.br_startblock))
			state |= BMAP_RIGHT_DELAY;
	}
@@ -2237,7 +2122,7 @@ xfs_bmap_add_extent_unwritten_real(
	if ((state & BMAP_RIGHT_VALID) && !(state & BMAP_RIGHT_DELAY) &&
	    new_endoff == RIGHT.br_startoff &&
	    new->br_startblock + new->br_blockcount == RIGHT.br_startblock &&
-	    newext == RIGHT.br_state &&
+	    new->br_state == RIGHT.br_state &&
	    new->br_blockcount + RIGHT.br_blockcount <= MAXEXTLEN &&
	    ((state & (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
		       BMAP_RIGHT_FILLING)) !=
@@ -2258,24 +2143,20 @@ xfs_bmap_add_extent_unwritten_real(
		 * Setting all of a previous oldext extent to newext.
		 * The left and right neighbors are both contiguous with new.
		 */
-		--*idx;
-
-		trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
-		xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, *idx),
-			LEFT.br_blockcount + PREV.br_blockcount +
-			RIGHT.br_blockcount);
-		trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
+		LEFT.br_blockcount += PREV.br_blockcount + RIGHT.br_blockcount;
-		xfs_iext_remove(ip, *idx + 1, 2, state);
+		xfs_iext_remove(ip, icur, state);
+		xfs_iext_remove(ip, icur, state);
+		xfs_iext_prev(ifp, icur);
+		xfs_iext_update_extent(ip, state, icur, &LEFT);
		XFS_IFORK_NEXT_SET(ip, whichfork,
				XFS_IFORK_NEXTENTS(ip, whichfork) - 2);
		if (cur == NULL)
			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
		else {
			rval = XFS_ILOG_CORE;
-			if ((error = xfs_bmbt_lookup_eq(cur, RIGHT.br_startoff,
-					RIGHT.br_startblock,
-					RIGHT.br_blockcount, &i)))
+			error = xfs_bmbt_lookup_eq(cur, &RIGHT, &i);
+			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
			if ((error = xfs_btree_delete(cur, &i)))
@@ -2290,10 +2171,8 @@ xfs_bmap_add_extent_unwritten_real(
			if ((error = xfs_btree_decrement(cur, 0, &i)))
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
-			if ((error = xfs_bmbt_update(cur, LEFT.br_startoff,
-				LEFT.br_startblock,
-				LEFT.br_blockcount + PREV.br_blockcount +
-				RIGHT.br_blockcount, LEFT.br_state)))
+			error = xfs_bmbt_update(cur, &LEFT);
+			if (error)
				goto done;
		}
		break;
@@ -2303,23 +2182,19 @@ xfs_bmap_add_extent_unwritten_real(
		 * Setting all of a previous oldext extent to newext.
		 * The left neighbor is contiguous, the right is not.
		 */
-		--*idx;
+		LEFT.br_blockcount += PREV.br_blockcount;
-		trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
-		xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, *idx),
-			LEFT.br_blockcount + PREV.br_blockcount);
-		trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
-
-		xfs_iext_remove(ip, *idx + 1, 1, state);
+		xfs_iext_remove(ip, icur, state);
+		xfs_iext_prev(ifp, icur);
+		xfs_iext_update_extent(ip, state, icur, &LEFT);
		XFS_IFORK_NEXT_SET(ip, whichfork,
				XFS_IFORK_NEXTENTS(ip, whichfork) - 1);
		if (cur == NULL)
			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
		else {
			rval = XFS_ILOG_CORE;
-			if ((error = xfs_bmbt_lookup_eq(cur, PREV.br_startoff,
-					PREV.br_startblock, PREV.br_blockcount,
-					&i)))
+			error = xfs_bmbt_lookup_eq(cur, &PREV, &i);
+			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
			if ((error = xfs_btree_delete(cur, &i)))
@@ -2328,10 +2203,8 @@ xfs_bmap_add_extent_unwritten_real(
			if ((error = xfs_btree_decrement(cur, 0, &i)))
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
-			if ((error = xfs_bmbt_update(cur, LEFT.br_startoff,
-				LEFT.br_startblock,
-				LEFT.br_blockcount + PREV.br_blockcount,
-				LEFT.br_state)))
+			error = xfs_bmbt_update(cur, &LEFT);
+			if (error)
				goto done;
		}
		break;
@@ -2341,21 +2214,22 @@ xfs_bmap_add_extent_unwritten_real(
		 * Setting all of a previous oldext extent to newext.
		 * The right neighbor is contiguous, the left is not.
		 */
-		trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
-		xfs_bmbt_set_blockcount(ep,
-			PREV.br_blockcount + RIGHT.br_blockcount);
-		xfs_bmbt_set_state(ep, newext);
-		trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
-		xfs_iext_remove(ip, *idx + 1, 1, state);
+		PREV.br_blockcount += RIGHT.br_blockcount;
+		PREV.br_state = new->br_state;
+
+		xfs_iext_next(ifp, icur);
+		xfs_iext_remove(ip, icur, state);
+		xfs_iext_prev(ifp, icur);
+		xfs_iext_update_extent(ip, state, icur, &PREV);
+
		XFS_IFORK_NEXT_SET(ip, whichfork,
				XFS_IFORK_NEXTENTS(ip, whichfork) - 1);
		if (cur == NULL)
			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
		else {
			rval = XFS_ILOG_CORE;
-			if ((error = xfs_bmbt_lookup_eq(cur, RIGHT.br_startoff,
-					RIGHT.br_startblock,
-					RIGHT.br_blockcount, &i)))
+			error = xfs_bmbt_lookup_eq(cur, &RIGHT, &i);
+			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
			if ((error = xfs_btree_delete(cur, &i)))
@@ -2364,10 +2238,8 @@ xfs_bmap_add_extent_unwritten_real(
			if ((error = xfs_btree_decrement(cur, 0, &i)))
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
-			if ((error = xfs_bmbt_update(cur, new->br_startoff,
-				new->br_startblock,
-				new->br_blockcount + RIGHT.br_blockcount,
-				newext)))
+			error = xfs_bmbt_update(cur, &PREV);
+			if (error)
				goto done;
		}
		break;
@@ -2378,22 +2250,19 @@ xfs_bmap_add_extent_unwritten_real(
		 * Neither the left nor right neighbors are contiguous with
		 * the new one.
		 */
-		trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
-		xfs_bmbt_set_state(ep, newext);
-		trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
+		PREV.br_state = new->br_state;
+		xfs_iext_update_extent(ip, state, icur, &PREV);
		if (cur == NULL)
			rval = XFS_ILOG_DEXT;
		else {
			rval = 0;
-			if ((error = xfs_bmbt_lookup_eq(cur, new->br_startoff,
-					new->br_startblock, new->br_blockcount,
-					&i)))
+			error = xfs_bmbt_lookup_eq(cur, new, &i);
+			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
-			if ((error = xfs_bmbt_update(cur, new->br_startoff,
-				new->br_startblock, new->br_blockcount,
-				newext)))
+			error = xfs_bmbt_update(cur, &PREV);
+			if (error)
				goto done;
		}
		break;
@@ -2403,43 +2272,32 @@ xfs_bmap_add_extent_unwritten_real(
		 * Setting the first part of a previous oldext extent to newext.
		 * The left neighbor is contiguous.
		 */
-		trace_xfs_bmap_pre_update(ip, *idx - 1, state, _THIS_IP_);
-		xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, *idx - 1),
-			LEFT.br_blockcount + new->br_blockcount);
-		xfs_bmbt_set_startoff(ep,
-			PREV.br_startoff + new->br_blockcount);
-		trace_xfs_bmap_post_update(ip, *idx - 1, state, _THIS_IP_);
-
-		trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
-		xfs_bmbt_set_startblock(ep,
-			new->br_startblock + new->br_blockcount);
-		xfs_bmbt_set_blockcount(ep,
-			PREV.br_blockcount - new->br_blockcount);
-		trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
-
-		--*idx;
+		LEFT.br_blockcount += new->br_blockcount;
+
+		old = PREV;
+		PREV.br_startoff += new->br_blockcount;
+		PREV.br_startblock += new->br_blockcount;
+		PREV.br_blockcount -= new->br_blockcount;
+
+		xfs_iext_update_extent(ip, state, icur, &PREV);
+		xfs_iext_prev(ifp, icur);
+		xfs_iext_update_extent(ip, state, icur, &LEFT);
		if (cur == NULL)
			rval = XFS_ILOG_DEXT;
		else {
			rval = 0;
-			if ((error = xfs_bmbt_lookup_eq(cur, PREV.br_startoff,
-					PREV.br_startblock, PREV.br_blockcount,
-					&i)))
+			error = xfs_bmbt_lookup_eq(cur, &old, &i);
+			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
-			if ((error = xfs_bmbt_update(cur,
-				PREV.br_startoff + new->br_blockcount,
-				PREV.br_startblock + new->br_blockcount,
-				PREV.br_blockcount - new->br_blockcount,
-				oldext)))
+			error = xfs_bmbt_update(cur, &PREV);
+			if (error)
				goto done;
-			if ((error = xfs_btree_decrement(cur, 0, &i)))
+			error = xfs_btree_decrement(cur, 0, &i);
+			if (error)
				goto done;
-			error = xfs_bmbt_update(cur, LEFT.br_startoff,
-				LEFT.br_startblock,
-				LEFT.br_blockcount + new->br_blockcount,
-				LEFT.br_state);
+			error = xfs_bmbt_update(cur, &LEFT);
			if (error)
				goto done;
		}
@@ -2450,32 +2308,25 @@ xfs_bmap_add_extent_unwritten_real(
		 * Setting the first part of a previous oldext extent to newext.
		 * The left neighbor is not contiguous.
		 */
-		trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
-		ASSERT(ep && xfs_bmbt_get_state(ep) == oldext);
-		xfs_bmbt_set_startoff(ep, new_endoff);
-		xfs_bmbt_set_blockcount(ep,
-			PREV.br_blockcount - new->br_blockcount);
-		xfs_bmbt_set_startblock(ep,
-			new->br_startblock + new->br_blockcount);
-		trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
-
-		xfs_iext_insert(ip, *idx, 1, new, state);
+		old = PREV;
+		PREV.br_startoff += new->br_blockcount;
+		PREV.br_startblock += new->br_blockcount;
+		PREV.br_blockcount -= new->br_blockcount;
+
+		xfs_iext_update_extent(ip, state, icur, &PREV);
+		xfs_iext_insert(ip, icur, new, state);
		XFS_IFORK_NEXT_SET(ip, whichfork,
				XFS_IFORK_NEXTENTS(ip, whichfork) + 1);
		if (cur == NULL)
			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
		else {
			rval = XFS_ILOG_CORE;
-			if ((error = xfs_bmbt_lookup_eq(cur, PREV.br_startoff,
-					PREV.br_startblock, PREV.br_blockcount,
-					&i)))
+			error = xfs_bmbt_lookup_eq(cur, &old, &i);
+			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
-			if ((error = xfs_bmbt_update(cur,
-				PREV.br_startoff + new->br_blockcount,
-				PREV.br_startblock + new->br_blockcount,
-				PREV.br_blockcount - new->br_blockcount,
-				oldext)))
+			error = xfs_bmbt_update(cur, &PREV);
+			if (error)
				goto done;
			cur->bc_rec.b = *new;
			if ((error = xfs_btree_insert(cur, &i)))
@@ -2489,39 +2340,33 @@ xfs_bmap_add_extent_unwritten_real(
|
|
|
* Setting the last part of a previous oldext extent to newext.
|
|
|
* The right neighbor is contiguous with the new allocation.
|
|
|
*/
|
|
|
- trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
|
|
|
- xfs_bmbt_set_blockcount(ep,
|
|
|
- PREV.br_blockcount - new->br_blockcount);
|
|
|
- trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
|
|
|
+ old = PREV;
|
|
|
+ PREV.br_blockcount -= new->br_blockcount;
|
|
|
|
|
|
- ++*idx;
|
|
|
+ RIGHT.br_startoff = new->br_startoff;
|
|
|
+ RIGHT.br_startblock = new->br_startblock;
|
|
|
+ RIGHT.br_blockcount += new->br_blockcount;
|
|
|
|
|
|
- trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
|
|
|
- xfs_bmbt_set_allf(xfs_iext_get_ext(ifp, *idx),
|
|
|
- new->br_startoff, new->br_startblock,
|
|
|
- new->br_blockcount + RIGHT.br_blockcount, newext);
|
|
|
- trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
|
|
|
+ xfs_iext_update_extent(ip, state, icur, &PREV);
|
|
|
+ xfs_iext_next(ifp, icur);
|
|
|
+ xfs_iext_update_extent(ip, state, icur, &RIGHT);
|
|
|
|
|
|
if (cur == NULL)
|
|
|
rval = XFS_ILOG_DEXT;
|
|
|
else {
|
|
|
rval = 0;
|
|
|
- if ((error = xfs_bmbt_lookup_eq(cur, PREV.br_startoff,
|
|
|
- PREV.br_startblock,
|
|
|
- PREV.br_blockcount, &i)))
|
|
|
+ error = xfs_bmbt_lookup_eq(cur, &old, &i);
|
|
|
+ if (error)
|
|
|
goto done;
|
|
|
XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
|
|
|
- if ((error = xfs_bmbt_update(cur, PREV.br_startoff,
|
|
|
- PREV.br_startblock,
|
|
|
- PREV.br_blockcount - new->br_blockcount,
|
|
|
- oldext)))
|
|
|
+ error = xfs_bmbt_update(cur, &PREV);
|
|
|
+ if (error)
|
|
|
goto done;
|
|
|
- if ((error = xfs_btree_increment(cur, 0, &i)))
|
|
|
+ error = xfs_btree_increment(cur, 0, &i);
|
|
|
+ if (error)
|
|
|
goto done;
|
|
|
- if ((error = xfs_bmbt_update(cur, new->br_startoff,
|
|
|
- new->br_startblock,
|
|
|
- new->br_blockcount + RIGHT.br_blockcount,
|
|
|
- newext)))
|
|
|
+ error = xfs_bmbt_update(cur, &RIGHT);
|
|
|
+ if (error)
|
|
|
goto done;
|
|
|
}
|
|
|
break;
|
|
@@ -2531,13 +2376,12 @@ xfs_bmap_add_extent_unwritten_real(
* Setting the last part of a previous oldext extent to newext.
* The right neighbor is not contiguous.
*/
- trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
- xfs_bmbt_set_blockcount(ep,
- PREV.br_blockcount - new->br_blockcount);
- trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
+ old = PREV;
+ PREV.br_blockcount -= new->br_blockcount;

- ++*idx;
- xfs_iext_insert(ip, *idx, 1, new, state);
+ xfs_iext_update_extent(ip, state, icur, &PREV);
+ xfs_iext_next(ifp, icur);
+ xfs_iext_insert(ip, icur, new, state);

XFS_IFORK_NEXT_SET(ip, whichfork,
XFS_IFORK_NEXTENTS(ip, whichfork) + 1);
@@ -2545,22 +2389,17 @@ xfs_bmap_add_extent_unwritten_real(
rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
else {
rval = XFS_ILOG_CORE;
- if ((error = xfs_bmbt_lookup_eq(cur, PREV.br_startoff,
- PREV.br_startblock, PREV.br_blockcount,
- &i)))
+ error = xfs_bmbt_lookup_eq(cur, &old, &i);
+ if (error)
goto done;
XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
- if ((error = xfs_bmbt_update(cur, PREV.br_startoff,
- PREV.br_startblock,
- PREV.br_blockcount - new->br_blockcount,
- oldext)))
+ error = xfs_bmbt_update(cur, &PREV);
+ if (error)
goto done;
- if ((error = xfs_bmbt_lookup_eq(cur, new->br_startoff,
- new->br_startblock, new->br_blockcount,
- &i)))
+ error = xfs_bmbt_lookup_eq(cur, new, &i);
+ if (error)
goto done;
XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done);
- cur->bc_rec.b.br_state = XFS_EXT_NORM;
if ((error = xfs_btree_insert(cur, &i)))
goto done;
XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
@@ -2573,20 +2412,20 @@ xfs_bmap_add_extent_unwritten_real(
* newext. Contiguity is impossible here.
* One extent becomes three extents.
*/
- trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
- xfs_bmbt_set_blockcount(ep,
- new->br_startoff - PREV.br_startoff);
- trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
+ old = PREV;
+ PREV.br_blockcount = new->br_startoff - PREV.br_startoff;

r[0] = *new;
r[1].br_startoff = new_endoff;
r[1].br_blockcount =
- PREV.br_startoff + PREV.br_blockcount - new_endoff;
+ old.br_startoff + old.br_blockcount - new_endoff;
r[1].br_startblock = new->br_startblock + new->br_blockcount;
- r[1].br_state = oldext;
+ r[1].br_state = PREV.br_state;

- ++*idx;
- xfs_iext_insert(ip, *idx, 2, &r[0], state);
+ xfs_iext_update_extent(ip, state, icur, &PREV);
+ xfs_iext_next(ifp, icur);
+ xfs_iext_insert(ip, icur, &r[1], state);
+ xfs_iext_insert(ip, icur, &r[0], state);

XFS_IFORK_NEXT_SET(ip, whichfork,
XFS_IFORK_NEXTENTS(ip, whichfork) + 2);
@@ -2594,20 +2433,16 @@ xfs_bmap_add_extent_unwritten_real(
rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
else {
rval = XFS_ILOG_CORE;
- if ((error = xfs_bmbt_lookup_eq(cur, PREV.br_startoff,
- PREV.br_startblock, PREV.br_blockcount,
- &i)))
+ error = xfs_bmbt_lookup_eq(cur, &old, &i);
+ if (error)
goto done;
XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
/* new right extent - oldext */
- if ((error = xfs_bmbt_update(cur, r[1].br_startoff,
- r[1].br_startblock, r[1].br_blockcount,
- r[1].br_state)))
+ error = xfs_bmbt_update(cur, &r[1]);
+ if (error)
goto done;
/* new left extent - oldext */
cur->bc_rec.b = PREV;
- cur->bc_rec.b.br_blockcount =
- new->br_startoff - PREV.br_startoff;
if ((error = xfs_btree_insert(cur, &i)))
goto done;
XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
@@ -2616,13 +2451,11 @@ xfs_bmap_add_extent_unwritten_real(
* we are about to insert as we can't trust it after
* the previous insert.
*/
- if ((error = xfs_bmbt_lookup_eq(cur, new->br_startoff,
- new->br_startblock, new->br_blockcount,
- &i)))
+ error = xfs_bmbt_lookup_eq(cur, new, &i);
+ if (error)
goto done;
XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done);
/* new middle extent - newext */
- cur->bc_rec.b.br_state = new->br_state;
if ((error = xfs_btree_insert(cur, &i)))
goto done;
XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
@@ -2681,7 +2514,7 @@ STATIC void
xfs_bmap_add_extent_hole_delay(
xfs_inode_t *ip, /* incore inode pointer */
int whichfork,
- xfs_extnum_t *idx, /* extent number to update/insert */
+ struct xfs_iext_cursor *icur,
xfs_bmbt_irec_t *new) /* new data to add to file extents */
{
xfs_ifork_t *ifp; /* inode fork pointer */
@@ -2689,22 +2522,17 @@ xfs_bmap_add_extent_hole_delay(
xfs_filblks_t newlen=0; /* new indirect size */
xfs_filblks_t oldlen=0; /* old indirect size */
xfs_bmbt_irec_t right; /* right neighbor extent entry */
- int state; /* state bits, accessed thru macros */
- xfs_filblks_t temp=0; /* temp for indirect calculations */
+ int state = xfs_bmap_fork_to_state(whichfork);
+ xfs_filblks_t temp; /* temp for indirect calculations */

ifp = XFS_IFORK_PTR(ip, whichfork);
- state = 0;
- if (whichfork == XFS_COW_FORK)
- state |= BMAP_COWFORK;
ASSERT(isnullstartblock(new->br_startblock));

/*
* Check and set flags if this segment has a left neighbor
*/
- if (*idx > 0) {
+ if (xfs_iext_peek_prev_extent(ifp, icur, &left)) {
state |= BMAP_LEFT_VALID;
- xfs_bmbt_get_all(xfs_iext_get_ext(ifp, *idx - 1), &left);
-
if (isnullstartblock(left.br_startblock))
state |= BMAP_LEFT_DELAY;
}
@@ -2713,10 +2541,8 @@ xfs_bmap_add_extent_hole_delay(
* Check and set flags if the current (right) segment exists.
* If it doesn't exist, we're converting the hole at end-of-file.
*/
- if (*idx < xfs_iext_count(ifp)) {
+ if (xfs_iext_get_extent(ifp, icur, &right)) {
state |= BMAP_RIGHT_VALID;
- xfs_bmbt_get_all(xfs_iext_get_ext(ifp, *idx), &right);
-
if (isnullstartblock(right.br_startblock))
state |= BMAP_RIGHT_DELAY;
}
@@ -2748,22 +2574,20 @@ xfs_bmap_add_extent_hole_delay(
* on the left and on the right.
* Merge all three into a single extent record.
*/
- --*idx;
temp = left.br_blockcount + new->br_blockcount +
right.br_blockcount;

- trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
- xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, *idx), temp);
oldlen = startblockval(left.br_startblock) +
startblockval(new->br_startblock) +
startblockval(right.br_startblock);
newlen = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
oldlen);
- xfs_bmbt_set_startblock(xfs_iext_get_ext(ifp, *idx),
- nullstartblock((int)newlen));
- trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
+ left.br_startblock = nullstartblock(newlen);
+ left.br_blockcount = temp;

- xfs_iext_remove(ip, *idx + 1, 1, state);
+ xfs_iext_remove(ip, icur, state);
+ xfs_iext_prev(ifp, icur);
+ xfs_iext_update_extent(ip, state, icur, &left);
break;

case BMAP_LEFT_CONTIG:
@@ -2772,18 +2596,17 @@ xfs_bmap_add_extent_hole_delay(
* on the left.
* Merge the new allocation with the left neighbor.
*/
- --*idx;
temp = left.br_blockcount + new->br_blockcount;

- trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
- xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, *idx), temp);
oldlen = startblockval(left.br_startblock) +
startblockval(new->br_startblock);
newlen = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
oldlen);
- xfs_bmbt_set_startblock(xfs_iext_get_ext(ifp, *idx),
- nullstartblock((int)newlen));
- trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
+ left.br_blockcount = temp;
+ left.br_startblock = nullstartblock(newlen);
+
+ xfs_iext_prev(ifp, icur);
+ xfs_iext_update_extent(ip, state, icur, &left);
break;

case BMAP_RIGHT_CONTIG:
@@ -2792,16 +2615,15 @@ xfs_bmap_add_extent_hole_delay(
* on the right.
* Merge the new allocation with the right neighbor.
*/
- trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
temp = new->br_blockcount + right.br_blockcount;
oldlen = startblockval(new->br_startblock) +
startblockval(right.br_startblock);
newlen = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
oldlen);
- xfs_bmbt_set_allf(xfs_iext_get_ext(ifp, *idx),
- new->br_startoff,
- nullstartblock((int)newlen), temp, right.br_state);
- trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
+ right.br_startoff = new->br_startoff;
+ right.br_startblock = nullstartblock(newlen);
+ right.br_blockcount = temp;
+ xfs_iext_update_extent(ip, state, icur, &right);
break;

case 0:
@@ -2811,7 +2633,7 @@ xfs_bmap_add_extent_hole_delay(
* Insert a new entry.
*/
oldlen = newlen = 0;
- xfs_iext_insert(ip, *idx, 1, new, state);
+ xfs_iext_insert(ip, icur, new, state);
break;
}
if (oldlen != newlen) {
@@ -2832,7 +2654,7 @@ xfs_bmap_add_extent_hole_real(
struct xfs_trans *tp,
struct xfs_inode *ip,
int whichfork,
- xfs_extnum_t *idx,
+ struct xfs_iext_cursor *icur,
struct xfs_btree_cur **curp,
struct xfs_bmbt_irec *new,
xfs_fsblock_t *first,
@@ -2847,27 +2669,19 @@ xfs_bmap_add_extent_hole_real(
xfs_bmbt_irec_t left; /* left neighbor extent entry */
xfs_bmbt_irec_t right; /* right neighbor extent entry */
int rval=0; /* return value (logging flags) */
- int state; /* state bits, accessed thru macros */
+ int state = xfs_bmap_fork_to_state(whichfork);
+ struct xfs_bmbt_irec old;

- ASSERT(*idx >= 0);
- ASSERT(*idx <= xfs_iext_count(ifp));
ASSERT(!isnullstartblock(new->br_startblock));
ASSERT(!cur || !(cur->bc_private.b.flags & XFS_BTCUR_BPRV_WASDEL));

XFS_STATS_INC(mp, xs_add_exlist);

- state = 0;
- if (whichfork == XFS_ATTR_FORK)
- state |= BMAP_ATTRFORK;
- if (whichfork == XFS_COW_FORK)
- state |= BMAP_COWFORK;
-
/*
* Check and set flags if this segment has a left neighbor.
*/
- if (*idx > 0) {
+ if (xfs_iext_peek_prev_extent(ifp, icur, &left)) {
state |= BMAP_LEFT_VALID;
- xfs_bmbt_get_all(xfs_iext_get_ext(ifp, *idx - 1), &left);
if (isnullstartblock(left.br_startblock))
state |= BMAP_LEFT_DELAY;
}
@@ -2876,9 +2690,8 @@ xfs_bmap_add_extent_hole_real(
* Check and set flags if this segment has a current value.
* Not true if we're inserting into the "hole" at eof.
*/
- if (*idx < xfs_iext_count(ifp)) {
+ if (xfs_iext_get_extent(ifp, icur, &right)) {
state |= BMAP_RIGHT_VALID;
- xfs_bmbt_get_all(xfs_iext_get_ext(ifp, *idx), &right);
if (isnullstartblock(right.br_startblock))
state |= BMAP_RIGHT_DELAY;
}
@@ -2915,14 +2728,11 @@ xfs_bmap_add_extent_hole_real(
* left and on the right.
* Merge all three into a single extent record.
*/
- --*idx;
- trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
- xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, *idx),
- left.br_blockcount + new->br_blockcount +
- right.br_blockcount);
- trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
+ left.br_blockcount += new->br_blockcount + right.br_blockcount;

- xfs_iext_remove(ip, *idx + 1, 1, state);
+ xfs_iext_remove(ip, icur, state);
+ xfs_iext_prev(ifp, icur);
+ xfs_iext_update_extent(ip, state, icur, &left);

XFS_IFORK_NEXT_SET(ip, whichfork,
XFS_IFORK_NEXTENTS(ip, whichfork) - 1);
@@ -2930,9 +2740,7 @@ xfs_bmap_add_extent_hole_real(
rval = XFS_ILOG_CORE | xfs_ilog_fext(whichfork);
} else {
rval = XFS_ILOG_CORE;
- error = xfs_bmbt_lookup_eq(cur, right.br_startoff,
- right.br_startblock, right.br_blockcount,
- &i);
+ error = xfs_bmbt_lookup_eq(cur, &right, &i);
if (error)
goto done;
XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
@@ -2944,12 +2752,7 @@ xfs_bmap_add_extent_hole_real(
if (error)
goto done;
XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
- error = xfs_bmbt_update(cur, left.br_startoff,
- left.br_startblock,
- left.br_blockcount +
- new->br_blockcount +
- right.br_blockcount,
- left.br_state);
+ error = xfs_bmbt_update(cur, &left);
if (error)
goto done;
}
@@ -2961,27 +2764,21 @@ xfs_bmap_add_extent_hole_real(
* on the left.
* Merge the new allocation with the left neighbor.
*/
- --*idx;
- trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
- xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, *idx),
- left.br_blockcount + new->br_blockcount);
- trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
+ old = left;
+ left.br_blockcount += new->br_blockcount;
+
+ xfs_iext_prev(ifp, icur);
+ xfs_iext_update_extent(ip, state, icur, &left);

if (cur == NULL) {
rval = xfs_ilog_fext(whichfork);
} else {
rval = 0;
- error = xfs_bmbt_lookup_eq(cur, left.br_startoff,
- left.br_startblock, left.br_blockcount,
- &i);
+ error = xfs_bmbt_lookup_eq(cur, &old, &i);
if (error)
goto done;
XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
- error = xfs_bmbt_update(cur, left.br_startoff,
- left.br_startblock,
- left.br_blockcount +
- new->br_blockcount,
- left.br_state);
+ error = xfs_bmbt_update(cur, &left);
if (error)
goto done;
}
@@ -2993,29 +2790,22 @@ xfs_bmap_add_extent_hole_real(
* on the right.
* Merge the new allocation with the right neighbor.
*/
- trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
- xfs_bmbt_set_allf(xfs_iext_get_ext(ifp, *idx),
- new->br_startoff, new->br_startblock,
- new->br_blockcount + right.br_blockcount,
- right.br_state);
- trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
+ old = right;
+
+ right.br_startoff = new->br_startoff;
+ right.br_startblock = new->br_startblock;
+ right.br_blockcount += new->br_blockcount;
+ xfs_iext_update_extent(ip, state, icur, &right);

if (cur == NULL) {
rval = xfs_ilog_fext(whichfork);
} else {
rval = 0;
- error = xfs_bmbt_lookup_eq(cur,
- right.br_startoff,
- right.br_startblock,
- right.br_blockcount, &i);
+ error = xfs_bmbt_lookup_eq(cur, &old, &i);
if (error)
goto done;
XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
- error = xfs_bmbt_update(cur, new->br_startoff,
- new->br_startblock,
- new->br_blockcount +
- right.br_blockcount,
- right.br_state);
+ error = xfs_bmbt_update(cur, &right);
if (error)
goto done;
}
@@ -3027,21 +2817,17 @@ xfs_bmap_add_extent_hole_real(
* real allocation.
* Insert a new entry.
*/
- xfs_iext_insert(ip, *idx, 1, new, state);
+ xfs_iext_insert(ip, icur, new, state);
XFS_IFORK_NEXT_SET(ip, whichfork,
XFS_IFORK_NEXTENTS(ip, whichfork) + 1);
if (cur == NULL) {
rval = XFS_ILOG_CORE | xfs_ilog_fext(whichfork);
} else {
rval = XFS_ILOG_CORE;
- error = xfs_bmbt_lookup_eq(cur,
- new->br_startoff,
- new->br_startblock,
- new->br_blockcount, &i);
+ error = xfs_bmbt_lookup_eq(cur, new, &i);
if (error)
goto done;
XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done);
- cur->bc_rec.b.br_state = new->br_state;
error = xfs_btree_insert(cur, &i);
if (error)
goto done;
@@ -3981,7 +3767,7 @@ xfs_bmapi_read(
struct xfs_bmbt_irec got;
xfs_fileoff_t obno;
xfs_fileoff_t end;
- xfs_extnum_t idx;
+ struct xfs_iext_cursor icur;
int error;
bool eof = false;
int n = 0;
@@ -4023,7 +3809,7 @@ xfs_bmapi_read(
return error;
}

- if (!xfs_iext_lookup_extent(ip, ifp, bno, &idx, &got))
+ if (!xfs_iext_lookup_extent(ip, ifp, bno, &icur, &got))
eof = true;
end = bno + len;
obno = bno;
@@ -4055,7 +3841,7 @@ xfs_bmapi_read(
break;

/* Else go on to the next record. */
- if (!xfs_iext_get_extent(ifp, ++idx, &got))
+ if (!xfs_iext_next_extent(ifp, &icur, &got))
eof = true;
}
*nmap = n;
@@ -4083,7 +3869,7 @@ xfs_bmapi_reserve_delalloc(
xfs_filblks_t len,
xfs_filblks_t prealloc,
struct xfs_bmbt_irec *got,
- xfs_extnum_t *lastx,
+ struct xfs_iext_cursor *icur,
int eof)
{
struct xfs_mount *mp = ip->i_mount;
@@ -4113,7 +3899,7 @@ xfs_bmapi_reserve_delalloc(
if (extsz) {
struct xfs_bmbt_irec prev;

- if (!xfs_iext_get_extent(ifp, *lastx - 1, &prev))
+ if (!xfs_iext_peek_prev_extent(ifp, icur, &prev))
prev.br_startoff = NULLFILEOFF;

error = xfs_bmap_extsize_align(mp, got, &prev, extsz, rt, eof,
@@ -4162,7 +3948,7 @@ xfs_bmapi_reserve_delalloc(
got->br_blockcount = alen;
got->br_state = XFS_EXT_NORM;

- xfs_bmap_add_extent_hole_delay(ip, whichfork, lastx, got);
+ xfs_bmap_add_extent_hole_delay(ip, whichfork, icur, got);

/*
* Tag the inode if blocks were preallocated. Note that COW fork
@@ -4207,10 +3993,7 @@ xfs_bmapi_allocate(
if (bma->wasdel) {
bma->length = (xfs_extlen_t)bma->got.br_blockcount;
bma->offset = bma->got.br_startoff;
- if (bma->idx) {
- xfs_bmbt_get_all(xfs_iext_get_ext(ifp, bma->idx - 1),
- &bma->prev);
- }
+ xfs_iext_peek_prev_extent(ifp, &bma->icur, &bma->prev);
} else {
bma->length = XFS_FILBLKS_MIN(bma->length, MAXEXTLEN);
if (!bma->eof)
@@ -4295,7 +4078,7 @@ xfs_bmapi_allocate(
error = xfs_bmap_add_extent_delay_real(bma, whichfork);
else
error = xfs_bmap_add_extent_hole_real(bma->tp, bma->ip,
- whichfork, &bma->idx, &bma->cur, &bma->got,
+ whichfork, &bma->icur, &bma->cur, &bma->got,
bma->firstblock, bma->dfops, &bma->logflags);

bma->logflags |= tmp_logflags;
@@ -4307,7 +4090,7 @@ xfs_bmapi_allocate(
* or xfs_bmap_add_extent_hole_real might have merged it into one of
* the neighbouring ones.
*/
- xfs_bmbt_get_all(xfs_iext_get_ext(ifp, bma->idx), &bma->got);
+ xfs_iext_get_extent(ifp, &bma->icur, &bma->got);

ASSERT(bma->got.br_startoff <= bma->offset);
ASSERT(bma->got.br_startoff + bma->got.br_blockcount >=
@@ -4365,8 +4148,8 @@ xfs_bmapi_convert_unwritten(
}

error = xfs_bmap_add_extent_unwritten_real(bma->tp, bma->ip, whichfork,
- &bma->idx, &bma->cur, mval, bma->firstblock, bma->dfops,
- &tmp_logflags);
+ &bma->icur, &bma->cur, mval, bma->firstblock,
+ bma->dfops, &tmp_logflags);
/*
* Log the inode core unconditionally in the unwritten extent conversion
* path because the conversion might not have done so (e.g., if the
@@ -4388,7 +4171,7 @@ xfs_bmapi_convert_unwritten(
* xfs_bmap_add_extent_unwritten_real might have merged it into one
* of the neighbouring ones.
*/
- xfs_bmbt_get_all(xfs_iext_get_ext(ifp, bma->idx), &bma->got);
+ xfs_iext_get_extent(ifp, &bma->icur, &bma->got);

/*
* We may have combined previously unwritten space with written space,
@@ -4507,9 +4290,9 @@ xfs_bmapi_write(
end = bno + len;
obno = bno;

- if (!xfs_iext_lookup_extent(ip, ifp, bno, &bma.idx, &bma.got))
+ if (!xfs_iext_lookup_extent(ip, ifp, bno, &bma.icur, &bma.got))
eof = true;
- if (!xfs_iext_get_extent(ifp, bma.idx - 1, &bma.prev))
+ if (!xfs_iext_peek_prev_extent(ifp, &bma.icur, &bma.prev))
bma.prev.br_startoff = NULLFILEOFF;
bma.tp = tp;
bma.ip = ip;
@@ -4551,7 +4334,8 @@ xfs_bmapi_write(
* First, deal with the hole before the allocated space
* that we found, if any.
*/
- if (need_alloc || wasdelay) {
+ if ((need_alloc || wasdelay) &&
+ !(flags & XFS_BMAPI_CONVERT_ONLY)) {
bma.eof = eof;
bma.conv = !!(flags & XFS_BMAPI_CONVERT);
bma.wasdel = wasdelay;
@@ -4614,7 +4398,7 @@ xfs_bmapi_write(

/* Else go on to the next record. */
bma.prev = bma.got;
- if (!xfs_iext_get_extent(ifp, ++bma.idx, &bma.got))
+ if (!xfs_iext_next_extent(ifp, &bma.icur, &bma.got))
eof = true;
}
*nmap = n;
@@ -4687,7 +4471,7 @@ xfs_bmapi_remap(
struct xfs_btree_cur *cur = NULL;
xfs_fsblock_t firstblock = NULLFSBLOCK;
struct xfs_bmbt_irec got;
- xfs_extnum_t idx;
+ struct xfs_iext_cursor icur;
int logflags = 0, error;

ASSERT(len > 0);
@@ -4711,7 +4495,7 @@ xfs_bmapi_remap(
return error;
}

- if (xfs_iext_lookup_extent(ip, ifp, bno, &idx, &got)) {
+ if (xfs_iext_lookup_extent(ip, ifp, bno, &icur, &got)) {
/* make sure we only reflink into a hole. */
ASSERT(got.br_startoff > bno);
ASSERT(got.br_startoff - bno >= len);
@@ -4732,8 +4516,8 @@ xfs_bmapi_remap(
got.br_blockcount = len;
got.br_state = XFS_EXT_NORM;

- error = xfs_bmap_add_extent_hole_real(tp, ip, XFS_DATA_FORK, &idx, &cur,
- &got, &firstblock, dfops, &logflags);
+ error = xfs_bmap_add_extent_hole_real(tp, ip, XFS_DATA_FORK, &icur,
+ &cur, &got, &firstblock, dfops, &logflags);
if (error)
goto error0;
@@ -4849,7 +4633,7 @@ int
|
|
|
xfs_bmap_del_extent_delay(
|
|
|
struct xfs_inode *ip,
|
|
|
int whichfork,
|
|
|
- xfs_extnum_t *idx,
|
|
|
+ struct xfs_iext_cursor *icur,
|
|
|
struct xfs_bmbt_irec *got,
|
|
|
struct xfs_bmbt_irec *del)
|
|
|
{
|
|
@@ -4859,7 +4643,8 @@ xfs_bmap_del_extent_delay(
|
|
|
int64_t da_old, da_new, da_diff = 0;
|
|
|
xfs_fileoff_t del_endoff, got_endoff;
|
|
|
xfs_filblks_t got_indlen, new_indlen, stolen;
|
|
|
- int error = 0, state = 0;
|
|
|
+ int state = xfs_bmap_fork_to_state(whichfork);
|
|
|
+ int error = 0;
|
|
|
bool isrt;
|
|
|
|
|
|
XFS_STATS_INC(mp, xs_del_exlist);
|
|
@@ -4870,8 +4655,6 @@ xfs_bmap_del_extent_delay(
|
|
|
da_old = startblockval(got->br_startblock);
|
|
|
da_new = 0;
|
|
|
|
|
|
- ASSERT(*idx >= 0);
|
|
|
- ASSERT(*idx <= xfs_iext_count(ifp));
|
|
|
ASSERT(del->br_blockcount > 0);
|
|
|
ASSERT(got->br_startoff <= del->br_startoff);
|
|
|
ASSERT(got_endoff >= del_endoff);
|
|
@@ -4895,46 +4678,39 @@ xfs_bmap_del_extent_delay(
|
|
|
return error;
|
|
|
ip->i_delayed_blks -= del->br_blockcount;
|
|
|
|
|
|
- if (whichfork == XFS_COW_FORK)
|
|
|
- state |= BMAP_COWFORK;
|
|
|
-
|
|
|
if (got->br_startoff == del->br_startoff)
|
|
|
- state |= BMAP_LEFT_CONTIG;
|
|
|
+ state |= BMAP_LEFT_FILLING;
|
|
|
if (got_endoff == del_endoff)
|
|
|
- state |= BMAP_RIGHT_CONTIG;
|
|
|
+ state |= BMAP_RIGHT_FILLING;
|
|
|
|
|
|
- switch (state & (BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG)) {
|
|
|
- case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
|
|
|
+ switch (state & (BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING)) {
|
|
|
+ case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING:
|
|
|
/*
|
|
|
* Matches the whole extent. Delete the entry.
|
|
|
*/
|
|
|
- xfs_iext_remove(ip, *idx, 1, state);
|
|
|
- --*idx;
|
|
|
+ xfs_iext_remove(ip, icur, state);
|
|
|
+ xfs_iext_prev(ifp, icur);
|
|
|
break;
|
|
|
- case BMAP_LEFT_CONTIG:
|
|
|
+ case BMAP_LEFT_FILLING:
|
|
|
/*
|
|
|
* Deleting the first part of the extent.
|
|
|
*/
|
|
|
- trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
|
|
|
got->br_startoff = del_endoff;
|
|
|
got->br_blockcount -= del->br_blockcount;
|
|
|
da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip,
|
|
|
got->br_blockcount), da_old);
|
|
|
got->br_startblock = nullstartblock((int)da_new);
|
|
|
- xfs_iext_update_extent(ifp, *idx, got);
|
|
|
- trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
|
|
|
+ xfs_iext_update_extent(ip, state, icur, got);
|
|
|
break;
|
|
|
- case BMAP_RIGHT_CONTIG:
|
|
|
+ case BMAP_RIGHT_FILLING:
|
|
|
/*
|
|
|
* Deleting the last part of the extent.
|
|
|
*/
|
|
|
- trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
|
|
|
got->br_blockcount = got->br_blockcount - del->br_blockcount;
|
|
|
da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip,
|
|
|
got->br_blockcount), da_old);
|
|
|
got->br_startblock = nullstartblock((int)da_new);
|
|
|
- xfs_iext_update_extent(ifp, *idx, got);
|
|
|
- trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
|
|
|
+ xfs_iext_update_extent(ip, state, icur, got);
|
|
|
break;
|
|
|
case 0:
|
|
|
/*
|
|
@@ -4946,8 +4722,6 @@ xfs_bmap_del_extent_delay(
|
|
|
* Warn if either of the new indlen reservations is zero as this
|
|
|
* can lead to delalloc problems.
|
|
|
*/
|
|
|
- trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
|
|
|
-
|
|
|
got->br_blockcount = del->br_startoff - got->br_startoff;
|
|
|
got_indlen = xfs_bmap_worst_indlen(ip, got->br_blockcount);
|
|
|
|
|
@@ -4959,15 +4733,14 @@ xfs_bmap_del_extent_delay(
|
|
|
del->br_blockcount);
|
|
|
|
|
|
got->br_startblock = nullstartblock((int)got_indlen);
|
|
|
- xfs_iext_update_extent(ifp, *idx, got);
|
|
|
- trace_xfs_bmap_post_update(ip, *idx, 0, _THIS_IP_);
|
|
|
|
|
|
new.br_startoff = del_endoff;
|
|
|
new.br_state = got->br_state;
|
|
|
new.br_startblock = nullstartblock((int)new_indlen);
|
|
|
|
|
|
- ++*idx;
|
|
|
- xfs_iext_insert(ip, *idx, 1, &new, state);
|
|
|
+ xfs_iext_update_extent(ip, state, icur, got);
|
|
|
+ xfs_iext_next(ifp, icur);
|
|
|
+ xfs_iext_insert(ip, icur, &new, state);
|
|
|
|
|
|
da_new = got_indlen + new_indlen - stolen;
|
|
|
del->br_blockcount -= stolen;
|
|
@@ -4986,7 +4759,7 @@ xfs_bmap_del_extent_delay(
|
|
|
void
|
|
|
xfs_bmap_del_extent_cow(
|
|
|
struct xfs_inode *ip,
|
|
|
- xfs_extnum_t *idx,
|
|
|
+ struct xfs_iext_cursor *icur,
|
|
|
struct xfs_bmbt_irec *got,
|
|
|
struct xfs_bmbt_irec *del)
|
|
|
{
|
|
@@ -5001,75 +4774,67 @@ xfs_bmap_del_extent_cow(
|
|
|
del_endoff = del->br_startoff + del->br_blockcount;
|
|
|
got_endoff = got->br_startoff + got->br_blockcount;
|
|
|
|
|
|
- ASSERT(*idx >= 0);
|
|
|
- ASSERT(*idx <= xfs_iext_count(ifp));
|
|
|
ASSERT(del->br_blockcount > 0);
|
|
|
ASSERT(got->br_startoff <= del->br_startoff);
|
|
|
ASSERT(got_endoff >= del_endoff);
|
|
|
ASSERT(!isnullstartblock(got->br_startblock));
|
|
|
|
|
|
if (got->br_startoff == del->br_startoff)
|
|
|
- state |= BMAP_LEFT_CONTIG;
|
|
|
+ state |= BMAP_LEFT_FILLING;
|
|
|
if (got_endoff == del_endoff)
|
|
|
- state |= BMAP_RIGHT_CONTIG;
|
|
|
+ state |= BMAP_RIGHT_FILLING;
|
|
|
|
|
|
- switch (state & (BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG)) {
|
|
|
- case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
|
|
|
+ switch (state & (BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING)) {
|
|
|
+ case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING:
|
|
|
/*
|
|
|
* Matches the whole extent. Delete the entry.
|
|
|
*/
|
|
|
- xfs_iext_remove(ip, *idx, 1, state);
|
|
|
- --*idx;
|
|
|
+ xfs_iext_remove(ip, icur, state);
|
|
|
+ xfs_iext_prev(ifp, icur);
|
|
|
break;
|
|
|
- case BMAP_LEFT_CONTIG:
|
|
|
+ case BMAP_LEFT_FILLING:
|
|
|
/*
|
|
|
* Deleting the first part of the extent.
|
|
|
*/
|
|
|
- trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
|
|
|
got->br_startoff = del_endoff;
|
|
|
got->br_blockcount -= del->br_blockcount;
|
|
|
got->br_startblock = del->br_startblock + del->br_blockcount;
|
|
|
- xfs_iext_update_extent(ifp, *idx, got);
|
|
|
- trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
|
|
|
+ xfs_iext_update_extent(ip, state, icur, got);
|
|
|
break;
|
|
|
- case BMAP_RIGHT_CONTIG:
|
|
|
+ case BMAP_RIGHT_FILLING:
|
|
|
/*
|
|
|
* Deleting the last part of the extent.
|
|
|
*/
|
|
|
- trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
|
|
|
got->br_blockcount -= del->br_blockcount;
|
|
|
- xfs_iext_update_extent(ifp, *idx, got);
|
|
|
- trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
|
|
|
+ xfs_iext_update_extent(ip, state, icur, got);
|
|
|
break;
|
|
|
case 0:
|
|
|
/*
|
|
|
* Deleting the middle of the extent.
|
|
|
*/
|
|
|
- trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
|
|
|
got->br_blockcount = del->br_startoff - got->br_startoff;
|
|
|
- xfs_iext_update_extent(ifp, *idx, got);
|
|
|
- trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
|
|
|
|
|
|
new.br_startoff = del_endoff;
|
|
|
new.br_blockcount = got_endoff - del_endoff;
|
|
|
new.br_state = got->br_state;
|
|
|
new.br_startblock = del->br_startblock + del->br_blockcount;
|
|
|
|
|
|
- ++*idx;
|
|
|
- xfs_iext_insert(ip, *idx, 1, &new, state);
|
|
|
+ xfs_iext_update_extent(ip, state, icur, got);
|
|
|
+ xfs_iext_next(ifp, icur);
|
|
|
+ xfs_iext_insert(ip, icur, &new, state);
|
|
|
break;
|
|
|
}
|
|
|
}
|
|
|
|
|
|
/*
|
|
|
* Called by xfs_bmapi to update file extent records and the btree
|
|
|
- * after removing space (or undoing a delayed allocation).
|
|
|
+ * after removing space.
|
|
|
*/
|
|
|
STATIC int /* error */
|
|
|
-xfs_bmap_del_extent(
|
|
|
+xfs_bmap_del_extent_real(
|
|
|
xfs_inode_t *ip, /* incore inode pointer */
|
|
|
xfs_trans_t *tp, /* current transaction pointer */
|
|
|
- xfs_extnum_t *idx, /* extent number to update/delete */
|
|
|
+ struct xfs_iext_cursor *icur,
|
|
|
struct xfs_defer_ops *dfops, /* list of extents to be freed */
|
|
|
xfs_btree_cur_t *cur, /* if null, not a btree */
|
|
|
xfs_bmbt_irec_t *del, /* data to remove from extents */
|
|
@@ -5077,16 +4842,12 @@ xfs_bmap_del_extent(
|
|
|
int whichfork, /* data or attr fork */
|
|
|
int bflags) /* bmapi flags */
|
|
|
{
|
|
|
- xfs_filblks_t da_new; /* new delay-alloc indirect blocks */
|
|
|
- xfs_filblks_t da_old; /* old delay-alloc indirect blocks */
|
|
|
xfs_fsblock_t del_endblock=0; /* first block past del */
|
|
|
xfs_fileoff_t del_endoff; /* first offset past del */
|
|
|
- int delay; /* current block is delayed allocated */
|
|
|
int do_fx; /* free extent at end of routine */
|
|
|
- xfs_bmbt_rec_host_t *ep; /* current extent entry pointer */
|
|
|
int error; /* error return value */
|
|
|
- int flags; /* inode logging flags */
|
|
|
- xfs_bmbt_irec_t got; /* current extent entry */
|
|
|
+ int flags = 0;/* inode logging flags */
|
|
|
+ struct xfs_bmbt_irec got; /* current extent entry */
|
|
|
xfs_fileoff_t got_endoff; /* first offset past got */
|
|
|
int i; /* temp state */
|
|
|
xfs_ifork_t *ifp; /* inode fork pointer */
|
|
@@ -5095,103 +4856,81 @@ xfs_bmap_del_extent(
|
|
|
xfs_bmbt_irec_t new; /* new record to be inserted */
|
|
|
/* REFERENCED */
|
|
|
uint qfield; /* quota field to update */
|
|
|
- xfs_filblks_t temp; /* for indirect length calculations */
|
|
|
- xfs_filblks_t temp2; /* for indirect length calculations */
|
|
|
- int state = 0;
|
|
|
+ int state = xfs_bmap_fork_to_state(whichfork);
|
|
|
+ struct xfs_bmbt_irec old;
|
|
|
|
|
|
mp = ip->i_mount;
|
|
|
XFS_STATS_INC(mp, xs_del_exlist);
|
|
|
|
|
|
- if (whichfork == XFS_ATTR_FORK)
|
|
|
- state |= BMAP_ATTRFORK;
|
|
|
- else if (whichfork == XFS_COW_FORK)
|
|
|
- state |= BMAP_COWFORK;
|
|
|
-
|
|
|
ifp = XFS_IFORK_PTR(ip, whichfork);
|
|
|
- ASSERT((*idx >= 0) && (*idx < xfs_iext_count(ifp)));
|
|
|
ASSERT(del->br_blockcount > 0);
|
|
|
- ep = xfs_iext_get_ext(ifp, *idx);
|
|
|
- xfs_bmbt_get_all(ep, &got);
|
|
|
+ xfs_iext_get_extent(ifp, icur, &got);
|
|
|
ASSERT(got.br_startoff <= del->br_startoff);
|
|
|
del_endoff = del->br_startoff + del->br_blockcount;
|
|
|
got_endoff = got.br_startoff + got.br_blockcount;
|
|
|
ASSERT(got_endoff >= del_endoff);
|
|
|
- delay = isnullstartblock(got.br_startblock);
|
|
|
- ASSERT(isnullstartblock(del->br_startblock) == delay);
|
|
|
- flags = 0;
|
|
|
+ ASSERT(!isnullstartblock(got.br_startblock));
|
|
|
qfield = 0;
|
|
|
error = 0;
|
|
|
+
|
|
|
/*
|
|
|
- * If deleting a real allocation, must free up the disk space.
|
|
|
+ * If it's the case where the directory code is running with no block
|
|
|
+ * reservation, and the deleted block is in the middle of its extent,
|
|
|
+ * and the resulting insert of an extent would cause transformation to
|
|
|
+ * btree format, then reject it. The calling code will then swap blocks
|
|
|
+ * around instead. We have to do this now, rather than waiting for the
|
|
|
+ * conversion to btree format, since the transaction will be dirty then.
|
|
|
*/
|
|
|
- if (!delay) {
|
|
|
- flags = XFS_ILOG_CORE;
|
|
|
- /*
|
|
|
- * Realtime allocation. Free it and record di_nblocks update.
|
|
|
- */
|
|
|
- if (whichfork == XFS_DATA_FORK && XFS_IS_REALTIME_INODE(ip)) {
|
|
|
- xfs_fsblock_t bno;
|
|
|
- xfs_filblks_t len;
|
|
|
-
|
|
|
- ASSERT(do_mod(del->br_blockcount,
|
|
|
- mp->m_sb.sb_rextsize) == 0);
|
|
|
- ASSERT(do_mod(del->br_startblock,
|
|
|
- mp->m_sb.sb_rextsize) == 0);
|
|
|
- bno = del->br_startblock;
|
|
|
- len = del->br_blockcount;
|
|
|
- do_div(bno, mp->m_sb.sb_rextsize);
|
|
|
- do_div(len, mp->m_sb.sb_rextsize);
|
|
|
- error = xfs_rtfree_extent(tp, bno, (xfs_extlen_t)len);
|
|
|
- if (error)
|
|
|
- goto done;
|
|
|
- do_fx = 0;
|
|
|
- nblks = len * mp->m_sb.sb_rextsize;
|
|
|
- qfield = XFS_TRANS_DQ_RTBCOUNT;
|
|
|
- }
|
|
|
- /*
|
|
|
- * Ordinary allocation.
|
|
|
- */
|
|
|
- else {
|
|
|
- do_fx = 1;
|
|
|
- nblks = del->br_blockcount;
|
|
|
- qfield = XFS_TRANS_DQ_BCOUNT;
|
|
|
- }
|
|
|
- /*
|
|
|
- * Set up del_endblock and cur for later.
|
|
|
- */
|
|
|
- del_endblock = del->br_startblock + del->br_blockcount;
|
|
|
- if (cur) {
|
|
|
- if ((error = xfs_bmbt_lookup_eq(cur, got.br_startoff,
|
|
|
- got.br_startblock, got.br_blockcount,
|
|
|
- &i)))
|
|
|
- goto done;
|
|
|
- XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
|
|
|
- }
|
|
|
- da_old = da_new = 0;
|
|
|
- } else {
|
|
|
- da_old = startblockval(got.br_startblock);
|
|
|
- da_new = 0;
|
|
|
- nblks = 0;
|
|
|
+ if (tp->t_blk_res == 0 &&
|
|
|
+ XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS &&
|
|
|
+ XFS_IFORK_NEXTENTS(ip, whichfork) >=
|
|
|
+ XFS_IFORK_MAXEXT(ip, whichfork) &&
|
|
|
+ del->br_startoff > got.br_startoff && del_endoff < got_endoff)
|
|
|
+ return -ENOSPC;
|
|
|
+
|
|
|
+ flags = XFS_ILOG_CORE;
|
|
|
+ if (whichfork == XFS_DATA_FORK && XFS_IS_REALTIME_INODE(ip)) {
|
|
|
+ xfs_fsblock_t bno;
|
|
|
+ xfs_filblks_t len;
|
|
|
+
|
|
|
+ ASSERT(do_mod(del->br_blockcount, mp->m_sb.sb_rextsize) == 0);
|
|
|
+ ASSERT(do_mod(del->br_startblock, mp->m_sb.sb_rextsize) == 0);
|
|
|
+ bno = del->br_startblock;
|
|
|
+ len = del->br_blockcount;
|
|
|
+ do_div(bno, mp->m_sb.sb_rextsize);
|
|
|
+ do_div(len, mp->m_sb.sb_rextsize);
|
|
|
+ error = xfs_rtfree_extent(tp, bno, (xfs_extlen_t)len);
|
|
|
+ if (error)
|
|
|
+ goto done;
|
|
|
do_fx = 0;
|
|
|
+ nblks = len * mp->m_sb.sb_rextsize;
|
|
|
+ qfield = XFS_TRANS_DQ_RTBCOUNT;
|
|
|
+ } else {
|
|
|
+ do_fx = 1;
|
|
|
+ nblks = del->br_blockcount;
|
|
|
+ qfield = XFS_TRANS_DQ_BCOUNT;
|
|
|
}
|
|
|
|
|
|
- /*
|
|
|
- * Set flag value to use in switch statement.
|
|
|
- * Left-contig is 2, right-contig is 1.
|
|
|
- */
|
|
|
- switch (((got.br_startoff == del->br_startoff) << 1) |
|
|
|
- (got_endoff == del_endoff)) {
|
|
|
- case 3:
|
|
|
+ del_endblock = del->br_startblock + del->br_blockcount;
|
|
|
+ if (cur) {
|
|
|
+ error = xfs_bmbt_lookup_eq(cur, &got, &i);
|
|
|
+ if (error)
|
|
|
+ goto done;
|
|
|
+ XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
|
|
|
+ }
|
|
|
+
|
|
|
+ if (got.br_startoff == del->br_startoff)
|
|
|
+ state |= BMAP_LEFT_FILLING;
|
|
|
+ if (got_endoff == del_endoff)
|
|
|
+ state |= BMAP_RIGHT_FILLING;
|
|
|
+
|
|
|
+ switch (state & (BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING)) {
|
|
|
+ case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING:
|
|
|
/*
|
|
|
* Matches the whole extent. Delete the entry.
|
|
|
*/
|
|
|
- trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
|
|
|
- xfs_iext_remove(ip, *idx, 1,
|
|
|
- whichfork == XFS_ATTR_FORK ? BMAP_ATTRFORK : 0);
|
|
|
- --*idx;
|
|
|
- if (delay)
|
|
|
- break;
|
|
|
-
|
|
|
+ xfs_iext_remove(ip, icur, state);
|
|
|
+ xfs_iext_prev(ifp, icur);
|
|
|
XFS_IFORK_NEXT_SET(ip, whichfork,
|
|
|
XFS_IFORK_NEXTENTS(ip, whichfork) - 1);
|
|
|
flags |= XFS_ILOG_CORE;
|
|
@@ -5203,168 +4942,106 @@ xfs_bmap_del_extent(
|
|
|
goto done;
|
|
|
XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
|
|
|
break;
|
|
|
-
|
|
|
- case 2:
|
|
|
+ case BMAP_LEFT_FILLING:
|
|
|
/*
|
|
|
* Deleting the first part of the extent.
|
|
|
*/
|
|
|
- trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
|
|
|
- xfs_bmbt_set_startoff(ep, del_endoff);
|
|
|
- temp = got.br_blockcount - del->br_blockcount;
|
|
|
- xfs_bmbt_set_blockcount(ep, temp);
|
|
|
- if (delay) {
|
|
|
- temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
|
|
|
- da_old);
|
|
|
- xfs_bmbt_set_startblock(ep, nullstartblock((int)temp));
|
|
|
- trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
|
|
|
- da_new = temp;
|
|
|
- break;
|
|
|
- }
|
|
|
- xfs_bmbt_set_startblock(ep, del_endblock);
|
|
|
- trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
|
|
|
+ got.br_startoff = del_endoff;
|
|
|
+ got.br_startblock = del_endblock;
|
|
|
+ got.br_blockcount -= del->br_blockcount;
|
|
|
+ xfs_iext_update_extent(ip, state, icur, &got);
|
|
|
if (!cur) {
|
|
|
flags |= xfs_ilog_fext(whichfork);
|
|
|
break;
|
|
|
}
|
|
|
- if ((error = xfs_bmbt_update(cur, del_endoff, del_endblock,
|
|
|
- got.br_blockcount - del->br_blockcount,
|
|
|
- got.br_state)))
|
|
|
+ error = xfs_bmbt_update(cur, &got);
|
|
|
+ if (error)
|
|
|
goto done;
|
|
|
break;
|
|
|
-
|
|
|
- case 1:
|
|
|
+ case BMAP_RIGHT_FILLING:
|
|
|
/*
|
|
|
* Deleting the last part of the extent.
|
|
|
*/
|
|
|
- temp = got.br_blockcount - del->br_blockcount;
|
|
|
- trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
|
|
|
- xfs_bmbt_set_blockcount(ep, temp);
|
|
|
- if (delay) {
|
|
|
- temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
|
|
|
- da_old);
|
|
|
- xfs_bmbt_set_startblock(ep, nullstartblock((int)temp));
|
|
|
- trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
|
|
|
- da_new = temp;
|
|
|
- break;
|
|
|
- }
|
|
|
- trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
|
|
|
+ got.br_blockcount -= del->br_blockcount;
|
|
|
+ xfs_iext_update_extent(ip, state, icur, &got);
|
|
|
if (!cur) {
|
|
|
flags |= xfs_ilog_fext(whichfork);
|
|
|
break;
|
|
|
}
|
|
|
- if ((error = xfs_bmbt_update(cur, got.br_startoff,
|
|
|
- got.br_startblock,
|
|
|
- got.br_blockcount - del->br_blockcount,
|
|
|
- got.br_state)))
|
|
|
+ error = xfs_bmbt_update(cur, &got);
|
|
|
+ if (error)
|
|
|
goto done;
|
|
|
break;
|
|
|
-
|
|
|
case 0:
|
|
|
/*
|
|
|
* Deleting the middle of the extent.
|
|
|
*/
|
|
|
- temp = del->br_startoff - got.br_startoff;
|
|
|
- trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
|
|
|
- xfs_bmbt_set_blockcount(ep, temp);
|
|
|
+ old = got;
|
|
|
+
|
|
|
+ got.br_blockcount = del->br_startoff - got.br_startoff;
|
|
|
+ xfs_iext_update_extent(ip, state, icur, &got);
|
|
|
+
|
|
|
new.br_startoff = del_endoff;
|
|
|
- temp2 = got_endoff - del_endoff;
|
|
|
- new.br_blockcount = temp2;
|
|
|
+ new.br_blockcount = got_endoff - del_endoff;
|
|
|
new.br_state = got.br_state;
|
|
|
- if (!delay) {
|
|
|
- new.br_startblock = del_endblock;
|
|
|
- flags |= XFS_ILOG_CORE;
|
|
|
- if (cur) {
|
|
|
- if ((error = xfs_bmbt_update(cur,
|
|
|
- got.br_startoff,
|
|
|
- got.br_startblock, temp,
|
|
|
- got.br_state)))
|
|
|
- goto done;
|
|
|
- if ((error = xfs_btree_increment(cur, 0, &i)))
|
|
|
- goto done;
|
|
|
- cur->bc_rec.b = new;
|
|
|
- error = xfs_btree_insert(cur, &i);
|
|
|
- if (error && error != -ENOSPC)
|
|
|
- goto done;
|
|
|
+ new.br_startblock = del_endblock;
|
|
|
+
|
|
|
+ flags |= XFS_ILOG_CORE;
|
|
|
+ if (cur) {
|
|
|
+ error = xfs_bmbt_update(cur, &got);
|
|
|
+ if (error)
|
|
|
+ goto done;
|
|
|
+ error = xfs_btree_increment(cur, 0, &i);
|
|
|
+ if (error)
|
|
|
+ goto done;
|
|
|
+ cur->bc_rec.b = new;
|
|
|
+ error = xfs_btree_insert(cur, &i);
|
|
|
+ if (error && error != -ENOSPC)
|
|
|
+ goto done;
|
|
|
+ /*
|
|
|
+ * If get no-space back from btree insert, it tried a
|
|
|
+ * split, and we have a zero block reservation. Fix up
|
|
|
+ * our state and return the error.
|
|
|
+ */
|
|
|
+ if (error == -ENOSPC) {
|
|
|
/*
|
|
|
- * If get no-space back from btree insert,
|
|
|
- * it tried a split, and we have a zero
|
|
|
- * block reservation.
|
|
|
- * Fix up our state and return the error.
|
|
|
+ * Reset the cursor, don't trust it after any
|
|
|
+ * insert operation.
|
|
|
*/
|
|
|
- if (error == -ENOSPC) {
|
|
|
- /*
|
|
|
- * Reset the cursor, don't trust
|
|
|
- * it after any insert operation.
|
|
|
- */
|
|
|
- if ((error = xfs_bmbt_lookup_eq(cur,
|
|
|
- got.br_startoff,
|
|
|
- got.br_startblock,
|
|
|
- temp, &i)))
|
|
|
- goto done;
|
|
|
- XFS_WANT_CORRUPTED_GOTO(mp,
|
|
|
- i == 1, done);
|
|
|
- /*
|
|
|
- * Update the btree record back
|
|
|
- * to the original value.
|
|
|
- */
|
|
|
- if ((error = xfs_bmbt_update(cur,
|
|
|
- got.br_startoff,
|
|
|
- got.br_startblock,
|
|
|
- got.br_blockcount,
|
|
|
- got.br_state)))
|
|
|
- goto done;
|
|
|
- /*
|
|
|
- * Reset the extent record back
|
|
|
- * to the original value.
|
|
|
- */
|
|
|
- xfs_bmbt_set_blockcount(ep,
|
|
|
- got.br_blockcount);
|
|
|
- flags = 0;
|
|
|
- error = -ENOSPC;
|
|
|
+ error = xfs_bmbt_lookup_eq(cur, &got, &i);
|
|
|
+ if (error)
|
|
|
goto done;
|
|
|
- }
|
|
|
XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
|
|
|
- } else
|
|
|
- flags |= xfs_ilog_fext(whichfork);
|
|
|
- XFS_IFORK_NEXT_SET(ip, whichfork,
|
|
|
- XFS_IFORK_NEXTENTS(ip, whichfork) + 1);
|
|
|
- } else {
|
|
|
- xfs_filblks_t stolen;
|
|
|
- ASSERT(whichfork == XFS_DATA_FORK);
|
|
|
-
|
|
|
- /*
|
|
|
- * Distribute the original indlen reservation across the
|
|
|
- * two new extents. Steal blocks from the deleted extent
|
|
|
- * if necessary. Stealing blocks simply fudges the
|
|
|
- * fdblocks accounting in xfs_bunmapi().
|
|
|
- */
|
|
|
- temp = xfs_bmap_worst_indlen(ip, got.br_blockcount);
|
|
|
- temp2 = xfs_bmap_worst_indlen(ip, new.br_blockcount);
|
|
|
- stolen = xfs_bmap_split_indlen(da_old, &temp, &temp2,
|
|
|
- del->br_blockcount);
|
|
|
- da_new = temp + temp2 - stolen;
|
|
|
- del->br_blockcount -= stolen;
|
|
|
-
|
|
|
- /*
|
|
|
- * Set the reservation for each extent. Warn if either
|
|
|
- * is zero as this can lead to delalloc problems.
|
|
|
- */
|
|
|
- WARN_ON_ONCE(!temp || !temp2);
|
|
|
- xfs_bmbt_set_startblock(ep, nullstartblock((int)temp));
|
|
|
- new.br_startblock = nullstartblock((int)temp2);
|
|
|
- }
|
|
|
- trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
|
|
|
- xfs_iext_insert(ip, *idx + 1, 1, &new, state);
|
|
|
- ++*idx;
|
|
|
+ /*
|
|
|
+ * Update the btree record back
|
|
|
+ * to the original value.
|
|
|
+ */
|
|
|
+ error = xfs_bmbt_update(cur, &old);
|
|
|
+ if (error)
|
|
|
+ goto done;
|
|
|
+ /*
|
|
|
+ * Reset the extent record back
|
|
|
+ * to the original value.
|
|
|
+ */
|
|
|
+ xfs_iext_update_extent(ip, state, icur, &old);
|
|
|
+ flags = 0;
|
|
|
+ error = -ENOSPC;
|
|
|
+ goto done;
|
|
|
+ }
|
|
|
+ XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
|
|
|
+ } else
|
|
|
+ flags |= xfs_ilog_fext(whichfork);
|
|
|
+ XFS_IFORK_NEXT_SET(ip, whichfork,
|
|
|
+ XFS_IFORK_NEXTENTS(ip, whichfork) + 1);
|
|
|
+ xfs_iext_next(ifp, icur);
|
|
|
+ xfs_iext_insert(ip, icur, &new, state);
|
|
|
break;
|
|
|
}
|
|
|
|
|
|
/* remove reverse mapping */
|
|
|
- if (!delay) {
|
|
|
- error = xfs_rmap_unmap_extent(mp, dfops, ip, whichfork, del);
|
|
|
- if (error)
|
|
|
- goto done;
|
|
|
- }
|
|
|
+ error = xfs_rmap_unmap_extent(mp, dfops, ip, whichfork, del);
|
|
|
+ if (error)
|
|
|
+ goto done;
|
|
|
|
|
|
/*
|
|
|
* If we need to, add to list of extents to delete.
|
|
@@ -5390,13 +5067,6 @@ xfs_bmap_del_extent(
|
|
|
if (qfield && !(bflags & XFS_BMAPI_REMAP))
|
|
|
xfs_trans_mod_dquot_byino(tp, ip, qfield, (long)-nblks);
|
|
|
|
|
|
- /*
|
|
|
- * Account for change in delayed indirect blocks.
|
|
|
- * Nothing to do for disk quota accounting here.
|
|
|
- */
|
|
|
- ASSERT(da_old >= da_new);
|
|
|
- if (da_old > da_new)
|
|
|
- xfs_mod_fdblocks(mp, (int64_t)(da_old - da_new), false);
|
|
|
done:
|
|
|
*logflagsp = flags;
|
|
|
return error;
|
|
@@ -5412,7 +5082,7 @@ int /* error */
|
|
|
__xfs_bunmapi(
|
|
|
xfs_trans_t *tp, /* transaction pointer */
|
|
|
struct xfs_inode *ip, /* incore inode */
|
|
|
- xfs_fileoff_t bno, /* starting offset to unmap */
|
|
|
+ xfs_fileoff_t start, /* first file offset deleted */
|
|
|
xfs_filblks_t *rlen, /* i/o: amount remaining */
|
|
|
int flags, /* misc flags */
|
|
|
xfs_extnum_t nexts, /* number of extents max */
|
|
@@ -5427,11 +5097,9 @@ __xfs_bunmapi(
|
|
|
xfs_bmbt_irec_t got; /* current extent record */
|
|
|
xfs_ifork_t *ifp; /* inode fork pointer */
|
|
|
int isrt; /* freeing in rt area */
|
|
|
- xfs_extnum_t lastx; /* last extent index used */
|
|
|
int logflags; /* transaction logging flags */
|
|
|
xfs_extlen_t mod; /* rt extent offset */
|
|
|
xfs_mount_t *mp; /* mount structure */
|
|
|
- xfs_fileoff_t start; /* first file offset deleted */
|
|
|
int tmp_logflags; /* partial logging flags */
|
|
|
int wasdel; /* was a delayed alloc extent */
|
|
|
int whichfork; /* data or attribute fork */
|
|
@@ -5439,8 +5107,11 @@ __xfs_bunmapi(
|
|
|
xfs_filblks_t len = *rlen; /* length to unmap in file */
|
|
|
xfs_fileoff_t max_len;
|
|
|
xfs_agnumber_t prev_agno = NULLAGNUMBER, agno;
|
|
|
+ xfs_fileoff_t end;
|
|
|
+ struct xfs_iext_cursor icur;
|
|
|
+ bool done = false;
|
|
|
|
|
|
- trace_xfs_bunmap(ip, bno, len, flags, _RET_IP_);
|
|
|
+ trace_xfs_bunmap(ip, start, len, flags, _RET_IP_);
|
|
|
|
|
|
whichfork = xfs_bmapi_whichfork(flags);
|
|
|
ASSERT(whichfork != XFS_COW_FORK);
|
|
@@ -5479,18 +5150,13 @@ __xfs_bunmapi(
|
|
|
}
|
|
|
XFS_STATS_INC(mp, xs_blk_unmap);
|
|
|
isrt = (whichfork == XFS_DATA_FORK) && XFS_IS_REALTIME_INODE(ip);
|
|
|
- start = bno;
|
|
|
- bno = start + len - 1;
|
|
|
+ end = start + len;
|
|
|
|
|
|
- /*
|
|
|
- * Check to see if the given block number is past the end of the
|
|
|
- * file, back up to the last block if so...
|
|
|
- */
|
|
|
- if (!xfs_iext_lookup_extent(ip, ifp, bno, &lastx, &got)) {
|
|
|
- ASSERT(lastx > 0);
|
|
|
- xfs_iext_get_extent(ifp, --lastx, &got);
|
|
|
- bno = got.br_startoff + got.br_blockcount - 1;
|
|
|
+ if (!xfs_iext_lookup_extent_before(ip, ifp, &end, &icur, &got)) {
|
|
|
+ *rlen = 0;
|
|
|
+ return 0;
|
|
|
}
|
|
|
+ end--;
|
|
|
|
|
|
logflags = 0;
|
|
|
if (ifp->if_flags & XFS_IFBROOT) {
|
|
@@ -5513,24 +5179,24 @@ __xfs_bunmapi(
|
|
|
}
|
|
|
|
|
|
extno = 0;
|
|
|
- while (bno != (xfs_fileoff_t)-1 && bno >= start && lastx >= 0 &&
|
|
|
+ while (end != (xfs_fileoff_t)-1 && end >= start &&
|
|
|
(nexts == 0 || extno < nexts) && max_len > 0) {
|
|
|
/*
|
|
|
- * Is the found extent after a hole in which bno lives?
|
|
|
+ * Is the found extent after a hole in which end lives?
|
|
|
* Just back up to the previous extent, if so.
|
|
|
*/
|
|
|
- if (got.br_startoff > bno) {
|
|
|
- if (--lastx < 0)
|
|
|
- break;
|
|
|
- xfs_iext_get_extent(ifp, lastx, &got);
|
|
|
+ if (got.br_startoff > end &&
|
|
|
+ !xfs_iext_prev_extent(ifp, &icur, &got)) {
|
|
|
+ done = true;
|
|
|
+ break;
|
|
|
}
|
|
|
/*
|
|
|
* Is the last block of this extent before the range
|
|
|
* we're supposed to delete? If so, we're done.
|
|
|
*/
|
|
|
- bno = XFS_FILEOFF_MIN(bno,
|
|
|
+ end = XFS_FILEOFF_MIN(end,
|
|
|
got.br_startoff + got.br_blockcount - 1);
|
|
|
- if (bno < start)
|
|
|
+ if (end < start)
|
|
|
break;
|
|
|
/*
|
|
|
* Then deal with the (possibly delayed) allocated space
|
|
@@ -5555,8 +5221,8 @@ __xfs_bunmapi(
|
|
|
if (!wasdel)
|
|
|
del.br_startblock += start - got.br_startoff;
|
|
|
}
|
|
|
- if (del.br_startoff + del.br_blockcount > bno + 1)
|
|
|
- del.br_blockcount = bno + 1 - del.br_startoff;
|
|
|
+ if (del.br_startoff + del.br_blockcount > end + 1)
|
|
|
+ del.br_blockcount = end + 1 - del.br_startoff;
|
|
|
|
|
|
/* How much can we safely unmap? */
|
|
|
if (max_len < del.br_blockcount) {
|
|
@@ -5582,13 +5248,13 @@ __xfs_bunmapi(
|
|
|
* This piece is unwritten, or we're not
|
|
|
* using unwritten extents. Skip over it.
|
|
|
*/
|
|
|
- ASSERT(bno >= mod);
|
|
|
- bno -= mod > del.br_blockcount ?
|
|
|
+ ASSERT(end >= mod);
|
|
|
+ end -= mod > del.br_blockcount ?
|
|
|
del.br_blockcount : mod;
|
|
|
- if (bno < got.br_startoff) {
|
|
|
- if (--lastx >= 0)
|
|
|
- xfs_bmbt_get_all(xfs_iext_get_ext(
|
|
|
- ifp, lastx), &got);
|
|
|
+ if (end < got.br_startoff &&
|
|
|
+ !xfs_iext_prev_extent(ifp, &icur, &got)) {
|
|
|
+ done = true;
|
|
|
+ break;
|
|
|
}
|
|
|
continue;
|
|
|
}
|
|
@@ -5609,7 +5275,7 @@ __xfs_bunmapi(
|
|
|
}
|
|
|
del.br_state = XFS_EXT_UNWRITTEN;
|
|
|
error = xfs_bmap_add_extent_unwritten_real(tp, ip,
|
|
|
- whichfork, &lastx, &cur, &del,
|
|
|
+ whichfork, &icur, &cur, &del,
|
|
|
firstblock, dfops, &logflags);
|
|
|
if (error)
|
|
|
goto error0;
|
|
@@ -5634,10 +5300,13 @@ __xfs_bunmapi(
|
|
|
* Can't make it unwritten. There isn't
|
|
|
* a full extent here so just skip it.
|
|
|
*/
|
|
|
- ASSERT(bno >= del.br_blockcount);
|
|
|
- bno -= del.br_blockcount;
|
|
|
- if (got.br_startoff > bno && --lastx >= 0)
|
|
|
- xfs_iext_get_extent(ifp, lastx, &got);
|
|
|
+ ASSERT(end >= del.br_blockcount);
|
|
|
+ end -= del.br_blockcount;
|
|
|
+ if (got.br_startoff > end &&
|
|
|
+ !xfs_iext_prev_extent(ifp, &icur, &got)) {
|
|
|
+ done = true;
|
|
|
+ break;
|
|
|
+ }
|
|
|
continue;
|
|
|
} else if (del.br_state == XFS_EXT_UNWRITTEN) {
|
|
|
struct xfs_bmbt_irec prev;
|
|
@@ -5648,8 +5317,8 @@ __xfs_bunmapi(
|
|
|
* Unwrite the killed part of that one and
|
|
|
* try again.
|
|
|
*/
|
|
|
- ASSERT(lastx > 0);
|
|
|
- xfs_iext_get_extent(ifp, lastx - 1, &prev);
|
|
|
+ if (!xfs_iext_prev_extent(ifp, &icur, &prev))
|
|
|
+ ASSERT(0);
|
|
|
ASSERT(prev.br_state == XFS_EXT_NORM);
|
|
|
ASSERT(!isnullstartblock(prev.br_startblock));
|
|
|
ASSERT(del.br_startblock ==
|
|
@@ -5661,9 +5330,8 @@ __xfs_bunmapi(
|
|
|
prev.br_startoff = start;
|
|
|
}
|
|
|
prev.br_state = XFS_EXT_UNWRITTEN;
|
|
|
- lastx--;
|
|
|
error = xfs_bmap_add_extent_unwritten_real(tp,
|
|
|
- ip, whichfork, &lastx, &cur,
|
|
|
+ ip, whichfork, &icur, &cur,
|
|
|
&prev, firstblock, dfops,
|
|
|
&logflags);
|
|
|
if (error)
|
|
@@ -5673,7 +5341,7 @@ __xfs_bunmapi(
|
|
|
ASSERT(del.br_state == XFS_EXT_NORM);
|
|
|
del.br_state = XFS_EXT_UNWRITTEN;
|
|
|
error = xfs_bmap_add_extent_unwritten_real(tp,
|
|
|
- ip, whichfork, &lastx, &cur,
|
|
|
+ ip, whichfork, &icur, &cur,
|
|
|
&del, firstblock, dfops,
|
|
|
&logflags);
|
|
|
if (error)
|
|
@@ -5682,85 +5350,39 @@ __xfs_bunmapi(
|
|
|
}
|
|
|
}
|
|
|
|
|
|
- /*
|
|
|
- * If it's the case where the directory code is running
|
|
|
- * with no block reservation, and the deleted block is in
|
|
|
- * the middle of its extent, and the resulting insert
|
|
|
- * of an extent would cause transformation to btree format,
|
|
|
- * then reject it. The calling code will then swap
|
|
|
- * blocks around instead.
|
|
|
- * We have to do this now, rather than waiting for the
|
|
|
- * conversion to btree format, since the transaction
|
|
|
- * will be dirty.
|
|
|
- */
|
|
|
- if (!wasdel && tp->t_blk_res == 0 &&
|
|
|
- XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS &&
|
|
|
- XFS_IFORK_NEXTENTS(ip, whichfork) >= /* Note the >= */
|
|
|
- XFS_IFORK_MAXEXT(ip, whichfork) &&
|
|
|
- del.br_startoff > got.br_startoff &&
|
|
|
- del.br_startoff + del.br_blockcount <
|
|
|
- got.br_startoff + got.br_blockcount) {
|
|
|
- error = -ENOSPC;
|
|
|
- goto error0;
|
|
|
+ if (wasdel) {
|
|
|
+ error = xfs_bmap_del_extent_delay(ip, whichfork, &icur,
|
|
|
+ &got, &del);
|
|
|
+ } else {
|
|
|
+ error = xfs_bmap_del_extent_real(ip, tp, &icur, dfops,
|
|
|
+ cur, &del, &tmp_logflags, whichfork,
|
|
|
+ flags);
|
|
|
+ logflags |= tmp_logflags;
|
|
|
}
|
|
|
|
|
|
- /*
|
|
|
- * Unreserve quota and update realtime free space, if
|
|
|
- * appropriate. If delayed allocation, update the inode delalloc
|
|
|
- * counter now and wait to update the sb counters as
|
|
|
- * xfs_bmap_del_extent() might need to borrow some blocks.
|
|
|
- */
|
|
|
- if (wasdel) {
|
|
|
- ASSERT(startblockval(del.br_startblock) > 0);
|
|
|
- if (isrt) {
|
|
|
- xfs_filblks_t rtexts;
|
|
|
-
|
|
|
- rtexts = XFS_FSB_TO_B(mp, del.br_blockcount);
|
|
|
- do_div(rtexts, mp->m_sb.sb_rextsize);
|
|
|
- xfs_mod_frextents(mp, (int64_t)rtexts);
|
|
|
- (void)xfs_trans_reserve_quota_nblks(NULL,
|
|
|
- ip, -((long)del.br_blockcount), 0,
|
|
|
- XFS_QMOPT_RES_RTBLKS);
|
|
|
- } else {
|
|
|
- (void)xfs_trans_reserve_quota_nblks(NULL,
|
|
|
- ip, -((long)del.br_blockcount), 0,
|
|
|
- XFS_QMOPT_RES_REGBLKS);
|
|
|
- }
|
|
|
- ip->i_delayed_blks -= del.br_blockcount;
|
|
|
- if (cur)
|
|
|
- cur->bc_private.b.flags |=
|
|
|
- XFS_BTCUR_BPRV_WASDEL;
|
|
|
- } else if (cur)
|
|
|
- cur->bc_private.b.flags &= ~XFS_BTCUR_BPRV_WASDEL;
|
|
|
-
|
|
|
- error = xfs_bmap_del_extent(ip, tp, &lastx, dfops, cur, &del,
|
|
|
- &tmp_logflags, whichfork, flags);
|
|
|
- logflags |= tmp_logflags;
|
|
|
if (error)
|
|
|
goto error0;
|
|
|
|
|
|
- if (!isrt && wasdel)
|
|
|
- xfs_mod_fdblocks(mp, (int64_t)del.br_blockcount, false);
|
|
|
-
|
|
|
max_len -= del.br_blockcount;
|
|
|
- bno = del.br_startoff - 1;
|
|
|
+ end = del.br_startoff - 1;
|
|
|
nodelete:
|
|
|
/*
|
|
|
* If not done go on to the next (previous) record.
|
|
|
*/
|
|
|
- if (bno != (xfs_fileoff_t)-1 && bno >= start) {
|
|
|
- if (lastx >= 0) {
|
|
|
- xfs_iext_get_extent(ifp, lastx, &got);
|
|
|
- if (got.br_startoff > bno && --lastx >= 0)
|
|
|
- xfs_iext_get_extent(ifp, lastx, &got);
|
|
|
+ if (end != (xfs_fileoff_t)-1 && end >= start) {
|
|
|
+ if (!xfs_iext_get_extent(ifp, &icur, &got) ||
|
|
|
+ (got.br_startoff > end &&
|
|
|
+ !xfs_iext_prev_extent(ifp, &icur, &got))) {
|
|
|
+ done = true;
|
|
|
+ break;
|
|
|
}
|
|
|
extno++;
|
|
|
}
|
|
|
}
|
|
|
- if (bno == (xfs_fileoff_t)-1 || bno < start || lastx < 0)
|
|
|
+ if (done || end == (xfs_fileoff_t)-1 || end < start)
|
|
|
*rlen = 0;
|
|
|
else
|
|
|
- *rlen = bno - start + 1;
|
|
|
+ *rlen = end - start + 1;
|
|
|
|
|
|
/*
|
|
|
* Convert to a btree if necessary.
|
|
@@ -5878,14 +5500,13 @@ xfs_bmse_merge(
struct xfs_inode *ip,
int whichfork,
xfs_fileoff_t shift, /* shift fsb */
- int current_ext, /* idx of gotp */
+ struct xfs_iext_cursor *icur,
struct xfs_bmbt_irec *got, /* extent to shift */
struct xfs_bmbt_irec *left, /* preceding extent */
struct xfs_btree_cur *cur,
int *logflags, /* output */
struct xfs_defer_ops *dfops)
{
- struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
struct xfs_bmbt_irec new;
xfs_filblks_t blockcount;
int error, i;
@@ -5913,8 +5534,7 @@ xfs_bmse_merge(
}

/* lookup and remove the extent to merge */
- error = xfs_bmbt_lookup_eq(cur, got->br_startoff, got->br_startblock,
- got->br_blockcount, &i);
+ error = xfs_bmbt_lookup_eq(cur, got, &i);
if (error)
return error;
XFS_WANT_CORRUPTED_RETURN(mp, i == 1);
@@ -5925,20 +5545,20 @@ xfs_bmse_merge(
XFS_WANT_CORRUPTED_RETURN(mp, i == 1);

/* lookup and update size of the previous extent */
- error = xfs_bmbt_lookup_eq(cur, left->br_startoff, left->br_startblock,
- left->br_blockcount, &i);
+ error = xfs_bmbt_lookup_eq(cur, left, &i);
if (error)
return error;
XFS_WANT_CORRUPTED_RETURN(mp, i == 1);

- error = xfs_bmbt_update(cur, new.br_startoff, new.br_startblock,
- new.br_blockcount, new.br_state);
+ error = xfs_bmbt_update(cur, &new);
if (error)
return error;

done:
- xfs_iext_update_extent(ifp, current_ext - 1, &new);
- xfs_iext_remove(ip, current_ext, 1, 0);
+ xfs_iext_remove(ip, icur, 0);
+ xfs_iext_prev(XFS_IFORK_PTR(ip, whichfork), icur);
+ xfs_iext_update_extent(ip, xfs_bmap_fork_to_state(whichfork), icur,
+ &new);

/* update reverse mapping. rmap functions merge the rmaps for us */
error = xfs_rmap_unmap_extent(mp, dfops, ip, whichfork, got);
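/*
 * Illustration, not part of the patch: with an iext cursor, the in-core side
 * of the merge above is a three-step sequence on the cursor position.  A
 * minimal sketch assuming @icur points at the extent being merged away and
 * @new already describes the combined extent; collapse_into_prev() is a
 * hypothetical name used only for illustration:
 */
static void
collapse_into_prev(
	struct xfs_inode	*ip,
	int			whichfork,
	struct xfs_iext_cursor	*icur,
	struct xfs_bmbt_irec	*new)
{
	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, whichfork);

	/* drop the record the cursor points at ... */
	xfs_iext_remove(ip, icur, 0);
	/* ... step back to its predecessor ... */
	xfs_iext_prev(ifp, icur);
	/* ... and overwrite that predecessor with the merged extent */
	xfs_iext_update_extent(ip, xfs_bmap_fork_to_state(whichfork), icur,
			new);
}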
@@ -5949,183 +5569,83 @@ done:
return xfs_rmap_map_extent(mp, dfops, ip, whichfork, &new);
}

-/*
- * Shift a single extent.
- */
-STATIC int
-xfs_bmse_shift_one(
- struct xfs_inode *ip,
- int whichfork,
- xfs_fileoff_t offset_shift_fsb,
- int *current_ext,
- struct xfs_bmbt_irec *got,
- struct xfs_btree_cur *cur,
- int *logflags,
- enum shift_direction direction,
- struct xfs_defer_ops *dfops)
+static int
+xfs_bmap_shift_update_extent(
+ struct xfs_inode *ip,
+ int whichfork,
+ struct xfs_iext_cursor *icur,
+ struct xfs_bmbt_irec *got,
+ struct xfs_btree_cur *cur,
+ int *logflags,
+ struct xfs_defer_ops *dfops,
+ xfs_fileoff_t startoff)
{
- struct xfs_ifork *ifp;
- struct xfs_mount *mp;
- xfs_fileoff_t startoff;
- struct xfs_bmbt_irec adj_irec, new;
- int error;
- int i;
- int total_extents;
-
- mp = ip->i_mount;
- ifp = XFS_IFORK_PTR(ip, whichfork);
- total_extents = xfs_iext_count(ifp);
-
- /* delalloc extents should be prevented by caller */
- XFS_WANT_CORRUPTED_RETURN(mp, !isnullstartblock(got->br_startblock));
-
- if (direction == SHIFT_LEFT) {
- startoff = got->br_startoff - offset_shift_fsb;
-
- /*
- * Check for merge if we've got an extent to the left,
- * otherwise make sure there's enough room at the start
- * of the file for the shift.
- */
- if (!*current_ext) {
- if (got->br_startoff < offset_shift_fsb)
- return -EINVAL;
- goto update_current_ext;
- }
-
- /*
- * grab the left extent and check for a large enough hole.
- */
- xfs_iext_get_extent(ifp, *current_ext - 1, &adj_irec);
- if (startoff < adj_irec.br_startoff + adj_irec.br_blockcount)
- return -EINVAL;
-
- /* check whether to merge the extent or shift it down */
- if (xfs_bmse_can_merge(&adj_irec, got, offset_shift_fsb)) {
- return xfs_bmse_merge(ip, whichfork, offset_shift_fsb,
- *current_ext, got, &adj_irec,
- cur, logflags, dfops);
- }
- } else {
- startoff = got->br_startoff + offset_shift_fsb;
- /* nothing to move if this is the last extent */
- if (*current_ext >= (total_extents - 1))
- goto update_current_ext;
-
- /*
- * If this is not the last extent in the file, make sure there
- * is enough room between current extent and next extent for
- * accommodating the shift.
- */
- xfs_iext_get_extent(ifp, *current_ext + 1, &adj_irec);
- if (startoff + got->br_blockcount > adj_irec.br_startoff)
- return -EINVAL;
-
- /*
- * Unlike a left shift (which involves a hole punch),
- * a right shift does not modify extent neighbors
- * in any way. We should never find mergeable extents
- * in this scenario. Check anyways and warn if we
- * encounter two extents that could be one.
- */
- if (xfs_bmse_can_merge(got, &adj_irec, offset_shift_fsb))
- WARN_ON_ONCE(1);
- }
+ struct xfs_mount *mp = ip->i_mount;
+ struct xfs_bmbt_irec prev = *got;
+ int error, i;

- /*
- * Increment the extent index for the next iteration, update the start
- * offset of the in-core extent and update the btree if applicable.
- */
-update_current_ext:
*logflags |= XFS_ILOG_CORE;

- new = *got;
- new.br_startoff = startoff;
+ got->br_startoff = startoff;

if (cur) {
- error = xfs_bmbt_lookup_eq(cur, got->br_startoff,
- got->br_startblock, got->br_blockcount, &i);
+ error = xfs_bmbt_lookup_eq(cur, &prev, &i);
if (error)
return error;
XFS_WANT_CORRUPTED_RETURN(mp, i == 1);

- error = xfs_bmbt_update(cur, new.br_startoff,
- new.br_startblock, new.br_blockcount,
- new.br_state);
+ error = xfs_bmbt_update(cur, got);
if (error)
return error;
} else {
*logflags |= XFS_ILOG_DEXT;
}

- xfs_iext_update_extent(ifp, *current_ext, &new);
-
- if (direction == SHIFT_LEFT)
- (*current_ext)++;
- else
- (*current_ext)--;
+ xfs_iext_update_extent(ip, xfs_bmap_fork_to_state(whichfork), icur,
+ got);

/* update reverse mapping */
- error = xfs_rmap_unmap_extent(mp, dfops, ip, whichfork, got);
+ error = xfs_rmap_unmap_extent(mp, dfops, ip, whichfork, &prev);
if (error)
return error;
- return xfs_rmap_map_extent(mp, dfops, ip, whichfork, &new);
+ return xfs_rmap_map_extent(mp, dfops, ip, whichfork, got);
}

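/*
 * Illustration, not part of the patch: xfs_bmap_shift_update_extent() above
 * snapshots the old record into @prev before rewriting br_startoff because
 * everything keyed by the old location (the bmbt lookup and the rmap unmap)
 * must run against it, while the bmbt update and the rmap map take the
 * shifted record.  As a worked example (values assumed for illustration):
 * shifting { br_startoff = 100, br_blockcount = 8 } left by 20 blocks looks
 * up and unmaps the record at offset 100, then updates and maps the same
 * blocks at offset 80; only br_startoff changes.
 */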
-/*
- * Shift extent records to the left/right to cover/create a hole.
- *
- * The maximum number of extents to be shifted in a single operation is
- * @num_exts. @stop_fsb specifies the file offset at which to stop shift and the
- * file offset where we've left off is returned in @next_fsb. @offset_shift_fsb
- * is the length by which each extent is shifted. If there is no hole to shift
- * the extents into, this will be considered invalid operation and we abort
- * immediately.
- */
int
-xfs_bmap_shift_extents(
+xfs_bmap_collapse_extents(
struct xfs_trans *tp,
struct xfs_inode *ip,
xfs_fileoff_t *next_fsb,
xfs_fileoff_t offset_shift_fsb,
- int *done,
+ bool *done,
xfs_fileoff_t stop_fsb,
xfs_fsblock_t *firstblock,
- struct xfs_defer_ops *dfops,
- enum shift_direction direction,
- int num_exts)
+ struct xfs_defer_ops *dfops)
{
- struct xfs_btree_cur *cur = NULL;
- struct xfs_bmbt_irec got;
- struct xfs_mount *mp = ip->i_mount;
- struct xfs_ifork *ifp;
- xfs_extnum_t nexts = 0;
- xfs_extnum_t current_ext;
- xfs_extnum_t total_extents;
- xfs_extnum_t stop_extent;
- int error = 0;
- int whichfork = XFS_DATA_FORK;
- int logflags = 0;
+ int whichfork = XFS_DATA_FORK;
+ struct xfs_mount *mp = ip->i_mount;
+ struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
+ struct xfs_btree_cur *cur = NULL;
+ struct xfs_bmbt_irec got, prev;
+ struct xfs_iext_cursor icur;
+ xfs_fileoff_t new_startoff;
+ int error = 0;
+ int logflags = 0;

if (unlikely(XFS_TEST_ERROR(
(XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS &&
XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE),
mp, XFS_ERRTAG_BMAPIFORMAT))) {
- XFS_ERROR_REPORT("xfs_bmap_shift_extents",
- XFS_ERRLEVEL_LOW, mp);
+ XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, mp);
return -EFSCORRUPTED;
}

if (XFS_FORCED_SHUTDOWN(mp))
return -EIO;

- ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
- ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
- ASSERT(direction == SHIFT_LEFT || direction == SHIFT_RIGHT);
+ ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL));

- ifp = XFS_IFORK_PTR(ip, whichfork);
if (!(ifp->if_flags & XFS_IFEXTENTS)) {
- /* Read in all the extents */
error = xfs_iread_extents(tp, ip, whichfork);
if (error)
return error;
@@ -6138,107 +5658,165 @@ xfs_bmap_shift_extents(
cur->bc_private.b.flags = 0;
}

- /*
- * There may be delalloc extents in the data fork before the range we
- * are collapsing out, so we cannot use the count of real extents here.
- * Instead we have to calculate it from the incore fork.
- */
- total_extents = xfs_iext_count(ifp);
- if (total_extents == 0) {
- *done = 1;
+ if (!xfs_iext_lookup_extent(ip, ifp, *next_fsb, &icur, &got)) {
+ *done = true;
goto del_cursor;
}
+ XFS_WANT_CORRUPTED_RETURN(mp, !isnullstartblock(got.br_startblock));

- /*
- * In case of first right shift, we need to initialize next_fsb
- */
- if (*next_fsb == NULLFSBLOCK) {
- ASSERT(direction == SHIFT_RIGHT);
-
- current_ext = total_extents - 1;
- xfs_iext_get_extent(ifp, current_ext, &got);
- if (stop_fsb > got.br_startoff) {
- *done = 1;
+ new_startoff = got.br_startoff - offset_shift_fsb;
+ if (xfs_iext_peek_prev_extent(ifp, &icur, &prev)) {
+ if (new_startoff < prev.br_startoff + prev.br_blockcount) {
+ error = -EINVAL;
goto del_cursor;
}
- *next_fsb = got.br_startoff;
+
+ if (xfs_bmse_can_merge(&prev, &got, offset_shift_fsb)) {
+ error = xfs_bmse_merge(ip, whichfork, offset_shift_fsb,
+ &icur, &got, &prev, cur, &logflags,
+ dfops);
+ if (error)
+ goto del_cursor;
+ goto done;
+ }
} else {
- /*
- * Look up the extent index for the fsb where we start shifting. We can
- * henceforth iterate with current_ext as extent list changes are locked
- * out via ilock.
- *
- * If next_fsb lies in a hole beyond which there are no extents we are
- * done.
- */
- if (!xfs_iext_lookup_extent(ip, ifp, *next_fsb, &current_ext,
- &got)) {
- *done = 1;
+ if (got.br_startoff < offset_shift_fsb) {
+ error = -EINVAL;
goto del_cursor;
}
}

- /* Lookup the extent index at which we have to stop */
- if (direction == SHIFT_RIGHT) {
- struct xfs_bmbt_irec s;
+ error = xfs_bmap_shift_update_extent(ip, whichfork, &icur, &got, cur,
+ &logflags, dfops, new_startoff);
+ if (error)
+ goto del_cursor;
+
+done:
+ if (!xfs_iext_next_extent(ifp, &icur, &got)) {
+ *done = true;
+ goto del_cursor;
+ }
+
+ *next_fsb = got.br_startoff;
+del_cursor:
+ if (cur)
+ xfs_btree_del_cursor(cur,
+ error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
+ if (logflags)
+ xfs_trans_log_inode(tp, ip, logflags);
+ return error;
+}
+
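/*
 * Illustration, not part of the patch: xfs_bmap_collapse_extents() now moves
 * at most one extent per call and reports progress through *next_fsb and
 * *done.  A minimal sketch of a driving loop, with collapse_range_sketch() a
 * hypothetical name; a real caller also rolls the transaction and finishes
 * the deferred ops between iterations, which is omitted here:
 */
static int
collapse_range_sketch(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	xfs_fileoff_t		start_fsb,
	xfs_fileoff_t		stop_fsb,
	xfs_fileoff_t		shift_fsb,
	xfs_fsblock_t		*firstblock,
	struct xfs_defer_ops	*dfops)
{
	xfs_fileoff_t		next_fsb = start_fsb;
	bool			done = false;
	int			error = 0;

	/* collapse one extent at a time until the data fork is exhausted */
	while (!done) {
		error = xfs_bmap_collapse_extents(tp, ip, &next_fsb, shift_fsb,
				&done, stop_fsb, firstblock, dfops);
		if (error)
			break;
	}
	return error;
}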
+int
+xfs_bmap_insert_extents(
+ struct xfs_trans *tp,
+ struct xfs_inode *ip,
+ xfs_fileoff_t *next_fsb,
+ xfs_fileoff_t offset_shift_fsb,
+ bool *done,
+ xfs_fileoff_t stop_fsb,
+ xfs_fsblock_t *firstblock,
+ struct xfs_defer_ops *dfops)
+{
+ int whichfork = XFS_DATA_FORK;
+ struct xfs_mount *mp = ip->i_mount;
+ struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
+ struct xfs_btree_cur *cur = NULL;
+ struct xfs_bmbt_irec got, next;
+ struct xfs_iext_cursor icur;
+ xfs_fileoff_t new_startoff;
+ int error = 0;
+ int logflags = 0;
+
+ if (unlikely(XFS_TEST_ERROR(
+ (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS &&
+ XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE),
+ mp, XFS_ERRTAG_BMAPIFORMAT))) {
+ XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, mp);
+ return -EFSCORRUPTED;
+ }
+
+ if (XFS_FORCED_SHUTDOWN(mp))
+ return -EIO;
+
+ ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL));
+
+ if (!(ifp->if_flags & XFS_IFEXTENTS)) {
+ error = xfs_iread_extents(tp, ip, whichfork);
+ if (error)
+ return error;
+ }
+
+ if (ifp->if_flags & XFS_IFBROOT) {
+ cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
+ cur->bc_private.b.firstblock = *firstblock;
+ cur->bc_private.b.dfops = dfops;
+ cur->bc_private.b.flags = 0;
+ }

- xfs_iext_lookup_extent(ip, ifp, stop_fsb, &stop_extent, &s);
- /* Make stop_extent exclusive of shift range */
- stop_extent--;
- if (current_ext <= stop_extent) {
- error = -EIO;
+ if (*next_fsb == NULLFSBLOCK) {
+ xfs_iext_last(ifp, &icur);
+ if (!xfs_iext_get_extent(ifp, &icur, &got) ||
+ stop_fsb > got.br_startoff) {
+ *done = true;
goto del_cursor;
}
} else {
- stop_extent = total_extents;
- if (current_ext >= stop_extent) {
- error = -EIO;
+ if (!xfs_iext_lookup_extent(ip, ifp, *next_fsb, &icur, &got)) {
+ *done = true;
goto del_cursor;
}
}
+ XFS_WANT_CORRUPTED_RETURN(mp, !isnullstartblock(got.br_startblock));

- while (nexts++ < num_exts) {
- error = xfs_bmse_shift_one(ip, whichfork, offset_shift_fsb,
- &current_ext, &got, cur, &logflags,
- direction, dfops);
- if (error)
+ if (stop_fsb >= got.br_startoff + got.br_blockcount) {
+ error = -EIO;
+ goto del_cursor;
+ }
+
+ new_startoff = got.br_startoff + offset_shift_fsb;
+ if (xfs_iext_peek_next_extent(ifp, &icur, &next)) {
+ if (new_startoff + got.br_blockcount > next.br_startoff) {
+ error = -EINVAL;
goto del_cursor;
- /*
- * If there was an extent merge during the shift, the extent
- * count can change. Update the total and grade the next record.
- */
- if (direction == SHIFT_LEFT) {
- total_extents = xfs_iext_count(ifp);
- stop_extent = total_extents;
}

- if (current_ext == stop_extent) {
- *done = 1;
- *next_fsb = NULLFSBLOCK;
- break;
- }
- xfs_iext_get_extent(ifp, current_ext, &got);
+ /*
+ * Unlike a left shift (which involves a hole punch), a right
+ * shift does not modify extent neighbors in any way. We should
+ * never find mergeable extents in this scenario. Check anyways
+ * and warn if we encounter two extents that could be one.
+ */
+ if (xfs_bmse_can_merge(&got, &next, offset_shift_fsb))
+ WARN_ON_ONCE(1);
}

- if (!*done)
- *next_fsb = got.br_startoff;
+ error = xfs_bmap_shift_update_extent(ip, whichfork, &icur, &got, cur,
+ &logflags, dfops, new_startoff);
+ if (error)
+ goto del_cursor;
+
+ if (!xfs_iext_prev_extent(ifp, &icur, &got) ||
+ stop_fsb >= got.br_startoff + got.br_blockcount) {
+ *done = true;
+ goto del_cursor;
+ }

+ *next_fsb = got.br_startoff;
del_cursor:
if (cur)
xfs_btree_del_cursor(cur,
error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
-
if (logflags)
xfs_trans_log_inode(tp, ip, logflags);
-
return error;
}

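/*
 * Illustration, not part of the patch: xfs_bmap_insert_extents() is the
 * right-shift counterpart.  The first call passes *next_fsb == NULLFSBLOCK
 * so the walk starts at the last extent in the fork and works backwards
 * until it reaches stop_fsb.  A minimal illustrative driver, with
 * insert_range_sketch() a hypothetical name and transaction rolling between
 * iterations omitted:
 */
static int
insert_range_sketch(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	xfs_fileoff_t		stop_fsb,
	xfs_fileoff_t		shift_fsb,
	xfs_fsblock_t		*firstblock,
	struct xfs_defer_ops	*dfops)
{
	xfs_fileoff_t		next_fsb = NULLFSBLOCK;
	bool			done = false;
	int			error = 0;

	/* shift extents right, starting from the end of the fork */
	while (!done) {
		error = xfs_bmap_insert_extents(tp, ip, &next_fsb, shift_fsb,
				&done, stop_fsb, firstblock, dfops);
		if (error)
			break;
	}
	return error;
}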
/*
- * Splits an extent into two extents at split_fsb block such that it is
- * the first block of the current_ext. @current_ext is a target extent
- * to be split. @split_fsb is a block where the extents is split.
- * If split_fsb lies in a hole or the first block of extents, just return 0.
+ * Splits an extent into two extents at split_fsb block such that it is the
+ * first block of the current_ext. @ext is a target extent to be split.
+ * @split_fsb is a block where the extents is split. If split_fsb lies in a
+ * hole or the first block of extents, just return 0.
*/
STATIC int
xfs_bmap_split_extent_at(
@@ -6255,7 +5833,7 @@ xfs_bmap_split_extent_at(
struct xfs_mount *mp = ip->i_mount;
struct xfs_ifork *ifp;
xfs_fsblock_t gotblkcnt; /* new block count for got */
- xfs_extnum_t current_ext;
+ struct xfs_iext_cursor icur;
int error = 0;
int logflags = 0;
int i = 0;
@@ -6283,7 +5861,7 @@ xfs_bmap_split_extent_at(
/*
* If there are not extents, or split_fsb lies in a hole we are done.
*/
- if (!xfs_iext_lookup_extent(ip, ifp, split_fsb, &current_ext, &got) ||
+ if (!xfs_iext_lookup_extent(ip, ifp, split_fsb, &icur, &got) ||
got.br_startoff >= split_fsb)
return 0;

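/*
 * Illustration, not part of the patch: as a worked example of the split (the
 * gotblkcnt arithmetic itself sits outside these hunks, so the numbers are
 * assumed): splitting an extent { br_startoff = 10, br_blockcount = 8 } at
 * split_fsb = 14 trims the original record to gotblkcnt = 14 - 10 = 4 blocks
 * and inserts a new record for the remaining 4 blocks starting at file
 * offset 14, one position past the cursor.
 */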
@@ -6298,44 +5876,35 @@ xfs_bmap_split_extent_at(
cur->bc_private.b.firstblock = *firstfsb;
cur->bc_private.b.dfops = dfops;
cur->bc_private.b.flags = 0;
- error = xfs_bmbt_lookup_eq(cur, got.br_startoff,
- got.br_startblock,
- got.br_blockcount,
- &i);
+ error = xfs_bmbt_lookup_eq(cur, &got, &i);
if (error)
goto del_cursor;
XFS_WANT_CORRUPTED_GOTO(mp, i == 1, del_cursor);
}

got.br_blockcount = gotblkcnt;
- xfs_iext_update_extent(ifp, current_ext, &got);
+ xfs_iext_update_extent(ip, xfs_bmap_fork_to_state(whichfork), &icur,
+ &got);

logflags = XFS_ILOG_CORE;
if (cur) {
- error = xfs_bmbt_update(cur, got.br_startoff,
- got.br_startblock,
- got.br_blockcount,
- got.br_state);
+ error = xfs_bmbt_update(cur, &got);
if (error)
goto del_cursor;
} else
logflags |= XFS_ILOG_DEXT;

/* Add new extent */
- current_ext++;
- xfs_iext_insert(ip, current_ext, 1, &new, 0);
+ xfs_iext_next(ifp, &icur);
+ xfs_iext_insert(ip, &icur, &new, 0);
XFS_IFORK_NEXT_SET(ip, whichfork,
XFS_IFORK_NEXTENTS(ip, whichfork) + 1);

if (cur) {
- error = xfs_bmbt_lookup_eq(cur, new.br_startoff,
- new.br_startblock, new.br_blockcount,
- &i);
+ error = xfs_bmbt_lookup_eq(cur, &new, &i);
if (error)
goto del_cursor;
XFS_WANT_CORRUPTED_GOTO(mp, i == 0, del_cursor);
- cur->bc_rec.b.br_state = new.br_state;
-
error = xfs_btree_insert(cur, &i);
if (error)
goto del_cursor;