|
@@ -57,75 +57,35 @@ xfs_ialloc_cluster_alignment(
|
|
|
}
|
|
|
|
|
|
/*
|
|
|
- * Lookup the record equal to ino in the btree given by cur.
|
|
|
- */
|
|
|
-STATIC int /* error */
|
|
|
-xfs_inobt_lookup_eq(
|
|
|
- struct xfs_btree_cur *cur, /* btree cursor */
|
|
|
- xfs_agino_t ino, /* starting inode of chunk */
|
|
|
- __int32_t fcnt, /* free inode count */
|
|
|
- xfs_inofree_t free, /* free inode mask */
|
|
|
- int *stat) /* success/failure */
|
|
|
-{
|
|
|
- cur->bc_rec.i.ir_startino = ino;
|
|
|
- cur->bc_rec.i.ir_freecount = fcnt;
|
|
|
- cur->bc_rec.i.ir_free = free;
|
|
|
- return xfs_btree_lookup(cur, XFS_LOOKUP_EQ, stat);
|
|
|
-}
|
|
|
-
|
|
|
-/*
|
|
|
- * Lookup the first record greater than or equal to ino
|
|
|
- * in the btree given by cur.
|
|
|
+ * Lookup a record by ino in the btree given by cur.
|
|
|
*/
|
|
|
int /* error */
|
|
|
-xfs_inobt_lookup_ge(
|
|
|
+xfs_inobt_lookup(
|
|
|
struct xfs_btree_cur *cur, /* btree cursor */
|
|
|
xfs_agino_t ino, /* starting inode of chunk */
|
|
|
- __int32_t fcnt, /* free inode count */
|
|
|
- xfs_inofree_t free, /* free inode mask */
|
|
|
+ xfs_lookup_t dir, /* <=, >=, == */
|
|
|
int *stat) /* success/failure */
|
|
|
{
|
|
|
cur->bc_rec.i.ir_startino = ino;
|
|
|
- cur->bc_rec.i.ir_freecount = fcnt;
|
|
|
- cur->bc_rec.i.ir_free = free;
|
|
|
- return xfs_btree_lookup(cur, XFS_LOOKUP_GE, stat);
|
|
|
+ cur->bc_rec.i.ir_freecount = 0;
|
|
|
+ cur->bc_rec.i.ir_free = 0;
|
|
|
+ return xfs_btree_lookup(cur, dir, stat);
|
|
|
}
|
|
|
|
|
|
/*
|
|
|
- * Lookup the first record less than or equal to ino
|
|
|
- * in the btree given by cur.
|
|
|
- */
|
|
|
-int /* error */
|
|
|
-xfs_inobt_lookup_le(
|
|
|
- struct xfs_btree_cur *cur, /* btree cursor */
|
|
|
- xfs_agino_t ino, /* starting inode of chunk */
|
|
|
- __int32_t fcnt, /* free inode count */
|
|
|
- xfs_inofree_t free, /* free inode mask */
|
|
|
- int *stat) /* success/failure */
|
|
|
-{
|
|
|
- cur->bc_rec.i.ir_startino = ino;
|
|
|
- cur->bc_rec.i.ir_freecount = fcnt;
|
|
|
- cur->bc_rec.i.ir_free = free;
|
|
|
- return xfs_btree_lookup(cur, XFS_LOOKUP_LE, stat);
|
|
|
-}
|
|
|
-
|
|
|
-/*
|
|
|
- * Update the record referred to by cur to the value given
|
|
|
- * by [ino, fcnt, free].
|
|
|
+ * Update the record referred to by cur to the value given.
|
|
|
* This either works (return 0) or gets an EFSCORRUPTED error.
|
|
|
*/
|
|
|
STATIC int /* error */
|
|
|
xfs_inobt_update(
|
|
|
struct xfs_btree_cur *cur, /* btree cursor */
|
|
|
- xfs_agino_t ino, /* starting inode of chunk */
|
|
|
- __int32_t fcnt, /* free inode count */
|
|
|
- xfs_inofree_t free) /* free inode mask */
|
|
|
+ xfs_inobt_rec_incore_t *irec) /* btree record */
|
|
|
{
|
|
|
union xfs_btree_rec rec;
|
|
|
|
|
|
- rec.inobt.ir_startino = cpu_to_be32(ino);
|
|
|
- rec.inobt.ir_freecount = cpu_to_be32(fcnt);
|
|
|
- rec.inobt.ir_free = cpu_to_be64(free);
|
|
|
+ rec.inobt.ir_startino = cpu_to_be32(irec->ir_startino);
|
|
|
+ rec.inobt.ir_freecount = cpu_to_be32(irec->ir_freecount);
|
|
|
+ rec.inobt.ir_free = cpu_to_be64(irec->ir_free);
|
|
|
return xfs_btree_update(cur, &rec);
|
|
|
}
|
|
|
|
|
@@ -135,9 +95,7 @@ xfs_inobt_update(
|
|
|
int /* error */
|
|
|
xfs_inobt_get_rec(
|
|
|
struct xfs_btree_cur *cur, /* btree cursor */
|
|
|
- xfs_agino_t *ino, /* output: starting inode of chunk */
|
|
|
- __int32_t *fcnt, /* output: number of free inodes */
|
|
|
- xfs_inofree_t *free, /* output: free inode mask */
|
|
|
+ xfs_inobt_rec_incore_t *irec, /* btree record */
|
|
|
int *stat) /* output: success/failure */
|
|
|
{
|
|
|
union xfs_btree_rec *rec;
|
|
@@ -145,13 +103,135 @@ xfs_inobt_get_rec(
|
|
|
|
|
|
error = xfs_btree_get_rec(cur, &rec, stat);
|
|
|
if (!error && *stat == 1) {
|
|
|
- *ino = be32_to_cpu(rec->inobt.ir_startino);
|
|
|
- *fcnt = be32_to_cpu(rec->inobt.ir_freecount);
|
|
|
- *free = be64_to_cpu(rec->inobt.ir_free);
|
|
|
+ irec->ir_startino = be32_to_cpu(rec->inobt.ir_startino);
|
|
|
+ irec->ir_freecount = be32_to_cpu(rec->inobt.ir_freecount);
|
|
|
+ irec->ir_free = be64_to_cpu(rec->inobt.ir_free);
|
|
|
}
|
|
|
return error;
|
|
|
}
|
|
|
|
|
|
+/*
|
|
|
+ * Verify that the number of free inodes in the AGI is correct.
|
|
|
+ */
|
|
|
+#ifdef DEBUG
|
|
|
+STATIC int
|
|
|
+xfs_check_agi_freecount(
|
|
|
+ struct xfs_btree_cur *cur,
|
|
|
+ struct xfs_agi *agi)
|
|
|
+{
|
|
|
+ if (cur->bc_nlevels == 1) {
|
|
|
+ xfs_inobt_rec_incore_t rec;
|
|
|
+ int freecount = 0;
|
|
|
+ int error;
|
|
|
+ int i;
|
|
|
+
|
|
|
+ error = xfs_inobt_lookup(cur, 0, XFS_LOOKUP_GE, &i);
|
|
|
+ if (error)
|
|
|
+ return error;
|
|
|
+
|
|
|
+ do {
|
|
|
+ error = xfs_inobt_get_rec(cur, &rec, &i);
|
|
|
+ if (error)
|
|
|
+ return error;
|
|
|
+
|
|
|
+ if (i) {
|
|
|
+ freecount += rec.ir_freecount;
|
|
|
+ error = xfs_btree_increment(cur, 0, &i);
|
|
|
+ if (error)
|
|
|
+ return error;
|
|
|
+ }
|
|
|
+ } while (i == 1);
|
|
|
+
|
|
|
+ if (!XFS_FORCED_SHUTDOWN(cur->bc_mp))
|
|
|
+ ASSERT(freecount == be32_to_cpu(agi->agi_freecount));
|
|
|
+ }
|
|
|
+ return 0;
|
|
|
+}
|
|
|
+#else
|
|
|
+#define xfs_check_agi_freecount(cur, agi) 0
|
|
|
+#endif
|
|
|
+
|
|
|
+/*
|
|
|
+ * Initialise a new set of inodes.
|
|
|
+ */
|
|
|
+STATIC void
|
|
|
+xfs_ialloc_inode_init(
|
|
|
+ struct xfs_mount *mp,
|
|
|
+ struct xfs_trans *tp,
|
|
|
+ xfs_agnumber_t agno,
|
|
|
+ xfs_agblock_t agbno,
|
|
|
+ xfs_agblock_t length,
|
|
|
+ unsigned int gen)
|
|
|
+{
|
|
|
+ struct xfs_buf *fbuf;
|
|
|
+ struct xfs_dinode *free;
|
|
|
+ int blks_per_cluster, nbufs, ninodes;
|
|
|
+ int version;
|
|
|
+ int i, j;
|
|
|
+ xfs_daddr_t d;
|
|
|
+
|
|
|
+ /*
|
|
|
+ * Loop over the new block(s), filling in the inodes.
|
|
|
+ * For small block sizes, manipulate the inodes in buffers
|
|
|
+ * which are multiples of the blocks size.
|
|
|
+ */
|
|
|
+ if (mp->m_sb.sb_blocksize >= XFS_INODE_CLUSTER_SIZE(mp)) {
|
|
|
+ blks_per_cluster = 1;
|
|
|
+ nbufs = length;
|
|
|
+ ninodes = mp->m_sb.sb_inopblock;
|
|
|
+ } else {
|
|
|
+ blks_per_cluster = XFS_INODE_CLUSTER_SIZE(mp) /
|
|
|
+ mp->m_sb.sb_blocksize;
|
|
|
+ nbufs = length / blks_per_cluster;
|
|
|
+ ninodes = blks_per_cluster * mp->m_sb.sb_inopblock;
|
|
|
+ }
|
|
|
+
|
|
|
+ /*
|
|
|
+ * Figure out what version number to use in the inodes we create.
|
|
|
+ * If the superblock version has caught up to the one that supports
|
|
|
+ * the new inode format, then use the new inode version. Otherwise
|
|
|
+ * use the old version so that old kernels will continue to be
|
|
|
+ * able to use the file system.
|
|
|
+ */
|
|
|
+ if (xfs_sb_version_hasnlink(&mp->m_sb))
|
|
|
+ version = 2;
|
|
|
+ else
|
|
|
+ version = 1;
|
|
|
+
|
|
|
+ for (j = 0; j < nbufs; j++) {
|
|
|
+ /*
|
|
|
+ * Get the block.
|
|
|
+ */
|
|
|
+ d = XFS_AGB_TO_DADDR(mp, agno, agbno + (j * blks_per_cluster));
|
|
|
+ fbuf = xfs_trans_get_buf(tp, mp->m_ddev_targp, d,
|
|
|
+ mp->m_bsize * blks_per_cluster,
|
|
|
+ XFS_BUF_LOCK);
|
|
|
+ ASSERT(fbuf);
|
|
|
+ ASSERT(!XFS_BUF_GETERROR(fbuf));
|
|
|
+
|
|
|
+ /*
|
|
|
+ * Initialize all inodes in this buffer and then log them.
|
|
|
+ *
|
|
|
+ * XXX: It would be much better if we had just one transaction
|
|
|
+ * to log a whole cluster of inodes instead of all the
|
|
|
+ * individual transactions causing a lot of log traffic.
|
|
|
+ */
|
|
|
+ xfs_biozero(fbuf, 0, ninodes << mp->m_sb.sb_inodelog);
|
|
|
+ for (i = 0; i < ninodes; i++) {
|
|
|
+ int ioffset = i << mp->m_sb.sb_inodelog;
|
|
|
+ uint isize = sizeof(struct xfs_dinode);
|
|
|
+
|
|
|
+ free = xfs_make_iptr(mp, fbuf, i);
|
|
|
+ free->di_magic = cpu_to_be16(XFS_DINODE_MAGIC);
|
|
|
+ free->di_version = version;
|
|
|
+ free->di_gen = cpu_to_be32(gen);
|
|
|
+ free->di_next_unlinked = cpu_to_be32(NULLAGINO);
|
|
|
+ xfs_trans_log_buf(tp, fbuf, ioffset, ioffset + isize - 1);
|
|
|
+ }
|
|
|
+ xfs_trans_inode_alloc_buf(tp, fbuf);
|
|
|
+ }
|
|
|
+}
|
|
|
+
|
|
|
/*
|
|
|
* Allocate new inodes in the allocation group specified by agbp.
|
|
|
* Return 0 for success, else error code.
|
|
@@ -164,24 +244,15 @@ xfs_ialloc_ag_alloc(
|
|
|
{
|
|
|
xfs_agi_t *agi; /* allocation group header */
|
|
|
xfs_alloc_arg_t args; /* allocation argument structure */
|
|
|
- int blks_per_cluster; /* fs blocks per inode cluster */
|
|
|
xfs_btree_cur_t *cur; /* inode btree cursor */
|
|
|
- xfs_daddr_t d; /* disk addr of buffer */
|
|
|
xfs_agnumber_t agno;
|
|
|
int error;
|
|
|
- xfs_buf_t *fbuf; /* new free inodes' buffer */
|
|
|
- xfs_dinode_t *free; /* new free inode structure */
|
|
|
- int i; /* inode counter */
|
|
|
- int j; /* block counter */
|
|
|
- int nbufs; /* num bufs of new inodes */
|
|
|
+ int i;
|
|
|
xfs_agino_t newino; /* new first inode's number */
|
|
|
xfs_agino_t newlen; /* new number of inodes */
|
|
|
- int ninodes; /* num inodes per buf */
|
|
|
xfs_agino_t thisino; /* current inode number, for loop */
|
|
|
- int version; /* inode version number to use */
|
|
|
int isaligned = 0; /* inode allocation at stripe unit */
|
|
|
/* boundary */
|
|
|
- unsigned int gen;
|
|
|
|
|
|
args.tp = tp;
|
|
|
args.mp = tp->t_mountp;
|
|
@@ -202,12 +273,12 @@ xfs_ialloc_ag_alloc(
|
|
|
*/
|
|
|
agi = XFS_BUF_TO_AGI(agbp);
|
|
|
newino = be32_to_cpu(agi->agi_newino);
|
|
|
+ agno = be32_to_cpu(agi->agi_seqno);
|
|
|
args.agbno = XFS_AGINO_TO_AGBNO(args.mp, newino) +
|
|
|
XFS_IALLOC_BLOCKS(args.mp);
|
|
|
if (likely(newino != NULLAGINO &&
|
|
|
(args.agbno < be32_to_cpu(agi->agi_length)))) {
|
|
|
- args.fsbno = XFS_AGB_TO_FSB(args.mp,
|
|
|
- be32_to_cpu(agi->agi_seqno), args.agbno);
|
|
|
+ args.fsbno = XFS_AGB_TO_FSB(args.mp, agno, args.agbno);
|
|
|
args.type = XFS_ALLOCTYPE_THIS_BNO;
|
|
|
args.mod = args.total = args.wasdel = args.isfl =
|
|
|
args.userdata = args.minalignslop = 0;
|
|
@@ -258,8 +329,7 @@ xfs_ialloc_ag_alloc(
|
|
|
* For now, just allocate blocks up front.
|
|
|
*/
|
|
|
args.agbno = be32_to_cpu(agi->agi_root);
|
|
|
- args.fsbno = XFS_AGB_TO_FSB(args.mp,
|
|
|
- be32_to_cpu(agi->agi_seqno), args.agbno);
|
|
|
+ args.fsbno = XFS_AGB_TO_FSB(args.mp, agno, args.agbno);
|
|
|
/*
|
|
|
* Allocate a fixed-size extent of inodes.
|
|
|
*/
|
|
@@ -282,8 +352,7 @@ xfs_ialloc_ag_alloc(
|
|
|
if (isaligned && args.fsbno == NULLFSBLOCK) {
|
|
|
args.type = XFS_ALLOCTYPE_NEAR_BNO;
|
|
|
args.agbno = be32_to_cpu(agi->agi_root);
|
|
|
- args.fsbno = XFS_AGB_TO_FSB(args.mp,
|
|
|
- be32_to_cpu(agi->agi_seqno), args.agbno);
|
|
|
+ args.fsbno = XFS_AGB_TO_FSB(args.mp, agno, args.agbno);
|
|
|
args.alignment = xfs_ialloc_cluster_alignment(&args);
|
|
|
if ((error = xfs_alloc_vextent(&args)))
|
|
|
return error;
|
|
@@ -294,85 +363,30 @@ xfs_ialloc_ag_alloc(
|
|
|
return 0;
|
|
|
}
|
|
|
ASSERT(args.len == args.minlen);
|
|
|
- /*
|
|
|
- * Convert the results.
|
|
|
- */
|
|
|
- newino = XFS_OFFBNO_TO_AGINO(args.mp, args.agbno, 0);
|
|
|
- /*
|
|
|
- * Loop over the new block(s), filling in the inodes.
|
|
|
- * For small block sizes, manipulate the inodes in buffers
|
|
|
- * which are multiples of the blocks size.
|
|
|
- */
|
|
|
- if (args.mp->m_sb.sb_blocksize >= XFS_INODE_CLUSTER_SIZE(args.mp)) {
|
|
|
- blks_per_cluster = 1;
|
|
|
- nbufs = (int)args.len;
|
|
|
- ninodes = args.mp->m_sb.sb_inopblock;
|
|
|
- } else {
|
|
|
- blks_per_cluster = XFS_INODE_CLUSTER_SIZE(args.mp) /
|
|
|
- args.mp->m_sb.sb_blocksize;
|
|
|
- nbufs = (int)args.len / blks_per_cluster;
|
|
|
- ninodes = blks_per_cluster * args.mp->m_sb.sb_inopblock;
|
|
|
- }
|
|
|
- /*
|
|
|
- * Figure out what version number to use in the inodes we create.
|
|
|
- * If the superblock version has caught up to the one that supports
|
|
|
- * the new inode format, then use the new inode version. Otherwise
|
|
|
- * use the old version so that old kernels will continue to be
|
|
|
- * able to use the file system.
|
|
|
- */
|
|
|
- if (xfs_sb_version_hasnlink(&args.mp->m_sb))
|
|
|
- version = 2;
|
|
|
- else
|
|
|
- version = 1;
|
|
|
|
|
|
/*
|
|
|
+ * Stamp and write the inode buffers.
|
|
|
+ *
|
|
|
* Seed the new inode cluster with a random generation number. This
|
|
|
* prevents short-term reuse of generation numbers if a chunk is
|
|
|
* freed and then immediately reallocated. We use random numbers
|
|
|
* rather than a linear progression to prevent the next generation
|
|
|
* number from being easily guessable.
|
|
|
*/
|
|
|
- gen = random32();
|
|
|
- for (j = 0; j < nbufs; j++) {
|
|
|
- /*
|
|
|
- * Get the block.
|
|
|
- */
|
|
|
- d = XFS_AGB_TO_DADDR(args.mp, be32_to_cpu(agi->agi_seqno),
|
|
|
- args.agbno + (j * blks_per_cluster));
|
|
|
- fbuf = xfs_trans_get_buf(tp, args.mp->m_ddev_targp, d,
|
|
|
- args.mp->m_bsize * blks_per_cluster,
|
|
|
- XFS_BUF_LOCK);
|
|
|
- ASSERT(fbuf);
|
|
|
- ASSERT(!XFS_BUF_GETERROR(fbuf));
|
|
|
+ xfs_ialloc_inode_init(args.mp, tp, agno, args.agbno, args.len,
|
|
|
+ random32());
|
|
|
|
|
|
- /*
|
|
|
- * Initialize all inodes in this buffer and then log them.
|
|
|
- *
|
|
|
- * XXX: It would be much better if we had just one transaction to
|
|
|
- * log a whole cluster of inodes instead of all the individual
|
|
|
- * transactions causing a lot of log traffic.
|
|
|
- */
|
|
|
- xfs_biozero(fbuf, 0, ninodes << args.mp->m_sb.sb_inodelog);
|
|
|
- for (i = 0; i < ninodes; i++) {
|
|
|
- int ioffset = i << args.mp->m_sb.sb_inodelog;
|
|
|
- uint isize = sizeof(struct xfs_dinode);
|
|
|
-
|
|
|
- free = xfs_make_iptr(args.mp, fbuf, i);
|
|
|
- free->di_magic = cpu_to_be16(XFS_DINODE_MAGIC);
|
|
|
- free->di_version = version;
|
|
|
- free->di_gen = cpu_to_be32(gen);
|
|
|
- free->di_next_unlinked = cpu_to_be32(NULLAGINO);
|
|
|
- xfs_trans_log_buf(tp, fbuf, ioffset, ioffset + isize - 1);
|
|
|
- }
|
|
|
- xfs_trans_inode_alloc_buf(tp, fbuf);
|
|
|
- }
|
|
|
+ /*
|
|
|
+ * Convert the results.
|
|
|
+ */
|
|
|
+ newino = XFS_OFFBNO_TO_AGINO(args.mp, args.agbno, 0);
|
|
|
be32_add_cpu(&agi->agi_count, newlen);
|
|
|
be32_add_cpu(&agi->agi_freecount, newlen);
|
|
|
- agno = be32_to_cpu(agi->agi_seqno);
|
|
|
down_read(&args.mp->m_peraglock);
|
|
|
args.mp->m_perag[agno].pagi_freecount += newlen;
|
|
|
up_read(&args.mp->m_peraglock);
|
|
|
agi->agi_newino = cpu_to_be32(newino);
|
|
|
+
|
|
|
/*
|
|
|
* Insert records describing the new inode chunk into the btree.
|
|
|
*/
|
|
@@ -380,13 +394,17 @@ xfs_ialloc_ag_alloc(
|
|
|
for (thisino = newino;
|
|
|
thisino < newino + newlen;
|
|
|
thisino += XFS_INODES_PER_CHUNK) {
|
|
|
- if ((error = xfs_inobt_lookup_eq(cur, thisino,
|
|
|
- XFS_INODES_PER_CHUNK, XFS_INOBT_ALL_FREE, &i))) {
|
|
|
+ cur->bc_rec.i.ir_startino = thisino;
|
|
|
+ cur->bc_rec.i.ir_freecount = XFS_INODES_PER_CHUNK;
|
|
|
+ cur->bc_rec.i.ir_free = XFS_INOBT_ALL_FREE;
|
|
|
+ error = xfs_btree_lookup(cur, XFS_LOOKUP_EQ, &i);
|
|
|
+ if (error) {
|
|
|
xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
|
|
|
return error;
|
|
|
}
|
|
|
ASSERT(i == 0);
|
|
|
- if ((error = xfs_btree_insert(cur, &i))) {
|
|
|
+ error = xfs_btree_insert(cur, &i);
|
|
|
+ if (error) {
|
|
|
xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
|
|
|
return error;
|
|
|
}
|
|
@@ -538,6 +556,62 @@ nextag:
|
|
|
}
|
|
|
}
|
|
|
|
|
|
+/*
|
|
|
+ * Try to retrieve the next record to the left/right from the current one.
|
|
|
+ */
|
|
|
+STATIC int
|
|
|
+xfs_ialloc_next_rec(
|
|
|
+ struct xfs_btree_cur *cur,
|
|
|
+ xfs_inobt_rec_incore_t *rec,
|
|
|
+ int *done,
|
|
|
+ int left)
|
|
|
+{
|
|
|
+ int error;
|
|
|
+ int i;
|
|
|
+
|
|
|
+ if (left)
|
|
|
+ error = xfs_btree_decrement(cur, 0, &i);
|
|
|
+ else
|
|
|
+ error = xfs_btree_increment(cur, 0, &i);
|
|
|
+
|
|
|
+ if (error)
|
|
|
+ return error;
|
|
|
+ *done = !i;
|
|
|
+ if (i) {
|
|
|
+ error = xfs_inobt_get_rec(cur, rec, &i);
|
|
|
+ if (error)
|
|
|
+ return error;
|
|
|
+ XFS_WANT_CORRUPTED_RETURN(i == 1);
|
|
|
+ }
|
|
|
+
|
|
|
+ return 0;
|
|
|
+}
|
|
|
+
|
|
|
+STATIC int
|
|
|
+xfs_ialloc_get_rec(
|
|
|
+ struct xfs_btree_cur *cur,
|
|
|
+ xfs_agino_t agino,
|
|
|
+ xfs_inobt_rec_incore_t *rec,
|
|
|
+ int *done,
|
|
|
+ int left)
|
|
|
+{
|
|
|
+ int error;
|
|
|
+ int i;
|
|
|
+
|
|
|
+ error = xfs_inobt_lookup(cur, agino, XFS_LOOKUP_EQ, &i);
|
|
|
+ if (error)
|
|
|
+ return error;
|
|
|
+ *done = !i;
|
|
|
+ if (i) {
|
|
|
+ error = xfs_inobt_get_rec(cur, rec, &i);
|
|
|
+ if (error)
|
|
|
+ return error;
|
|
|
+ XFS_WANT_CORRUPTED_RETURN(i == 1);
|
|
|
+ }
|
|
|
+
|
|
|
+ return 0;
|
|
|
+}
|
|
|
+
|
|
|
/*
|
|
|
* Visible inode allocation functions.
|
|
|
*/
|
|
@@ -592,8 +666,8 @@ xfs_dialloc(
|
|
|
int j; /* result code */
|
|
|
xfs_mount_t *mp; /* file system mount structure */
|
|
|
int offset; /* index of inode in chunk */
|
|
|
- xfs_agino_t pagino; /* parent's a.g. relative inode # */
|
|
|
- xfs_agnumber_t pagno; /* parent's allocation group number */
|
|
|
+ xfs_agino_t pagino; /* parent's AG relative inode # */
|
|
|
+ xfs_agnumber_t pagno; /* parent's AG number */
|
|
|
xfs_inobt_rec_incore_t rec; /* inode allocation record */
|
|
|
xfs_agnumber_t tagno; /* testing allocation group number */
|
|
|
xfs_btree_cur_t *tcur; /* temp cursor */
|
|
@@ -716,6 +790,8 @@ nextag:
|
|
|
*/
|
|
|
agno = tagno;
|
|
|
*IO_agbp = NULL;
|
|
|
+
|
|
|
+ restart_pagno:
|
|
|
cur = xfs_inobt_init_cursor(mp, tp, agbp, be32_to_cpu(agi->agi_seqno));
|
|
|
/*
|
|
|
* If pagino is 0 (this is the root inode allocation) use newino.
|
|
@@ -723,220 +799,199 @@ nextag:
|
|
|
*/
|
|
|
if (!pagino)
|
|
|
pagino = be32_to_cpu(agi->agi_newino);
|
|
|
-#ifdef DEBUG
|
|
|
- if (cur->bc_nlevels == 1) {
|
|
|
- int freecount = 0;
|
|
|
|
|
|
- if ((error = xfs_inobt_lookup_ge(cur, 0, 0, 0, &i)))
|
|
|
- goto error0;
|
|
|
- XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
|
|
|
- do {
|
|
|
- if ((error = xfs_inobt_get_rec(cur, &rec.ir_startino,
|
|
|
- &rec.ir_freecount, &rec.ir_free, &i)))
|
|
|
- goto error0;
|
|
|
- XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
|
|
|
- freecount += rec.ir_freecount;
|
|
|
- if ((error = xfs_btree_increment(cur, 0, &i)))
|
|
|
- goto error0;
|
|
|
- } while (i == 1);
|
|
|
+ error = xfs_check_agi_freecount(cur, agi);
|
|
|
+ if (error)
|
|
|
+ goto error0;
|
|
|
|
|
|
- ASSERT(freecount == be32_to_cpu(agi->agi_freecount) ||
|
|
|
- XFS_FORCED_SHUTDOWN(mp));
|
|
|
- }
|
|
|
-#endif
|
|
|
/*
|
|
|
- * If in the same a.g. as the parent, try to get near the parent.
|
|
|
+ * If in the same AG as the parent, try to get near the parent.
|
|
|
*/
|
|
|
if (pagno == agno) {
|
|
|
- if ((error = xfs_inobt_lookup_le(cur, pagino, 0, 0, &i)))
|
|
|
+ xfs_perag_t *pag = &mp->m_perag[agno];
|
|
|
+ int doneleft; /* done, to the left */
|
|
|
+ int doneright; /* done, to the right */
|
|
|
+ int searchdistance = 10;
|
|
|
+
|
|
|
+ error = xfs_inobt_lookup(cur, pagino, XFS_LOOKUP_LE, &i);
|
|
|
+ if (error)
|
|
|
+ goto error0;
|
|
|
+ XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
|
|
|
+
|
|
|
+ error = xfs_inobt_get_rec(cur, &rec, &j);
|
|
|
+ if (error)
|
|
|
goto error0;
|
|
|
- if (i != 0 &&
|
|
|
- (error = xfs_inobt_get_rec(cur, &rec.ir_startino,
|
|
|
- &rec.ir_freecount, &rec.ir_free, &j)) == 0 &&
|
|
|
- j == 1 &&
|
|
|
- rec.ir_freecount > 0) {
|
|
|
+ XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
|
|
|
+
|
|
|
+ if (rec.ir_freecount > 0) {
|
|
|
/*
|
|
|
* Found a free inode in the same chunk
|
|
|
- * as parent, done.
|
|
|
+ * as the parent, done.
|
|
|
*/
|
|
|
+ goto alloc_inode;
|
|
|
}
|
|
|
+
|
|
|
+
|
|
|
+ /*
|
|
|
+ * In the same AG as parent, but parent's chunk is full.
|
|
|
+ */
|
|
|
+
|
|
|
+ /* duplicate the cursor, search left & right simultaneously */
|
|
|
+ error = xfs_btree_dup_cursor(cur, &tcur);
|
|
|
+ if (error)
|
|
|
+ goto error0;
|
|
|
+
|
|
|
/*
|
|
|
- * In the same a.g. as parent, but parent's chunk is full.
|
|
|
+ * Skip to last blocks looked up if same parent inode.
|
|
|
*/
|
|
|
- else {
|
|
|
- int doneleft; /* done, to the left */
|
|
|
- int doneright; /* done, to the right */
|
|
|
+ if (pagino != NULLAGINO &&
|
|
|
+ pag->pagl_pagino == pagino &&
|
|
|
+ pag->pagl_leftrec != NULLAGINO &&
|
|
|
+ pag->pagl_rightrec != NULLAGINO) {
|
|
|
+ error = xfs_ialloc_get_rec(tcur, pag->pagl_leftrec,
|
|
|
+ &trec, &doneleft, 1);
|
|
|
+ if (error)
|
|
|
+ goto error1;
|
|
|
|
|
|
+ error = xfs_ialloc_get_rec(cur, pag->pagl_rightrec,
|
|
|
+ &rec, &doneright, 0);
|
|
|
if (error)
|
|
|
- goto error0;
|
|
|
- ASSERT(i == 1);
|
|
|
- ASSERT(j == 1);
|
|
|
- /*
|
|
|
- * Duplicate the cursor, search left & right
|
|
|
- * simultaneously.
|
|
|
- */
|
|
|
- if ((error = xfs_btree_dup_cursor(cur, &tcur)))
|
|
|
- goto error0;
|
|
|
- /*
|
|
|
- * Search left with tcur, back up 1 record.
|
|
|
- */
|
|
|
- if ((error = xfs_btree_decrement(tcur, 0, &i)))
|
|
|
goto error1;
|
|
|
- doneleft = !i;
|
|
|
- if (!doneleft) {
|
|
|
- if ((error = xfs_inobt_get_rec(tcur,
|
|
|
- &trec.ir_startino,
|
|
|
- &trec.ir_freecount,
|
|
|
- &trec.ir_free, &i)))
|
|
|
- goto error1;
|
|
|
- XFS_WANT_CORRUPTED_GOTO(i == 1, error1);
|
|
|
- }
|
|
|
- /*
|
|
|
- * Search right with cur, go forward 1 record.
|
|
|
- */
|
|
|
- if ((error = xfs_btree_increment(cur, 0, &i)))
|
|
|
+ } else {
|
|
|
+ /* search left with tcur, back up 1 record */
|
|
|
+ error = xfs_ialloc_next_rec(tcur, &trec, &doneleft, 1);
|
|
|
+ if (error)
|
|
|
goto error1;
|
|
|
- doneright = !i;
|
|
|
- if (!doneright) {
|
|
|
- if ((error = xfs_inobt_get_rec(cur,
|
|
|
- &rec.ir_startino,
|
|
|
- &rec.ir_freecount,
|
|
|
- &rec.ir_free, &i)))
|
|
|
- goto error1;
|
|
|
- XFS_WANT_CORRUPTED_GOTO(i == 1, error1);
|
|
|
- }
|
|
|
- /*
|
|
|
- * Loop until we find the closest inode chunk
|
|
|
- * with a free one.
|
|
|
- */
|
|
|
- while (!doneleft || !doneright) {
|
|
|
- int useleft; /* using left inode
|
|
|
- chunk this time */
|
|
|
|
|
|
+ /* search right with cur, go forward 1 record. */
|
|
|
+ error = xfs_ialloc_next_rec(cur, &rec, &doneright, 0);
|
|
|
+ if (error)
|
|
|
+ goto error1;
|
|
|
+ }
|
|
|
+
|
|
|
+ /*
|
|
|
+ * Loop until we find an inode chunk with a free inode.
|
|
|
+ */
|
|
|
+ while (!doneleft || !doneright) {
|
|
|
+ int useleft; /* using left inode chunk this time */
|
|
|
+
|
|
|
+ if (!--searchdistance) {
|
|
|
/*
|
|
|
- * Figure out which block is closer,
|
|
|
- * if both are valid.
|
|
|
- */
|
|
|
- if (!doneleft && !doneright)
|
|
|
- useleft =
|
|
|
- pagino -
|
|
|
- (trec.ir_startino +
|
|
|
- XFS_INODES_PER_CHUNK - 1) <
|
|
|
- rec.ir_startino - pagino;
|
|
|
- else
|
|
|
- useleft = !doneleft;
|
|
|
- /*
|
|
|
- * If checking the left, does it have
|
|
|
- * free inodes?
|
|
|
- */
|
|
|
- if (useleft && trec.ir_freecount) {
|
|
|
- /*
|
|
|
- * Yes, set it up as the chunk to use.
|
|
|
- */
|
|
|
- rec = trec;
|
|
|
- xfs_btree_del_cursor(cur,
|
|
|
- XFS_BTREE_NOERROR);
|
|
|
- cur = tcur;
|
|
|
- break;
|
|
|
- }
|
|
|
- /*
|
|
|
- * If checking the right, does it have
|
|
|
- * free inodes?
|
|
|
- */
|
|
|
- if (!useleft && rec.ir_freecount) {
|
|
|
- /*
|
|
|
- * Yes, it's already set up.
|
|
|
- */
|
|
|
- xfs_btree_del_cursor(tcur,
|
|
|
- XFS_BTREE_NOERROR);
|
|
|
- break;
|
|
|
- }
|
|
|
- /*
|
|
|
- * If used the left, get another one
|
|
|
- * further left.
|
|
|
- */
|
|
|
- if (useleft) {
|
|
|
- if ((error = xfs_btree_decrement(tcur, 0,
|
|
|
- &i)))
|
|
|
- goto error1;
|
|
|
- doneleft = !i;
|
|
|
- if (!doneleft) {
|
|
|
- if ((error = xfs_inobt_get_rec(
|
|
|
- tcur,
|
|
|
- &trec.ir_startino,
|
|
|
- &trec.ir_freecount,
|
|
|
- &trec.ir_free, &i)))
|
|
|
- goto error1;
|
|
|
- XFS_WANT_CORRUPTED_GOTO(i == 1,
|
|
|
- error1);
|
|
|
- }
|
|
|
- }
|
|
|
- /*
|
|
|
- * If used the right, get another one
|
|
|
- * further right.
|
|
|
+ * Not in range - save last search
|
|
|
+ * location and allocate a new inode
|
|
|
*/
|
|
|
- else {
|
|
|
- if ((error = xfs_btree_increment(cur, 0,
|
|
|
- &i)))
|
|
|
- goto error1;
|
|
|
- doneright = !i;
|
|
|
- if (!doneright) {
|
|
|
- if ((error = xfs_inobt_get_rec(
|
|
|
- cur,
|
|
|
- &rec.ir_startino,
|
|
|
- &rec.ir_freecount,
|
|
|
- &rec.ir_free, &i)))
|
|
|
- goto error1;
|
|
|
- XFS_WANT_CORRUPTED_GOTO(i == 1,
|
|
|
- error1);
|
|
|
- }
|
|
|
- }
|
|
|
+ pag->pagl_leftrec = trec.ir_startino;
|
|
|
+ pag->pagl_rightrec = rec.ir_startino;
|
|
|
+ pag->pagl_pagino = pagino;
|
|
|
+ goto newino;
|
|
|
+ }
|
|
|
+
|
|
|
+ /* figure out the closer block if both are valid. */
|
|
|
+ if (!doneleft && !doneright) {
|
|
|
+ useleft = pagino -
|
|
|
+ (trec.ir_startino + XFS_INODES_PER_CHUNK - 1) <
|
|
|
+ rec.ir_startino - pagino;
|
|
|
+ } else {
|
|
|
+ useleft = !doneleft;
|
|
|
}
|
|
|
- ASSERT(!doneleft || !doneright);
|
|
|
+
|
|
|
+ /* free inodes to the left? */
|
|
|
+ if (useleft && trec.ir_freecount) {
|
|
|
+ rec = trec;
|
|
|
+ xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
|
|
|
+ cur = tcur;
|
|
|
+
|
|
|
+ pag->pagl_leftrec = trec.ir_startino;
|
|
|
+ pag->pagl_rightrec = rec.ir_startino;
|
|
|
+ pag->pagl_pagino = pagino;
|
|
|
+ goto alloc_inode;
|
|
|
+ }
|
|
|
+
|
|
|
+ /* free inodes to the right? */
|
|
|
+ if (!useleft && rec.ir_freecount) {
|
|
|
+ xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR);
|
|
|
+
|
|
|
+ pag->pagl_leftrec = trec.ir_startino;
|
|
|
+ pag->pagl_rightrec = rec.ir_startino;
|
|
|
+ pag->pagl_pagino = pagino;
|
|
|
+ goto alloc_inode;
|
|
|
+ }
|
|
|
+
|
|
|
+ /* get next record to check */
|
|
|
+ if (useleft) {
|
|
|
+ error = xfs_ialloc_next_rec(tcur, &trec,
|
|
|
+ &doneleft, 1);
|
|
|
+ } else {
|
|
|
+ error = xfs_ialloc_next_rec(cur, &rec,
|
|
|
+ &doneright, 0);
|
|
|
+ }
|
|
|
+ if (error)
|
|
|
+ goto error1;
|
|
|
}
|
|
|
+
|
|
|
+ /*
|
|
|
+	 * We've reached the end of the btree. Because
|
|
|
+ * we are only searching a small chunk of the
|
|
|
+	 * btree each search, there are obviously free
|
|
|
+ * inodes closer to the parent inode than we
|
|
|
+	 * are now. Restart the search again.
|
|
|
+ */
|
|
|
+ pag->pagl_pagino = NULLAGINO;
|
|
|
+ pag->pagl_leftrec = NULLAGINO;
|
|
|
+ pag->pagl_rightrec = NULLAGINO;
|
|
|
+ xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR);
|
|
|
+ xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
|
|
|
+ goto restart_pagno;
|
|
|
}
|
|
|
+
|
|
|
/*
|
|
|
- * In a different a.g. from the parent.
|
|
|
+ * In a different AG from the parent.
|
|
|
* See if the most recently allocated block has any free.
|
|
|
*/
|
|
|
- else if (be32_to_cpu(agi->agi_newino) != NULLAGINO) {
|
|
|
- if ((error = xfs_inobt_lookup_eq(cur,
|
|
|
- be32_to_cpu(agi->agi_newino), 0, 0, &i)))
|
|
|
+newino:
|
|
|
+ if (be32_to_cpu(agi->agi_newino) != NULLAGINO) {
|
|
|
+ error = xfs_inobt_lookup(cur, be32_to_cpu(agi->agi_newino),
|
|
|
+ XFS_LOOKUP_EQ, &i);
|
|
|
+ if (error)
|
|
|
goto error0;
|
|
|
- if (i == 1 &&
|
|
|
- (error = xfs_inobt_get_rec(cur, &rec.ir_startino,
|
|
|
- &rec.ir_freecount, &rec.ir_free, &j)) == 0 &&
|
|
|
- j == 1 &&
|
|
|
- rec.ir_freecount > 0) {
|
|
|
- /*
|
|
|
- * The last chunk allocated in the group still has
|
|
|
- * a free inode.
|
|
|
- */
|
|
|
- }
|
|
|
- /*
|
|
|
- * None left in the last group, search the whole a.g.
|
|
|
- */
|
|
|
- else {
|
|
|
+
|
|
|
+ if (i == 1) {
|
|
|
+ error = xfs_inobt_get_rec(cur, &rec, &j);
|
|
|
if (error)
|
|
|
goto error0;
|
|
|
- if ((error = xfs_inobt_lookup_ge(cur, 0, 0, 0, &i)))
|
|
|
- goto error0;
|
|
|
- ASSERT(i == 1);
|
|
|
- for (;;) {
|
|
|
- if ((error = xfs_inobt_get_rec(cur,
|
|
|
- &rec.ir_startino,
|
|
|
- &rec.ir_freecount, &rec.ir_free,
|
|
|
- &i)))
|
|
|
- goto error0;
|
|
|
- XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
|
|
|
- if (rec.ir_freecount > 0)
|
|
|
- break;
|
|
|
- if ((error = xfs_btree_increment(cur, 0, &i)))
|
|
|
- goto error0;
|
|
|
- XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
|
|
|
+
|
|
|
+ if (j == 1 && rec.ir_freecount > 0) {
|
|
|
+ /*
|
|
|
+ * The last chunk allocated in the group
|
|
|
+ * still has a free inode.
|
|
|
+ */
|
|
|
+ goto alloc_inode;
|
|
|
}
|
|
|
}
|
|
|
}
|
|
|
+
|
|
|
+ /*
|
|
|
+ * None left in the last group, search the whole AG
|
|
|
+ */
|
|
|
+ error = xfs_inobt_lookup(cur, 0, XFS_LOOKUP_GE, &i);
|
|
|
+ if (error)
|
|
|
+ goto error0;
|
|
|
+ XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
|
|
|
+
|
|
|
+ for (;;) {
|
|
|
+ error = xfs_inobt_get_rec(cur, &rec, &i);
|
|
|
+ if (error)
|
|
|
+ goto error0;
|
|
|
+ XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
|
|
|
+ if (rec.ir_freecount > 0)
|
|
|
+ break;
|
|
|
+ error = xfs_btree_increment(cur, 0, &i);
|
|
|
+ if (error)
|
|
|
+ goto error0;
|
|
|
+ XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
|
|
|
+ }
|
|
|
+
|
|
|
+alloc_inode:
|
|
|
offset = xfs_ialloc_find_free(&rec.ir_free);
|
|
|
ASSERT(offset >= 0);
|
|
|
ASSERT(offset < XFS_INODES_PER_CHUNK);
|
|
@@ -945,33 +1000,19 @@ nextag:
|
|
|
ino = XFS_AGINO_TO_INO(mp, agno, rec.ir_startino + offset);
|
|
|
rec.ir_free &= ~XFS_INOBT_MASK(offset);
|
|
|
rec.ir_freecount--;
|
|
|
- if ((error = xfs_inobt_update(cur, rec.ir_startino, rec.ir_freecount,
|
|
|
- rec.ir_free)))
|
|
|
+ error = xfs_inobt_update(cur, &rec);
|
|
|
+ if (error)
|
|
|
goto error0;
|
|
|
be32_add_cpu(&agi->agi_freecount, -1);
|
|
|
xfs_ialloc_log_agi(tp, agbp, XFS_AGI_FREECOUNT);
|
|
|
down_read(&mp->m_peraglock);
|
|
|
mp->m_perag[tagno].pagi_freecount--;
|
|
|
up_read(&mp->m_peraglock);
|
|
|
-#ifdef DEBUG
|
|
|
- if (cur->bc_nlevels == 1) {
|
|
|
- int freecount = 0;
|
|
|
|
|
|
- if ((error = xfs_inobt_lookup_ge(cur, 0, 0, 0, &i)))
|
|
|
- goto error0;
|
|
|
- do {
|
|
|
- if ((error = xfs_inobt_get_rec(cur, &rec.ir_startino,
|
|
|
- &rec.ir_freecount, &rec.ir_free, &i)))
|
|
|
- goto error0;
|
|
|
- XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
|
|
|
- freecount += rec.ir_freecount;
|
|
|
- if ((error = xfs_btree_increment(cur, 0, &i)))
|
|
|
- goto error0;
|
|
|
- } while (i == 1);
|
|
|
- ASSERT(freecount == be32_to_cpu(agi->agi_freecount) ||
|
|
|
- XFS_FORCED_SHUTDOWN(mp));
|
|
|
- }
|
|
|
-#endif
|
|
|
+ error = xfs_check_agi_freecount(cur, agi);
|
|
|
+ if (error)
|
|
|
+ goto error0;
|
|
|
+
|
|
|
xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
|
|
|
xfs_trans_mod_sb(tp, XFS_TRANS_SB_IFREE, -1);
|
|
|
*inop = ino;
|
|
@@ -1062,38 +1103,23 @@ xfs_difree(
|
|
|
* Initialize the cursor.
|
|
|
*/
|
|
|
cur = xfs_inobt_init_cursor(mp, tp, agbp, agno);
|
|
|
-#ifdef DEBUG
|
|
|
- if (cur->bc_nlevels == 1) {
|
|
|
- int freecount = 0;
|
|
|
|
|
|
- if ((error = xfs_inobt_lookup_ge(cur, 0, 0, 0, &i)))
|
|
|
- goto error0;
|
|
|
- do {
|
|
|
- if ((error = xfs_inobt_get_rec(cur, &rec.ir_startino,
|
|
|
- &rec.ir_freecount, &rec.ir_free, &i)))
|
|
|
- goto error0;
|
|
|
- if (i) {
|
|
|
- freecount += rec.ir_freecount;
|
|
|
- if ((error = xfs_btree_increment(cur, 0, &i)))
|
|
|
- goto error0;
|
|
|
- }
|
|
|
- } while (i == 1);
|
|
|
- ASSERT(freecount == be32_to_cpu(agi->agi_freecount) ||
|
|
|
- XFS_FORCED_SHUTDOWN(mp));
|
|
|
- }
|
|
|
-#endif
|
|
|
+ error = xfs_check_agi_freecount(cur, agi);
|
|
|
+ if (error)
|
|
|
+ goto error0;
|
|
|
+
|
|
|
/*
|
|
|
* Look for the entry describing this inode.
|
|
|
*/
|
|
|
- if ((error = xfs_inobt_lookup_le(cur, agino, 0, 0, &i))) {
|
|
|
+ if ((error = xfs_inobt_lookup(cur, agino, XFS_LOOKUP_LE, &i))) {
|
|
|
cmn_err(CE_WARN,
|
|
|
- "xfs_difree: xfs_inobt_lookup_le returned() an error %d on %s. Returning error.",
|
|
|
+ "xfs_difree: xfs_inobt_lookup returned() an error %d on %s. Returning error.",
|
|
|
error, mp->m_fsname);
|
|
|
goto error0;
|
|
|
}
|
|
|
XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
|
|
|
- if ((error = xfs_inobt_get_rec(cur, &rec.ir_startino, &rec.ir_freecount,
|
|
|
- &rec.ir_free, &i))) {
|
|
|
+ error = xfs_inobt_get_rec(cur, &rec, &i);
|
|
|
+ if (error) {
|
|
|
cmn_err(CE_WARN,
|
|
|
"xfs_difree: xfs_inobt_get_rec() returned an error %d on %s. Returning error.",
|
|
|
error, mp->m_fsname);
|
|
@@ -1148,12 +1174,14 @@ xfs_difree(
|
|
|
} else {
|
|
|
*delete = 0;
|
|
|
|
|
|
- if ((error = xfs_inobt_update(cur, rec.ir_startino, rec.ir_freecount, rec.ir_free))) {
|
|
|
+ error = xfs_inobt_update(cur, &rec);
|
|
|
+ if (error) {
|
|
|
cmn_err(CE_WARN,
|
|
|
- "xfs_difree: xfs_inobt_update() returned an error %d on %s. Returning error.",
|
|
|
+ "xfs_difree: xfs_inobt_update returned an error %d on %s.",
|
|
|
error, mp->m_fsname);
|
|
|
goto error0;
|
|
|
}
|
|
|
+
|
|
|
/*
|
|
|
* Change the inode free counts and log the ag/sb changes.
|
|
|
*/
|
|
@@ -1165,28 +1193,10 @@ xfs_difree(
|
|
|
xfs_trans_mod_sb(tp, XFS_TRANS_SB_IFREE, 1);
|
|
|
}
|
|
|
|
|
|
-#ifdef DEBUG
|
|
|
- if (cur->bc_nlevels == 1) {
|
|
|
- int freecount = 0;
|
|
|
+ error = xfs_check_agi_freecount(cur, agi);
|
|
|
+ if (error)
|
|
|
+ goto error0;
|
|
|
|
|
|
- if ((error = xfs_inobt_lookup_ge(cur, 0, 0, 0, &i)))
|
|
|
- goto error0;
|
|
|
- do {
|
|
|
- if ((error = xfs_inobt_get_rec(cur,
|
|
|
- &rec.ir_startino,
|
|
|
- &rec.ir_freecount,
|
|
|
- &rec.ir_free, &i)))
|
|
|
- goto error0;
|
|
|
- if (i) {
|
|
|
- freecount += rec.ir_freecount;
|
|
|
- if ((error = xfs_btree_increment(cur, 0, &i)))
|
|
|
- goto error0;
|
|
|
- }
|
|
|
- } while (i == 1);
|
|
|
- ASSERT(freecount == be32_to_cpu(agi->agi_freecount) ||
|
|
|
- XFS_FORCED_SHUTDOWN(mp));
|
|
|
- }
|
|
|
-#endif
|
|
|
xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
|
|
|
return 0;
|
|
|
|
|
@@ -1297,9 +1307,7 @@ xfs_imap(
|
|
|
chunk_agbno = agbno - offset_agbno;
|
|
|
} else {
|
|
|
xfs_btree_cur_t *cur; /* inode btree cursor */
|
|
|
- xfs_agino_t chunk_agino; /* first agino in inode chunk */
|
|
|
- __int32_t chunk_cnt; /* count of free inodes in chunk */
|
|
|
- xfs_inofree_t chunk_free; /* mask of free inodes in chunk */
|
|
|
+ xfs_inobt_rec_incore_t chunk_rec;
|
|
|
xfs_buf_t *agbp; /* agi buffer */
|
|
|
int i; /* temp state */
|
|
|
|
|
@@ -1315,15 +1323,14 @@ xfs_imap(
|
|
|
}
|
|
|
|
|
|
cur = xfs_inobt_init_cursor(mp, tp, agbp, agno);
|
|
|
- error = xfs_inobt_lookup_le(cur, agino, 0, 0, &i);
|
|
|
+ error = xfs_inobt_lookup(cur, agino, XFS_LOOKUP_LE, &i);
|
|
|
if (error) {
|
|
|
xfs_fs_cmn_err(CE_ALERT, mp, "xfs_imap: "
|
|
|
- "xfs_inobt_lookup_le() failed");
|
|
|
+ "xfs_inobt_lookup() failed");
|
|
|
goto error0;
|
|
|
}
|
|
|
|
|
|
- error = xfs_inobt_get_rec(cur, &chunk_agino, &chunk_cnt,
|
|
|
- &chunk_free, &i);
|
|
|
+ error = xfs_inobt_get_rec(cur, &chunk_rec, &i);
|
|
|
if (error) {
|
|
|
xfs_fs_cmn_err(CE_ALERT, mp, "xfs_imap: "
|
|
|
"xfs_inobt_get_rec() failed");
|
|
@@ -1341,7 +1348,7 @@ xfs_imap(
|
|
|
xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
|
|
|
if (error)
|
|
|
return error;
|
|
|
- chunk_agbno = XFS_AGINO_TO_AGBNO(mp, chunk_agino);
|
|
|
+ chunk_agbno = XFS_AGINO_TO_AGBNO(mp, chunk_rec.ir_startino);
|
|
|
offset_agbno = agbno - chunk_agbno;
|
|
|
}
|
|
|
|