@@ -205,19 +205,37 @@ xfs_allocbt_init_key_from_rec(
 	union xfs_btree_key	*key,
 	union xfs_btree_rec	*rec)
 {
-	ASSERT(rec->alloc.ar_startblock != 0);
-
 	key->alloc.ar_startblock = rec->alloc.ar_startblock;
 	key->alloc.ar_blockcount = rec->alloc.ar_blockcount;
 }
 
+STATIC void
+xfs_bnobt_init_high_key_from_rec(
+	union xfs_btree_key	*key,
+	union xfs_btree_rec	*rec)
+{
+	__u32			x;
+
+	x = be32_to_cpu(rec->alloc.ar_startblock);
+	x += be32_to_cpu(rec->alloc.ar_blockcount) - 1;
+	key->alloc.ar_startblock = cpu_to_be32(x);
+	key->alloc.ar_blockcount = 0;
+}
+
+STATIC void
+xfs_cntbt_init_high_key_from_rec(
+	union xfs_btree_key	*key,
+	union xfs_btree_rec	*rec)
+{
+	key->alloc.ar_blockcount = rec->alloc.ar_blockcount;
+	key->alloc.ar_startblock = 0;
+}
+
 STATIC void
 xfs_allocbt_init_rec_from_cur(
 	struct xfs_btree_cur	*cur,
 	union xfs_btree_rec	*rec)
 {
-	ASSERT(cur->bc_rec.a.ar_startblock != 0);
-
 	rec->alloc.ar_startblock = cpu_to_be32(cur->bc_rec.a.ar_startblock);
 	rec->alloc.ar_blockcount = cpu_to_be32(cur->bc_rec.a.ar_blockcount);
 }
@@ -236,18 +254,24 @@ xfs_allocbt_init_ptr_from_cur(
 }
 
 STATIC __int64_t
-xfs_allocbt_key_diff(
+xfs_bnobt_key_diff(
 	struct xfs_btree_cur	*cur,
 	union xfs_btree_key	*key)
 {
 	xfs_alloc_rec_incore_t	*rec = &cur->bc_rec.a;
 	xfs_alloc_key_t		*kp = &key->alloc;
-	__int64_t		diff;
 
-	if (cur->bc_btnum == XFS_BTNUM_BNO) {
-		return (__int64_t)be32_to_cpu(kp->ar_startblock) -
-			rec->ar_startblock;
-	}
+	return (__int64_t)be32_to_cpu(kp->ar_startblock) - rec->ar_startblock;
+}
+
+STATIC __int64_t
+xfs_cntbt_key_diff(
+	struct xfs_btree_cur	*cur,
+	union xfs_btree_key	*key)
+{
+	xfs_alloc_rec_incore_t	*rec = &cur->bc_rec.a;
+	xfs_alloc_key_t		*kp = &key->alloc;
+	__int64_t		diff;
 
 	diff = (__int64_t)be32_to_cpu(kp->ar_blockcount) - rec->ar_blockcount;
 	if (diff)
@@ -256,6 +280,33 @@ xfs_allocbt_key_diff(
 	return (__int64_t)be32_to_cpu(kp->ar_startblock) - rec->ar_startblock;
 }
 
+STATIC __int64_t
+xfs_bnobt_diff_two_keys(
+	struct xfs_btree_cur	*cur,
+	union xfs_btree_key	*k1,
+	union xfs_btree_key	*k2)
+{
+	return (__int64_t)be32_to_cpu(k1->alloc.ar_startblock) -
+			  be32_to_cpu(k2->alloc.ar_startblock);
+}
+
+STATIC __int64_t
+xfs_cntbt_diff_two_keys(
+	struct xfs_btree_cur	*cur,
+	union xfs_btree_key	*k1,
+	union xfs_btree_key	*k2)
+{
+	__int64_t		diff;
+
+	diff = be32_to_cpu(k1->alloc.ar_blockcount) -
+	       be32_to_cpu(k2->alloc.ar_blockcount);
+	if (diff)
+		return diff;
+
+	return be32_to_cpu(k1->alloc.ar_startblock) -
+	       be32_to_cpu(k2->alloc.ar_startblock);
+}
+
 static bool
 xfs_allocbt_verify(
 	struct xfs_buf		*bp)
@@ -346,44 +397,54 @@ const struct xfs_buf_ops xfs_allocbt_buf_ops = {
 
 #if defined(DEBUG) || defined(XFS_WARN)
 STATIC int
-xfs_allocbt_keys_inorder(
+xfs_bnobt_keys_inorder(
 	struct xfs_btree_cur	*cur,
 	union xfs_btree_key	*k1,
 	union xfs_btree_key	*k2)
 {
-	if (cur->bc_btnum == XFS_BTNUM_BNO) {
-		return be32_to_cpu(k1->alloc.ar_startblock) <
-		       be32_to_cpu(k2->alloc.ar_startblock);
-	} else {
-		return be32_to_cpu(k1->alloc.ar_blockcount) <
-			be32_to_cpu(k2->alloc.ar_blockcount) ||
-			(k1->alloc.ar_blockcount == k2->alloc.ar_blockcount &&
-			 be32_to_cpu(k1->alloc.ar_startblock) <
-			 be32_to_cpu(k2->alloc.ar_startblock));
-	}
+	return be32_to_cpu(k1->alloc.ar_startblock) <
+	       be32_to_cpu(k2->alloc.ar_startblock);
 }
 
 STATIC int
-xfs_allocbt_recs_inorder(
+xfs_bnobt_recs_inorder(
 	struct xfs_btree_cur	*cur,
 	union xfs_btree_rec	*r1,
 	union xfs_btree_rec	*r2)
 {
-	if (cur->bc_btnum == XFS_BTNUM_BNO) {
-		return be32_to_cpu(r1->alloc.ar_startblock) +
-		       be32_to_cpu(r1->alloc.ar_blockcount) <=
-		       be32_to_cpu(r2->alloc.ar_startblock);
-	} else {
-		return be32_to_cpu(r1->alloc.ar_blockcount) <
-			be32_to_cpu(r2->alloc.ar_blockcount) ||
-			(r1->alloc.ar_blockcount == r2->alloc.ar_blockcount &&
-			 be32_to_cpu(r1->alloc.ar_startblock) <
-			 be32_to_cpu(r2->alloc.ar_startblock));
-	}
+	return be32_to_cpu(r1->alloc.ar_startblock) +
+		be32_to_cpu(r1->alloc.ar_blockcount) <=
+		be32_to_cpu(r2->alloc.ar_startblock);
+}
+
+STATIC int
+xfs_cntbt_keys_inorder(
+	struct xfs_btree_cur	*cur,
+	union xfs_btree_key	*k1,
+	union xfs_btree_key	*k2)
+{
+	return be32_to_cpu(k1->alloc.ar_blockcount) <
+		be32_to_cpu(k2->alloc.ar_blockcount) ||
+		(k1->alloc.ar_blockcount == k2->alloc.ar_blockcount &&
+		 be32_to_cpu(k1->alloc.ar_startblock) <
+		 be32_to_cpu(k2->alloc.ar_startblock));
 }
-#endif /* DEBUG */
 
-static const struct xfs_btree_ops xfs_allocbt_ops = {
+STATIC int
+xfs_cntbt_recs_inorder(
+	struct xfs_btree_cur	*cur,
+	union xfs_btree_rec	*r1,
+	union xfs_btree_rec	*r2)
+{
+	return be32_to_cpu(r1->alloc.ar_blockcount) <
+		be32_to_cpu(r2->alloc.ar_blockcount) ||
+		(r1->alloc.ar_blockcount == r2->alloc.ar_blockcount &&
+		 be32_to_cpu(r1->alloc.ar_startblock) <
+		 be32_to_cpu(r2->alloc.ar_startblock));
+}
+#endif /* DEBUG */
+
+static const struct xfs_btree_ops xfs_bnobt_ops = {
 	.rec_len		= sizeof(xfs_alloc_rec_t),
 	.key_len		= sizeof(xfs_alloc_key_t),
 
@@ -395,13 +456,39 @@ static const struct xfs_btree_ops xfs_allocbt_ops = {
 	.get_minrecs		= xfs_allocbt_get_minrecs,
 	.get_maxrecs		= xfs_allocbt_get_maxrecs,
 	.init_key_from_rec	= xfs_allocbt_init_key_from_rec,
+	.init_high_key_from_rec	= xfs_bnobt_init_high_key_from_rec,
 	.init_rec_from_cur	= xfs_allocbt_init_rec_from_cur,
 	.init_ptr_from_cur	= xfs_allocbt_init_ptr_from_cur,
-	.key_diff		= xfs_allocbt_key_diff,
+	.key_diff		= xfs_bnobt_key_diff,
 	.buf_ops		= &xfs_allocbt_buf_ops,
+	.diff_two_keys		= xfs_bnobt_diff_two_keys,
 #if defined(DEBUG) || defined(XFS_WARN)
-	.keys_inorder		= xfs_allocbt_keys_inorder,
-	.recs_inorder		= xfs_allocbt_recs_inorder,
+	.keys_inorder		= xfs_bnobt_keys_inorder,
+	.recs_inorder		= xfs_bnobt_recs_inorder,
+#endif
+};
+
+static const struct xfs_btree_ops xfs_cntbt_ops = {
+	.rec_len		= sizeof(xfs_alloc_rec_t),
+	.key_len		= sizeof(xfs_alloc_key_t),
+
+	.dup_cursor		= xfs_allocbt_dup_cursor,
+	.set_root		= xfs_allocbt_set_root,
+	.alloc_block		= xfs_allocbt_alloc_block,
+	.free_block		= xfs_allocbt_free_block,
+	.update_lastrec		= xfs_allocbt_update_lastrec,
+	.get_minrecs		= xfs_allocbt_get_minrecs,
+	.get_maxrecs		= xfs_allocbt_get_maxrecs,
+	.init_key_from_rec	= xfs_allocbt_init_key_from_rec,
+	.init_high_key_from_rec	= xfs_cntbt_init_high_key_from_rec,
+	.init_rec_from_cur	= xfs_allocbt_init_rec_from_cur,
+	.init_ptr_from_cur	= xfs_allocbt_init_ptr_from_cur,
+	.key_diff		= xfs_cntbt_key_diff,
+	.buf_ops		= &xfs_allocbt_buf_ops,
+	.diff_two_keys		= xfs_cntbt_diff_two_keys,
+#if defined(DEBUG) || defined(XFS_WARN)
+	.keys_inorder		= xfs_cntbt_keys_inorder,
+	.recs_inorder		= xfs_cntbt_recs_inorder,
 #endif
 };
 
@@ -427,16 +514,15 @@ xfs_allocbt_init_cursor(
 	cur->bc_mp = mp;
 	cur->bc_btnum = btnum;
 	cur->bc_blocklog = mp->m_sb.sb_blocklog;
-	cur->bc_ops = &xfs_allocbt_ops;
-	if (btnum == XFS_BTNUM_BNO)
-		cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_abtb_2);
-	else
-		cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_abtc_2);
 
 	if (btnum == XFS_BTNUM_CNT) {
+		cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_abtc_2);
+		cur->bc_ops = &xfs_cntbt_ops;
 		cur->bc_nlevels = be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNT]);
 		cur->bc_flags = XFS_BTREE_LASTREC_UPDATE;
 	} else {
+		cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_abtb_2);
+		cur->bc_ops = &xfs_bnobt_ops;
 		cur->bc_nlevels = be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNO]);
 	}
 