@@ -992,22 +992,30 @@ xfs_btree_readahead(
 	return xfs_btree_readahead_sblock(cur, lr, block);
 }
 
-STATIC xfs_daddr_t
+STATIC int
 xfs_btree_ptr_to_daddr(
 	struct xfs_btree_cur	*cur,
-	union xfs_btree_ptr	*ptr)
+	union xfs_btree_ptr	*ptr,
+	xfs_daddr_t		*daddr)
 {
-	if (cur->bc_flags & XFS_BTREE_LONG_PTRS) {
-		ASSERT(ptr->l != cpu_to_be64(NULLFSBLOCK));
+	xfs_fsblock_t		fsbno;
+	xfs_agblock_t		agbno;
+	int			error;
 
-		return XFS_FSB_TO_DADDR(cur->bc_mp, be64_to_cpu(ptr->l));
-	} else {
-		ASSERT(cur->bc_private.a.agno != NULLAGNUMBER);
-		ASSERT(ptr->s != cpu_to_be32(NULLAGBLOCK));
+	error = xfs_btree_check_ptr(cur, ptr, 0, 1);
+	if (error)
+		return error;
 
-		return XFS_AGB_TO_DADDR(cur->bc_mp, cur->bc_private.a.agno,
-					be32_to_cpu(ptr->s));
+	if (cur->bc_flags & XFS_BTREE_LONG_PTRS) {
+		fsbno = be64_to_cpu(ptr->l);
+		*daddr = XFS_FSB_TO_DADDR(cur->bc_mp, fsbno);
+	} else {
+		agbno = be32_to_cpu(ptr->s);
+		*daddr = XFS_AGB_TO_DADDR(cur->bc_mp, cur->bc_private.a.agno,
+				agbno);
 	}
+
+	return 0;
 }
 
 /*
@@ -1022,8 +1030,11 @@ xfs_btree_readahead_ptr(
 	union xfs_btree_ptr	*ptr,
 	xfs_extlen_t		count)
 {
-	xfs_buf_readahead(cur->bc_mp->m_ddev_targp,
-			  xfs_btree_ptr_to_daddr(cur, ptr),
+	xfs_daddr_t		daddr;
+
+	if (xfs_btree_ptr_to_daddr(cur, ptr, &daddr))
+		return;
+	xfs_buf_readahead(cur->bc_mp->m_ddev_targp, daddr,
 			  cur->bc_mp->m_bsize * count, cur->bc_ops->buf_ops);
 }
 
@@ -1286,11 +1297,14 @@ xfs_btree_get_buf_block(
 {
 	struct xfs_mount	*mp = cur->bc_mp;
 	xfs_daddr_t		d;
+	int			error;
 
 	/* need to sort out how callers deal with failures first */
 	ASSERT(!(flags & XBF_TRYLOCK));
 
-	d = xfs_btree_ptr_to_daddr(cur, ptr);
+	error = xfs_btree_ptr_to_daddr(cur, ptr, &d);
+	if (error)
+		return error;
 	*bpp = xfs_trans_get_buf(cur->bc_tp, mp->m_ddev_targp, d,
 				 mp->m_bsize, flags);
 
@@ -1321,7 +1335,9 @@ xfs_btree_read_buf_block(
 	/* need to sort out how callers deal with failures first */
 	ASSERT(!(flags & XBF_TRYLOCK));
 
-	d = xfs_btree_ptr_to_daddr(cur, ptr);
+	error = xfs_btree_ptr_to_daddr(cur, ptr, &d);
+	if (error)
+		return error;
 	error = xfs_trans_read_buf(mp, cur->bc_tp, mp->m_ddev_targp, d,
 				   mp->m_bsize, flags, bpp,
 				   cur->bc_ops->buf_ops);
@@ -1768,6 +1784,7 @@ xfs_btree_lookup_get_block(
 	struct xfs_btree_block	**blkp) /* return btree block */
 {
 	struct xfs_buf		*bp;	/* buffer pointer for btree block */
+	xfs_daddr_t		daddr;
 	int			error = 0;
 
 	/* special case the root block if in an inode */
@@ -1784,7 +1801,10 @@ xfs_btree_lookup_get_block(
 	 * Otherwise throw it away and get a new one.
 	 */
 	bp = cur->bc_bufs[level];
-	if (bp && XFS_BUF_ADDR(bp) == xfs_btree_ptr_to_daddr(cur, pp)) {
+	error = xfs_btree_ptr_to_daddr(cur, pp, &daddr);
+	if (error)
+		return error;
+	if (bp && XFS_BUF_ADDR(bp) == daddr) {
 		*blkp = XFS_BUF_TO_BLOCK(bp);
 		return 0;
 	}