@@ -83,9 +83,9 @@ STATIC void xfs_da_node_unbalance(xfs_da_state_t *state,
 /*
  * Utility routines.
  */
-STATIC uint xfs_da_node_lasthash(xfs_dabuf_t *bp, int *count);
-STATIC int xfs_da_node_order(xfs_dabuf_t *node1_bp, xfs_dabuf_t *node2_bp);
-STATIC xfs_dabuf_t *xfs_da_buf_make(int nbuf, xfs_buf_t **bps);
+STATIC uint xfs_da_node_lasthash(struct xfs_buf *bp, int *count);
+STATIC int xfs_da_node_order(struct xfs_buf *node1_bp,
+				struct xfs_buf *node2_bp);
 STATIC int xfs_da_blk_unlink(xfs_da_state_t *state,
 				  xfs_da_state_blk_t *drop_blk,
 				  xfs_da_state_blk_t *save_blk);
@@ -100,10 +100,10 @@ STATIC void xfs_da_state_kill_altpath(xfs_da_state_t *state);
  */
 int
 xfs_da_node_create(xfs_da_args_t *args, xfs_dablk_t blkno, int level,
-	xfs_dabuf_t **bpp, int whichfork)
+	struct xfs_buf **bpp, int whichfork)
 {
 	xfs_da_intnode_t *node;
-	xfs_dabuf_t *bp;
+	struct xfs_buf *bp;
 	int error;
 	xfs_trans_t *tp;
 
@@ -114,7 +114,7 @@ xfs_da_node_create(xfs_da_args_t *args, xfs_dablk_t blkno, int level,
 	if (error)
 		return(error);
 	ASSERT(bp != NULL);
-	node = bp->data;
+	node = bp->b_addr;
 	node->hdr.info.forw = 0;
 	node->hdr.info.back = 0;
 	node->hdr.info.magic = cpu_to_be16(XFS_DA_NODE_MAGIC);
@@ -122,7 +122,7 @@ xfs_da_node_create(xfs_da_args_t *args, xfs_dablk_t blkno, int level,
 	node->hdr.count = 0;
 	node->hdr.level = cpu_to_be16(level);
 
-	xfs_da_log_buf(tp, bp,
+	xfs_trans_log_buf(tp, bp,
 		XFS_DA_LOGRANGE(node, &node->hdr, sizeof(node->hdr)));
 
 	*bpp = bp;
@@ -138,7 +138,7 @@ xfs_da_split(xfs_da_state_t *state)
 {
 	xfs_da_state_blk_t *oldblk, *newblk, *addblk;
 	xfs_da_intnode_t *node;
-	xfs_dabuf_t *bp;
+	struct xfs_buf *bp;
 	int max, action, error, i;
 
 	trace_xfs_da_split(state->args);
@@ -203,7 +203,6 @@ xfs_da_split(xfs_da_state_t *state)
 		case XFS_DA_NODE_MAGIC:
 			error = xfs_da_node_split(state, oldblk, newblk, addblk,
 							 max - i, &action);
-			xfs_da_buf_done(addblk->bp);
 			addblk->bp = NULL;
 			if (error)
 				return(error);	/* GROT: dir is inconsistent */
@@ -221,13 +220,6 @@ xfs_da_split(xfs_da_state_t *state)
 		 * Update the btree to show the new hashval for this child.
 		 */
 		xfs_da_fixhashpath(state, &state->path);
-		/*
-		 * If we won't need this block again, it's getting dropped
-		 * from the active path by the loop control, so we need
-		 * to mark it done now.
-		 */
-		if (i > 0 || !addblk)
-			xfs_da_buf_done(oldblk->bp);
 	}
 	if (!addblk)
 		return(0);
@@ -239,8 +231,6 @@ xfs_da_split(xfs_da_state_t *state)
 	oldblk = &state->path.blk[0];
 	error = xfs_da_root_split(state, oldblk, addblk);
 	if (error) {
-		xfs_da_buf_done(oldblk->bp);
-		xfs_da_buf_done(addblk->bp);
 		addblk->bp = NULL;
 		return(error);	/* GROT: dir is inconsistent */
 	}
@@ -252,7 +242,7 @@ xfs_da_split(xfs_da_state_t *state)
 	 * and the original block 0 could be at any position in the list.
 	 */
 
-	node = oldblk->bp->data;
+	node = oldblk->bp->b_addr;
 	if (node->hdr.info.forw) {
 		if (be32_to_cpu(node->hdr.info.forw) == addblk->blkno) {
 			bp = addblk->bp;
@@ -260,13 +250,13 @@ xfs_da_split(xfs_da_state_t *state)
 			ASSERT(state->extravalid);
 			bp = state->extrablk.bp;
 		}
-		node = bp->data;
+		node = bp->b_addr;
 		node->hdr.info.back = cpu_to_be32(oldblk->blkno);
-		xfs_da_log_buf(state->args->trans, bp,
+		xfs_trans_log_buf(state->args->trans, bp,
 			XFS_DA_LOGRANGE(node, &node->hdr.info,
 			sizeof(node->hdr.info)));
 	}
-	node = oldblk->bp->data;
+	node = oldblk->bp->b_addr;
 	if (node->hdr.info.back) {
 		if (be32_to_cpu(node->hdr.info.back) == addblk->blkno) {
 			bp = addblk->bp;
@@ -274,14 +264,12 @@ xfs_da_split(xfs_da_state_t *state)
 			ASSERT(state->extravalid);
 			bp = state->extrablk.bp;
 		}
-		node = bp->data;
+		node = bp->b_addr;
 		node->hdr.info.forw = cpu_to_be32(oldblk->blkno);
-		xfs_da_log_buf(state->args->trans, bp,
+		xfs_trans_log_buf(state->args->trans, bp,
 			XFS_DA_LOGRANGE(node, &node->hdr.info,
 			sizeof(node->hdr.info)));
 	}
-	xfs_da_buf_done(oldblk->bp);
-	xfs_da_buf_done(addblk->bp);
 	addblk->bp = NULL;
 	return(0);
 }
@@ -298,7 +286,7 @@ xfs_da_root_split(xfs_da_state_t *state, xfs_da_state_blk_t *blk1,
 	xfs_da_intnode_t *node, *oldroot;
 	xfs_da_args_t *args;
 	xfs_dablk_t blkno;
-	xfs_dabuf_t *bp;
+	struct xfs_buf *bp;
 	int error, size;
 	xfs_inode_t *dp;
 	xfs_trans_t *tp;
@@ -323,8 +311,8 @@ xfs_da_root_split(xfs_da_state_t *state, xfs_da_state_blk_t *blk1,
 	if (error)
 		return(error);
 	ASSERT(bp != NULL);
-	node = bp->data;
-	oldroot = blk1->bp->data;
+	node = bp->b_addr;
+	oldroot = blk1->bp->b_addr;
 	if (oldroot->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC)) {
 		size = (int)((char *)&oldroot->btree[be16_to_cpu(oldroot->hdr.count)] -
 			     (char *)oldroot);
@@ -335,8 +323,7 @@ xfs_da_root_split(xfs_da_state_t *state, xfs_da_state_blk_t *blk1,
 			     (char *)leaf);
 	}
 	memcpy(node, oldroot, size);
-	xfs_da_log_buf(tp, bp, 0, size - 1);
-	xfs_da_buf_done(blk1->bp);
+	xfs_trans_log_buf(tp, bp, 0, size - 1);
 	blk1->bp = bp;
 	blk1->blkno = blkno;
 
@@ -348,7 +335,7 @@ xfs_da_root_split(xfs_da_state_t *state, xfs_da_state_blk_t *blk1,
 		be16_to_cpu(node->hdr.level) + 1, &bp, args->whichfork);
 	if (error)
 		return(error);
-	node = bp->data;
+	node = bp->b_addr;
 	node->btree[0].hashval = cpu_to_be32(blk1->hashval);
 	node->btree[0].before = cpu_to_be32(blk1->blkno);
 	node->btree[1].hashval = cpu_to_be32(blk2->hashval);
@@ -365,10 +352,9 @@ xfs_da_root_split(xfs_da_state_t *state, xfs_da_state_blk_t *blk1,
 #endif
 
 	/* Header is already logged by xfs_da_node_create */
-	xfs_da_log_buf(tp, bp,
+	xfs_trans_log_buf(tp, bp,
 		XFS_DA_LOGRANGE(node, node->btree,
 			sizeof(xfs_da_node_entry_t) * 2));
-	xfs_da_buf_done(bp);
 
 	return(0);
 }
@@ -389,7 +375,7 @@ xfs_da_node_split(xfs_da_state_t *state, xfs_da_state_blk_t *oldblk,
 
 	trace_xfs_da_node_split(state->args);
 
-	node = oldblk->bp->data;
+	node = oldblk->bp->b_addr;
 	ASSERT(node->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
 
 	/*
@@ -436,7 +422,7 @@ xfs_da_node_split(xfs_da_state_t *state, xfs_da_state_blk_t *oldblk,
 	 *
 	 * If we had double-split op below us, then add the extra block too.
 	 */
-	node = oldblk->bp->data;
+	node = oldblk->bp->b_addr;
 	if (oldblk->index <= be16_to_cpu(node->hdr.count)) {
 		oldblk->index++;
 		xfs_da_node_add(state, oldblk, addblk);
@@ -477,8 +463,8 @@ xfs_da_node_rebalance(xfs_da_state_t *state, xfs_da_state_blk_t *blk1,
 
 	trace_xfs_da_node_rebalance(state->args);
 
-	node1 = blk1->bp->data;
-	node2 = blk2->bp->data;
+	node1 = blk1->bp->b_addr;
+	node2 = blk2->bp->b_addr;
 	/*
 	 * Figure out how many entries need to move, and in which direction.
 	 * Swap the nodes around if that makes it simpler.
@@ -532,7 +518,7 @@ xfs_da_node_rebalance(xfs_da_state_t *state, xfs_da_state_blk_t *blk1,
 		btree_d = &node1->btree[be16_to_cpu(node1->hdr.count)];
 		memcpy(btree_d, btree_s, tmp);
 		be16_add_cpu(&node1->hdr.count, count);
-		xfs_da_log_buf(tp, blk1->bp,
+		xfs_trans_log_buf(tp, blk1->bp,
 			XFS_DA_LOGRANGE(node1, btree_d, tmp));
 
 		/*
@@ -549,9 +535,9 @@ xfs_da_node_rebalance(xfs_da_state_t *state, xfs_da_state_blk_t *blk1,
 	/*
 	 * Log header of node 1 and all current bits of node 2.
 	 */
-	xfs_da_log_buf(tp, blk1->bp,
+	xfs_trans_log_buf(tp, blk1->bp,
 		XFS_DA_LOGRANGE(node1, &node1->hdr, sizeof(node1->hdr)));
-	xfs_da_log_buf(tp, blk2->bp,
+	xfs_trans_log_buf(tp, blk2->bp,
 		XFS_DA_LOGRANGE(node2, &node2->hdr,
 			sizeof(node2->hdr) +
 			sizeof(node2->btree[0]) * be16_to_cpu(node2->hdr.count)));
@@ -560,8 +546,8 @@ xfs_da_node_rebalance(xfs_da_state_t *state, xfs_da_state_blk_t *blk1,
 	 * Record the last hashval from each block for upward propagation.
 	 * (note: don't use the swapped node pointers)
 	 */
-	node1 = blk1->bp->data;
-	node2 = blk2->bp->data;
+	node1 = blk1->bp->b_addr;
+	node2 = blk2->bp->b_addr;
 	blk1->hashval = be32_to_cpu(node1->btree[be16_to_cpu(node1->hdr.count)-1].hashval);
 	blk2->hashval = be32_to_cpu(node2->btree[be16_to_cpu(node2->hdr.count)-1].hashval);
 
@@ -587,7 +573,7 @@ xfs_da_node_add(xfs_da_state_t *state, xfs_da_state_blk_t *oldblk,
 
 	trace_xfs_da_node_add(state->args);
 
-	node = oldblk->bp->data;
+	node = oldblk->bp->b_addr;
 	ASSERT(node->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
 	ASSERT((oldblk->index >= 0) && (oldblk->index <= be16_to_cpu(node->hdr.count)));
 	ASSERT(newblk->blkno != 0);
@@ -606,10 +592,10 @@ xfs_da_node_add(xfs_da_state_t *state, xfs_da_state_blk_t *oldblk,
 	}
 	btree->hashval = cpu_to_be32(newblk->hashval);
 	btree->before = cpu_to_be32(newblk->blkno);
-	xfs_da_log_buf(state->args->trans, oldblk->bp,
+	xfs_trans_log_buf(state->args->trans, oldblk->bp,
 		XFS_DA_LOGRANGE(node, btree, tmp + sizeof(*btree)));
 	be16_add_cpu(&node->hdr.count, 1);
-	xfs_da_log_buf(state->args->trans, oldblk->bp,
+	xfs_trans_log_buf(state->args->trans, oldblk->bp,
 		XFS_DA_LOGRANGE(node, &node->hdr, sizeof(node->hdr)));
 
 	/*
@@ -735,7 +721,7 @@ xfs_da_root_join(xfs_da_state_t *state, xfs_da_state_blk_t *root_blk)
 	xfs_da_intnode_t *oldroot;
 	xfs_da_args_t *args;
 	xfs_dablk_t child;
-	xfs_dabuf_t *bp;
+	struct xfs_buf *bp;
 	int error;
 
 	trace_xfs_da_root_join(state->args);
@@ -743,7 +729,7 @@ xfs_da_root_join(xfs_da_state_t *state, xfs_da_state_blk_t *root_blk)
 	args = state->args;
 	ASSERT(args != NULL);
 	ASSERT(root_blk->magic == XFS_DA_NODE_MAGIC);
-	oldroot = root_blk->bp->data;
+	oldroot = root_blk->bp->b_addr;
 	ASSERT(oldroot->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
 	ASSERT(!oldroot->hdr.info.forw);
 	ASSERT(!oldroot->hdr.info.back);
@@ -765,11 +751,11 @@ xfs_da_root_join(xfs_da_state_t *state, xfs_da_state_blk_t *root_blk)
 	if (error)
 		return(error);
 	ASSERT(bp != NULL);
-	xfs_da_blkinfo_onlychild_validate(bp->data,
+	xfs_da_blkinfo_onlychild_validate(bp->b_addr,
 			be16_to_cpu(oldroot->hdr.level));
 
-	memcpy(root_blk->bp->data, bp->data, state->blocksize);
-	xfs_da_log_buf(args->trans, root_blk->bp, 0, state->blocksize - 1);
+	memcpy(root_blk->bp->b_addr, bp->b_addr, state->blocksize);
+	xfs_trans_log_buf(args->trans, root_blk->bp, 0, state->blocksize - 1);
 	error = xfs_da_shrink_inode(args, child, bp);
 	return(error);
 }
@@ -791,7 +777,7 @@ xfs_da_node_toosmall(xfs_da_state_t *state, int *action)
 	xfs_da_blkinfo_t *info;
 	int count, forward, error, retval, i;
 	xfs_dablk_t blkno;
-	xfs_dabuf_t *bp;
+	struct xfs_buf *bp;
 
 	/*
 	 * Check for the degenerate case of the block being over 50% full.
@@ -799,7 +785,7 @@ xfs_da_node_toosmall(xfs_da_state_t *state, int *action)
 	 * to coalesce with a sibling.
 	 */
 	blk = &state->path.blk[ state->path.active-1 ];
-	info = blk->bp->data;
+	info = blk->bp->b_addr;
 	ASSERT(info->magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
 	node = (xfs_da_intnode_t *)info;
 	count = be16_to_cpu(node->hdr.count);
@@ -859,10 +845,10 @@ xfs_da_node_toosmall(xfs_da_state_t *state, int *action)
 		count = state->node_ents;
 		count -= state->node_ents >> 2;
 		count -= be16_to_cpu(node->hdr.count);
-		node = bp->data;
+		node = bp->b_addr;
 		ASSERT(node->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
 		count -= be16_to_cpu(node->hdr.count);
-		xfs_da_brelse(state->args->trans, bp);
+		xfs_trans_brelse(state->args->trans, bp);
 		if (count >= 0)
 			break;	/* fits with at least 25% to spare */
 	}
@@ -934,14 +920,14 @@ xfs_da_fixhashpath(xfs_da_state_t *state, xfs_da_state_path_t *path)
 			break;
 	}
 	for (blk--, level--; level >= 0; blk--, level--) {
-		node = blk->bp->data;
+		node = blk->bp->b_addr;
 		ASSERT(node->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
 		btree = &node->btree[ blk->index ];
 		if (be32_to_cpu(btree->hashval) == lasthash)
 			break;
 		blk->hashval = lasthash;
 		btree->hashval = cpu_to_be32(lasthash);
-		xfs_da_log_buf(state->args->trans, blk->bp,
+		xfs_trans_log_buf(state->args->trans, blk->bp,
 			XFS_DA_LOGRANGE(node, btree, sizeof(*btree)));
 
 		lasthash = be32_to_cpu(node->btree[be16_to_cpu(node->hdr.count)-1].hashval);
@@ -960,7 +946,7 @@ xfs_da_node_remove(xfs_da_state_t *state, xfs_da_state_blk_t *drop_blk)
 
 	trace_xfs_da_node_remove(state->args);
 
-	node = drop_blk->bp->data;
+	node = drop_blk->bp->b_addr;
 	ASSERT(drop_blk->index < be16_to_cpu(node->hdr.count));
 	ASSERT(drop_blk->index >= 0);
 
@@ -972,15 +958,15 @@ xfs_da_node_remove(xfs_da_state_t *state, xfs_da_state_blk_t *drop_blk)
 		tmp = be16_to_cpu(node->hdr.count) - drop_blk->index - 1;
 		tmp *= (uint)sizeof(xfs_da_node_entry_t);
 		memmove(btree, btree + 1, tmp);
-		xfs_da_log_buf(state->args->trans, drop_blk->bp,
+		xfs_trans_log_buf(state->args->trans, drop_blk->bp,
 		    XFS_DA_LOGRANGE(node, btree, tmp));
 		btree = &node->btree[be16_to_cpu(node->hdr.count)-1];
 	}
 	memset((char *)btree, 0, sizeof(xfs_da_node_entry_t));
-	xfs_da_log_buf(state->args->trans, drop_blk->bp,
+	xfs_trans_log_buf(state->args->trans, drop_blk->bp,
 	    XFS_DA_LOGRANGE(node, btree, sizeof(*btree)));
 	be16_add_cpu(&node->hdr.count, -1);
-	xfs_da_log_buf(state->args->trans, drop_blk->bp,
+	xfs_trans_log_buf(state->args->trans, drop_blk->bp,
 	    XFS_DA_LOGRANGE(node, &node->hdr, sizeof(node->hdr)));
 
 	/*
@@ -1005,8 +991,8 @@ xfs_da_node_unbalance(xfs_da_state_t *state, xfs_da_state_blk_t *drop_blk,
 
 	trace_xfs_da_node_unbalance(state->args);
 
-	drop_node = drop_blk->bp->data;
-	save_node = save_blk->bp->data;
+	drop_node = drop_blk->bp->b_addr;
+	save_node = save_blk->bp->b_addr;
 	ASSERT(drop_node->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
 	ASSERT(save_node->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
 	tp = state->args->trans;
@@ -1023,13 +1009,13 @@ xfs_da_node_unbalance(xfs_da_state_t *state, xfs_da_state_blk_t *drop_blk,
 		tmp = be16_to_cpu(save_node->hdr.count) * (uint)sizeof(xfs_da_node_entry_t);
 		memmove(btree, &save_node->btree[0], tmp);
 		btree = &save_node->btree[0];
-		xfs_da_log_buf(tp, save_blk->bp,
+		xfs_trans_log_buf(tp, save_blk->bp,
 			XFS_DA_LOGRANGE(save_node, btree,
 				(be16_to_cpu(save_node->hdr.count) + be16_to_cpu(drop_node->hdr.count)) *
 				sizeof(xfs_da_node_entry_t)));
 	} else {
 		btree = &save_node->btree[be16_to_cpu(save_node->hdr.count)];
-		xfs_da_log_buf(tp, save_blk->bp,
+		xfs_trans_log_buf(tp, save_blk->bp,
 			XFS_DA_LOGRANGE(save_node, btree,
 				be16_to_cpu(drop_node->hdr.count) *
 				sizeof(xfs_da_node_entry_t)));
@@ -1042,7 +1028,7 @@ xfs_da_node_unbalance(xfs_da_state_t *state, xfs_da_state_blk_t *drop_blk,
 	memcpy(btree, &drop_node->btree[0], tmp);
 	be16_add_cpu(&save_node->hdr.count, be16_to_cpu(drop_node->hdr.count));
 
-	xfs_da_log_buf(tp, save_blk->bp,
+	xfs_trans_log_buf(tp, save_blk->bp,
 		XFS_DA_LOGRANGE(save_node, &save_node->hdr,
 			sizeof(save_node->hdr)));
 
@@ -1100,7 +1086,7 @@ xfs_da_node_lookup_int(xfs_da_state_t *state, int *result)
 			state->path.active--;
 			return(error);
 		}
-		curr = blk->bp->data;
+		curr = blk->bp->b_addr;
 		blk->magic = be16_to_cpu(curr->magic);
 		ASSERT(blk->magic == XFS_DA_NODE_MAGIC ||
 		       blk->magic == XFS_DIR2_LEAFN_MAGIC ||
@@ -1110,7 +1096,7 @@ xfs_da_node_lookup_int(xfs_da_state_t *state, int *result)
 		 * Search an intermediate node for a match.
 		 */
 		if (blk->magic == XFS_DA_NODE_MAGIC) {
-			node = blk->bp->data;
+			node = blk->bp->b_addr;
 			max = be16_to_cpu(node->hdr.count);
 			blk->hashval = be32_to_cpu(node->btree[max-1].hashval);
 
@@ -1216,15 +1202,15 @@ xfs_da_blk_link(xfs_da_state_t *state, xfs_da_state_blk_t *old_blk,
 	xfs_da_blkinfo_t *old_info, *new_info, *tmp_info;
 	xfs_da_args_t *args;
 	int before=0, error;
-	xfs_dabuf_t *bp;
+	struct xfs_buf *bp;
 
 	/*
 	 * Set up environment.
 	 */
 	args = state->args;
 	ASSERT(args != NULL);
-	old_info = old_blk->bp->data;
-	new_info = new_blk->bp->data;
+	old_info = old_blk->bp->b_addr;
+	new_info = new_blk->bp->b_addr;
 	ASSERT(old_blk->magic == XFS_DA_NODE_MAGIC ||
 	       old_blk->magic == XFS_DIR2_LEAFN_MAGIC ||
 	       old_blk->magic == XFS_ATTR_LEAF_MAGIC);
@@ -1261,12 +1247,11 @@ xfs_da_blk_link(xfs_da_state_t *state, xfs_da_state_blk_t *old_blk,
 		if (error)
 			return(error);
 		ASSERT(bp != NULL);
-		tmp_info = bp->data;
+		tmp_info = bp->b_addr;
 		ASSERT(be16_to_cpu(tmp_info->magic) == be16_to_cpu(old_info->magic));
 		ASSERT(be32_to_cpu(tmp_info->forw) == old_blk->blkno);
 		tmp_info->forw = cpu_to_be32(new_blk->blkno);
-		xfs_da_log_buf(args->trans, bp, 0, sizeof(*tmp_info)-1);
-		xfs_da_buf_done(bp);
+		xfs_trans_log_buf(args->trans, bp, 0, sizeof(*tmp_info)-1);
 		}
 		old_info->back = cpu_to_be32(new_blk->blkno);
 	} else {
@@ -1283,18 +1268,17 @@ xfs_da_blk_link(xfs_da_state_t *state, xfs_da_state_blk_t *old_blk,
 		if (error)
 			return(error);
 		ASSERT(bp != NULL);
-		tmp_info = bp->data;
+		tmp_info = bp->b_addr;
 		ASSERT(tmp_info->magic == old_info->magic);
 		ASSERT(be32_to_cpu(tmp_info->back) == old_blk->blkno);
 		tmp_info->back = cpu_to_be32(new_blk->blkno);
-		xfs_da_log_buf(args->trans, bp, 0, sizeof(*tmp_info)-1);
-		xfs_da_buf_done(bp);
+		xfs_trans_log_buf(args->trans, bp, 0, sizeof(*tmp_info)-1);
 		}
 		old_info->forw = cpu_to_be32(new_blk->blkno);
 	}
 
-	xfs_da_log_buf(args->trans, old_blk->bp, 0, sizeof(*tmp_info) - 1);
-	xfs_da_log_buf(args->trans, new_blk->bp, 0, sizeof(*tmp_info) - 1);
+	xfs_trans_log_buf(args->trans, old_blk->bp, 0, sizeof(*tmp_info) - 1);
+	xfs_trans_log_buf(args->trans, new_blk->bp, 0, sizeof(*tmp_info) - 1);
 	return(0);
 }
@@ -1302,12 +1286,14 @@ xfs_da_blk_link(xfs_da_state_t *state, xfs_da_state_blk_t *old_blk,
  * Compare two intermediate nodes for "order".
  */
 STATIC int
-xfs_da_node_order(xfs_dabuf_t *node1_bp, xfs_dabuf_t *node2_bp)
+xfs_da_node_order(
+	struct xfs_buf	*node1_bp,
+	struct xfs_buf	*node2_bp)
 {
 	xfs_da_intnode_t *node1, *node2;
 
-	node1 = node1_bp->data;
-	node2 = node2_bp->data;
+	node1 = node1_bp->b_addr;
+	node2 = node2_bp->b_addr;
 	ASSERT(node1->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC) &&
 	       node2->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
 	if ((be16_to_cpu(node1->hdr.count) > 0) && (be16_to_cpu(node2->hdr.count) > 0) &&
@@ -1324,11 +1310,13 @@ xfs_da_node_order(xfs_dabuf_t *node1_bp, xfs_dabuf_t *node2_bp)
  * Pick up the last hashvalue from an intermediate node.
  */
 STATIC uint
-xfs_da_node_lasthash(xfs_dabuf_t *bp, int *count)
+xfs_da_node_lasthash(
+	struct xfs_buf	*bp,
+	int		*count)
 {
 	xfs_da_intnode_t *node;
 
-	node = bp->data;
+	node = bp->b_addr;
 	ASSERT(node->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
 	if (count)
 		*count = be16_to_cpu(node->hdr.count);
@@ -1346,7 +1334,7 @@ xfs_da_blk_unlink(xfs_da_state_t *state, xfs_da_state_blk_t *drop_blk,
 {
 	xfs_da_blkinfo_t *drop_info, *save_info, *tmp_info;
 	xfs_da_args_t *args;
-	xfs_dabuf_t *bp;
+	struct xfs_buf *bp;
 	int error;
 
 	/*
@@ -1354,8 +1342,8 @@ xfs_da_blk_unlink(xfs_da_state_t *state, xfs_da_state_blk_t *drop_blk,
 	 */
 	args = state->args;
 	ASSERT(args != NULL);
-	save_info = save_blk->bp->data;
-	drop_info = drop_blk->bp->data;
+	save_info = save_blk->bp->b_addr;
+	drop_info = drop_blk->bp->b_addr;
 	ASSERT(save_blk->magic == XFS_DA_NODE_MAGIC ||
 	       save_blk->magic == XFS_DIR2_LEAFN_MAGIC ||
 	       save_blk->magic == XFS_ATTR_LEAF_MAGIC);
@@ -1380,13 +1368,12 @@ xfs_da_blk_unlink(xfs_da_state_t *state, xfs_da_state_blk_t *drop_blk,
 		if (error)
 			return(error);
 		ASSERT(bp != NULL);
-		tmp_info = bp->data;
+		tmp_info = bp->b_addr;
 		ASSERT(tmp_info->magic == save_info->magic);
 		ASSERT(be32_to_cpu(tmp_info->forw) == drop_blk->blkno);
 		tmp_info->forw = cpu_to_be32(save_blk->blkno);
-		xfs_da_log_buf(args->trans, bp, 0,
+		xfs_trans_log_buf(args->trans, bp, 0,
 				    sizeof(*tmp_info) - 1);
-		xfs_da_buf_done(bp);
 		}
 	} else {
 		trace_xfs_da_unlink_forward(args);
@@ -1398,17 +1385,16 @@ xfs_da_blk_unlink(xfs_da_state_t *state, xfs_da_state_blk_t *drop_blk,
 		if (error)
 			return(error);
 		ASSERT(bp != NULL);
-		tmp_info = bp->data;
+		tmp_info = bp->b_addr;
 		ASSERT(tmp_info->magic == save_info->magic);
 		ASSERT(be32_to_cpu(tmp_info->back) == drop_blk->blkno);
 		tmp_info->back = cpu_to_be32(save_blk->blkno);
-		xfs_da_log_buf(args->trans, bp, 0,
+		xfs_trans_log_buf(args->trans, bp, 0,
 				    sizeof(*tmp_info) - 1);
-		xfs_da_buf_done(bp);
 		}
 	}
 
-	xfs_da_log_buf(args->trans, save_blk->bp, 0, sizeof(*save_info) - 1);
+	xfs_trans_log_buf(args->trans, save_blk->bp, 0, sizeof(*save_info) - 1);
 	return(0);
 }
@@ -1443,7 +1429,7 @@ xfs_da_path_shift(xfs_da_state_t *state, xfs_da_state_path_t *path,
 	level = (path->active-1) - 1;	/* skip bottom layer in path */
 	for (blk = &path->blk[level]; level >= 0; blk--, level--) {
 		ASSERT(blk->bp != NULL);
-		node = blk->bp->data;
+		node = blk->bp->b_addr;
 		ASSERT(node->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
 		if (forward && (blk->index < be16_to_cpu(node->hdr.count)-1)) {
 			blk->index++;
@@ -1471,7 +1457,7 @@ xfs_da_path_shift(xfs_da_state_t *state, xfs_da_state_path_t *path,
 		 * (if it's dirty, trans won't actually let go)
 		 */
 		if (release)
-			xfs_da_brelse(args->trans, blk->bp);
+			xfs_trans_brelse(args->trans, blk->bp);
 
 		/*
 		 * Read the next child block.
@@ -1482,7 +1468,7 @@ xfs_da_path_shift(xfs_da_state_t *state, xfs_da_state_path_t *path,
 		if (error)
 			return(error);
 		ASSERT(blk->bp != NULL);
-		info = blk->bp->data;
+		info = blk->bp->b_addr;
 		ASSERT(info->magic == cpu_to_be16(XFS_DA_NODE_MAGIC) ||
 		       info->magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC) ||
 		       info->magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC));
@@ -1702,11 +1688,13 @@ xfs_da_grow_inode(
  * a bmap btree split to do that.
  */
 STATIC int
-xfs_da_swap_lastblock(xfs_da_args_t *args, xfs_dablk_t *dead_blknop,
-		       xfs_dabuf_t **dead_bufp)
+xfs_da_swap_lastblock(
+	xfs_da_args_t	*args,
+	xfs_dablk_t	*dead_blknop,
+	struct xfs_buf	**dead_bufp)
 {
 	xfs_dablk_t dead_blkno, last_blkno, sib_blkno, par_blkno;
-	xfs_dabuf_t *dead_buf, *last_buf, *sib_buf, *par_buf;
+	struct xfs_buf *dead_buf, *last_buf, *sib_buf, *par_buf;
 	xfs_fileoff_t lastoff;
 	xfs_inode_t *ip;
 	xfs_trans_t *tp;
@@ -1744,9 +1732,9 @@ xfs_da_swap_lastblock(xfs_da_args_t *args, xfs_dablk_t *dead_blknop,
 	/*
 	 * Copy the last block into the dead buffer and log it.
 	 */
-	memcpy(dead_buf->data, last_buf->data, mp->m_dirblksize);
-	xfs_da_log_buf(tp, dead_buf, 0, mp->m_dirblksize - 1);
-	dead_info = dead_buf->data;
+	memcpy(dead_buf->b_addr, last_buf->b_addr, mp->m_dirblksize);
+	xfs_trans_log_buf(tp, dead_buf, 0, mp->m_dirblksize - 1);
+	dead_info = dead_buf->b_addr;
 	/*
 	 * Get values from the moved block.
 	 */
@@ -1767,7 +1755,7 @@ xfs_da_swap_lastblock(xfs_da_args_t *args, xfs_dablk_t *dead_blknop,
 	if ((sib_blkno = be32_to_cpu(dead_info->back))) {
 		if ((error = xfs_da_read_buf(tp, ip, sib_blkno, -1, &sib_buf, w)))
 			goto done;
-		sib_info = sib_buf->data;
+		sib_info = sib_buf->b_addr;
 		if (unlikely(
 		    be32_to_cpu(sib_info->forw) != last_blkno ||
 		    sib_info->magic != dead_info->magic)) {
@@ -1777,10 +1765,9 @@ xfs_da_swap_lastblock(xfs_da_args_t *args, xfs_dablk_t *dead_blknop,
 			goto done;
 		}
 		sib_info->forw = cpu_to_be32(dead_blkno);
-		xfs_da_log_buf(tp, sib_buf,
+		xfs_trans_log_buf(tp, sib_buf,
 			XFS_DA_LOGRANGE(sib_info, &sib_info->forw,
 					sizeof(sib_info->forw)));
-		xfs_da_buf_done(sib_buf);
 		sib_buf = NULL;
 	}
 	/*
@@ -1789,7 +1776,7 @@ xfs_da_swap_lastblock(xfs_da_args_t *args, xfs_dablk_t *dead_blknop,
 	if ((sib_blkno = be32_to_cpu(dead_info->forw))) {
 		if ((error = xfs_da_read_buf(tp, ip, sib_blkno, -1, &sib_buf, w)))
 			goto done;
-		sib_info = sib_buf->data;
+		sib_info = sib_buf->b_addr;
 		if (unlikely(
 		    be32_to_cpu(sib_info->back) != last_blkno ||
 		    sib_info->magic != dead_info->magic)) {
@@ -1799,10 +1786,9 @@ xfs_da_swap_lastblock(xfs_da_args_t *args, xfs_dablk_t *dead_blknop,
 			goto done;
 		}
 		sib_info->back = cpu_to_be32(dead_blkno);
-		xfs_da_log_buf(tp, sib_buf,
+		xfs_trans_log_buf(tp, sib_buf,
 			XFS_DA_LOGRANGE(sib_info, &sib_info->back,
 					sizeof(sib_info->back)));
-		xfs_da_buf_done(sib_buf);
 		sib_buf = NULL;
 	}
 	par_blkno = mp->m_dirleafblk;
@@ -1813,7 +1799,7 @@ xfs_da_swap_lastblock(xfs_da_args_t *args, xfs_dablk_t *dead_blknop,
 	for (;;) {
 		if ((error = xfs_da_read_buf(tp, ip, par_blkno, -1, &par_buf, w)))
 			goto done;
-		par_node = par_buf->data;
+		par_node = par_buf->b_addr;
 		if (unlikely(par_node->hdr.info.magic !=
 		    cpu_to_be16(XFS_DA_NODE_MAGIC) ||
 		    (level >= 0 && level != be16_to_cpu(par_node->hdr.level) + 1))) {
@@ -1837,7 +1823,7 @@ xfs_da_swap_lastblock(xfs_da_args_t *args, xfs_dablk_t *dead_blknop,
 		par_blkno = be32_to_cpu(par_node->btree[entno].before);
 		if (level == dead_level + 1)
 			break;
-		xfs_da_brelse(tp, par_buf);
+		xfs_trans_brelse(tp, par_buf);
 		par_buf = NULL;
 	}
 	/*
@@ -1853,7 +1839,7 @@ xfs_da_swap_lastblock(xfs_da_args_t *args, xfs_dablk_t *dead_blknop,
 		if (entno < be16_to_cpu(par_node->hdr.count))
 			break;
 		par_blkno = be32_to_cpu(par_node->hdr.info.forw);
-		xfs_da_brelse(tp, par_buf);
+		xfs_trans_brelse(tp, par_buf);
 		par_buf = NULL;
 		if (unlikely(par_blkno == 0)) {
 			XFS_ERROR_REPORT("xfs_da_swap_lastblock(6)",
@@ -1863,7 +1849,7 @@ xfs_da_swap_lastblock(xfs_da_args_t *args, xfs_dablk_t *dead_blknop,
 		}
 		if ((error = xfs_da_read_buf(tp, ip, par_blkno, -1, &par_buf, w)))
 			goto done;
-		par_node = par_buf->data;
+		par_node = par_buf->b_addr;
 		if (unlikely(
 		    be16_to_cpu(par_node->hdr.level) != level ||
 		    par_node->hdr.info.magic != cpu_to_be16(XFS_DA_NODE_MAGIC))) {
@@ -1878,20 +1864,18 @@ xfs_da_swap_lastblock(xfs_da_args_t *args, xfs_dablk_t *dead_blknop,
 	 * Update the parent entry pointing to the moved block.
 	 */
 	par_node->btree[entno].before = cpu_to_be32(dead_blkno);
-	xfs_da_log_buf(tp, par_buf,
+	xfs_trans_log_buf(tp, par_buf,
 		XFS_DA_LOGRANGE(par_node, &par_node->btree[entno].before,
 				sizeof(par_node->btree[entno].before)));
-	xfs_da_buf_done(par_buf);
-	xfs_da_buf_done(dead_buf);
 	*dead_blknop = last_blkno;
 	*dead_bufp = last_buf;
 	return 0;
 done:
 	if (par_buf)
-		xfs_da_brelse(tp, par_buf);
+		xfs_trans_brelse(tp, par_buf);
 	if (sib_buf)
-		xfs_da_brelse(tp, sib_buf);
-	xfs_da_brelse(tp, last_buf);
+		xfs_trans_brelse(tp, sib_buf);
+	xfs_trans_brelse(tp, last_buf);
 	return error;
 }
 
@@ -1899,8 +1883,10 @@ done:
  * Remove a btree block from a directory or attribute.
  */
 int
-xfs_da_shrink_inode(xfs_da_args_t *args, xfs_dablk_t dead_blkno,
-		     xfs_dabuf_t *dead_buf)
+xfs_da_shrink_inode(
+	xfs_da_args_t	*args,
+	xfs_dablk_t	dead_blkno,
+	struct xfs_buf	*dead_buf)
 {
 	xfs_inode_t *dp;
 	int done, error, w, count;
@@ -1935,7 +1921,7 @@ xfs_da_shrink_inode(xfs_da_args_t *args, xfs_dablk_t dead_blkno,
 			break;
 		}
 	}
-	xfs_da_binval(tp, dead_buf);
+	xfs_trans_binval(tp, dead_buf);
 	return error;
 }
 
@@ -1967,35 +1953,75 @@ xfs_da_map_covers_blocks(
 }
 
 /*
- * Make a dabuf.
- * Used for get_buf, read_buf, read_bufr, and reada_buf.
+ * Convert a struct xfs_bmbt_irec to a struct xfs_buf_map.
+ *
+ * For the single map case, it is assumed that the caller has provided a pointer
+ * to a valid xfs_buf_map. For the multiple map case, this function will
+ * allocate the xfs_buf_map to hold all the maps and replace the caller's single
+ * map pointer with the allocated map.
  */
-STATIC int
-xfs_da_do_buf(
-	xfs_trans_t	*trans,
-	xfs_inode_t	*dp,
-	xfs_dablk_t	bno,
-	xfs_daddr_t	*mappedbnop,
-	xfs_dabuf_t	**bpp,
-	int		whichfork,
-	int		caller)
+static int
+xfs_buf_map_from_irec(
+	struct xfs_mount	*mp,
+	struct xfs_buf_map	**mapp,
+	unsigned int		*nmaps,
+	struct xfs_bmbt_irec	*irecs,
+	unsigned int		nirecs)
 {
-	xfs_buf_t	*bp = NULL;
-	xfs_buf_t	**bplist;
-	int		error=0;
-	int		i;
-	xfs_bmbt_irec_t	map;
-	xfs_bmbt_irec_t	*mapp;
-	xfs_daddr_t	mappedbno;
-	xfs_mount_t	*mp;
-	int		nbplist=0;
-	int		nfsb;
-	int		nmap;
-	xfs_dabuf_t	*rbp;
+	struct xfs_buf_map	*map;
+	int			i;
+
+	ASSERT(*nmaps == 1);
+	ASSERT(nirecs >= 1);
+
+	if (nirecs > 1) {
+		map = kmem_zalloc(nirecs * sizeof(struct xfs_buf_map), KM_SLEEP);
+		if (!map)
+			return ENOMEM;
+		*mapp = map;
+	}
+
+	*nmaps = nirecs;
+	map = *mapp;
+	for (i = 0; i < *nmaps; i++) {
+		ASSERT(irecs[i].br_startblock != DELAYSTARTBLOCK &&
+		       irecs[i].br_startblock != HOLESTARTBLOCK);
+		map[i].bm_bn = XFS_FSB_TO_DADDR(mp, irecs[i].br_startblock);
+		map[i].bm_len = XFS_FSB_TO_BB(mp, irecs[i].br_blockcount);
+	}
+	return 0;
+}
+
+/*
+ * Map the block we are given ready for reading. There are three possible return
+ * values:
+ *	-1 - will be returned if we land in a hole and mappedbno == -2 so the
+ *	     caller knows not to execute a subsequent read.
+ *	 0 - if we mapped the block successfully
+ *	>0 - positive error number if there was an error.
+ */
+static int
+xfs_dabuf_map(
+	struct xfs_trans	*trans,
+	struct xfs_inode	*dp,
+	xfs_dablk_t		bno,
+	xfs_daddr_t		mappedbno,
+	int			whichfork,
+	struct xfs_buf_map	**map,
+	int			*nmaps)
+{
+	struct xfs_mount	*mp = dp->i_mount;
+	int			nfsb;
+	int			error = 0;
+	struct xfs_bmbt_irec	irec;
+	struct xfs_bmbt_irec	*irecs = &irec;
+	int			nirecs;
+
+	ASSERT(map && *map);
+	ASSERT(*nmaps == 1);
 
-	mp = dp->i_mount;
 	nfsb = (whichfork == XFS_DATA_FORK) ? mp->m_dirblkfsbs : 1;
-	mappedbno = *mappedbnop;
+
 	/*
 	 * Caller doesn't have a mapping. -2 means don't complain
 	 * if we land in a hole.
@@ -2004,112 +2030,150 @@ xfs_da_do_buf(
 		/*
 		 * Optimize the one-block case.
 		 */
-		if (nfsb == 1)
-			mapp = &map;
-		else
-			mapp = kmem_alloc(sizeof(*mapp) * nfsb, KM_SLEEP);
+		if (nfsb != 1)
+			irecs = kmem_zalloc(sizeof(irec) * nfsb, KM_SLEEP);
 
-		nmap = nfsb;
-		error = xfs_bmapi_read(dp, (xfs_fileoff_t)bno, nfsb, mapp,
-				       &nmap, xfs_bmapi_aflag(whichfork));
+		nirecs = nfsb;
+		error = xfs_bmapi_read(dp, (xfs_fileoff_t)bno, nfsb, irecs,
+				       &nirecs, xfs_bmapi_aflag(whichfork));
 		if (error)
-			goto exit0;
+			goto out;
 	} else {
-		map.br_startblock = XFS_DADDR_TO_FSB(mp, mappedbno);
-		map.br_startoff = (xfs_fileoff_t)bno;
-		map.br_blockcount = nfsb;
-		mapp = &map;
-		nmap = 1;
+		irecs->br_startblock = XFS_DADDR_TO_FSB(mp, mappedbno);
+		irecs->br_startoff = (xfs_fileoff_t)bno;
+		irecs->br_blockcount = nfsb;
+		irecs->br_state = 0;
+		nirecs = 1;
 	}
-	if (!xfs_da_map_covers_blocks(nmap, mapp, bno, nfsb)) {
-		error = mappedbno == -2 ? 0 : XFS_ERROR(EFSCORRUPTED);
+
+	if (!xfs_da_map_covers_blocks(nirecs, irecs, bno, nfsb)) {
+		error = mappedbno == -2 ? -1 : XFS_ERROR(EFSCORRUPTED);
 		if (unlikely(error == EFSCORRUPTED)) {
 			if (xfs_error_level >= XFS_ERRLEVEL_LOW) {
+				int i;
 				xfs_alert(mp, "%s: bno %lld dir: inode %lld",
 					__func__, (long long)bno,
 					(long long)dp->i_ino);
-				for (i = 0; i < nmap; i++) {
+				for (i = 0; i < *nmaps; i++) {
 					xfs_alert(mp,
 "[%02d] br_startoff %lld br_startblock %lld br_blockcount %lld br_state %d",
 						i,
-						(long long)mapp[i].br_startoff,
-						(long long)mapp[i].br_startblock,
-						(long long)mapp[i].br_blockcount,
-						mapp[i].br_state);
+						(long long)irecs[i].br_startoff,
+						(long long)irecs[i].br_startblock,
+						(long long)irecs[i].br_blockcount,
+						irecs[i].br_state);
 				}
 			}
 			XFS_ERROR_REPORT("xfs_da_do_buf(1)",
 					 XFS_ERRLEVEL_LOW, mp);
 		}
-		goto exit0;
+		goto out;
 	}
-	if (caller != 3 && nmap > 1) {
-		bplist = kmem_alloc(sizeof(*bplist) * nmap, KM_SLEEP);
-		nbplist = 0;
-	} else
-		bplist = NULL;
-	/*
-	 * Turn the mapping(s) into buffer(s).
-	 */
-	for (i = 0; i < nmap; i++) {
-		int	nmapped;
-
-		mappedbno = XFS_FSB_TO_DADDR(mp, mapp[i].br_startblock);
-		if (i == 0)
-			*mappedbnop = mappedbno;
-		nmapped = (int)XFS_FSB_TO_BB(mp, mapp[i].br_blockcount);
-		switch (caller) {
-		case 0:
-			bp = xfs_trans_get_buf(trans, mp->m_ddev_targp,
-					       mappedbno, nmapped, 0);
-			error = bp ? bp->b_error : XFS_ERROR(EIO);
-			break;
-		case 1:
-		case 2:
-			bp = NULL;
-			error = xfs_trans_read_buf(mp, trans, mp->m_ddev_targp,
-						   mappedbno, nmapped, 0, &bp);
-			break;
-		case 3:
-			xfs_buf_readahead(mp->m_ddev_targp, mappedbno, nmapped);
+	error = xfs_buf_map_from_irec(mp, map, nmaps, irecs, nirecs);
+out:
+	if (irecs != &irec)
+		kmem_free(irecs);
+	return error;
+}
+
+/*
+ * Get a buffer for the dir/attr block.
+ */
+int
+xfs_da_get_buf(
+	struct xfs_trans	*trans,
+	struct xfs_inode	*dp,
+	xfs_dablk_t		bno,
+	xfs_daddr_t		mappedbno,
+	struct xfs_buf		**bpp,
+	int			whichfork)
+{
+	struct xfs_buf		*bp;
+	struct xfs_buf_map	map;
+	struct xfs_buf_map	*mapp;
+	int			nmap;
+	int			error;
+
+	*bpp = NULL;
+	mapp = &map;
+	nmap = 1;
+	error = xfs_dabuf_map(trans, dp, bno, mappedbno, whichfork,
+				&mapp, &nmap);
+	if (error) {
+		/* mapping a hole is not an error, but we don't continue */
+		if (error == -1)
 			error = 0;
-			bp = NULL;
-			break;
-		}
-		if (error) {
-			if (bp)
-				xfs_trans_brelse(trans, bp);
-			goto exit1;
-		}
-		if (!bp)
-			continue;
-		if (caller == 1) {
-			if (whichfork == XFS_ATTR_FORK)
-				xfs_buf_set_ref(bp, XFS_ATTR_BTREE_REF);
-			else
-				xfs_buf_set_ref(bp, XFS_DIR_BTREE_REF);
-		}
-		if (bplist) {
-			bplist[nbplist++] = bp;
-		}
+		goto out_free;
 	}
-	/*
-	 * Build a dabuf structure.
-	 */
-	if (bplist) {
-		rbp = xfs_da_buf_make(nbplist, bplist);
-	} else if (bp)
-		rbp = xfs_da_buf_make(1, &bp);
+
+	bp = xfs_trans_get_buf_map(trans, dp->i_mount->m_ddev_targp,
+				    mapp, nmap, 0);
+	error = bp ? bp->b_error : XFS_ERROR(EIO);
+	if (error) {
+		xfs_trans_brelse(trans, bp);
+		goto out_free;
+	}
+
+	*bpp = bp;
+
+out_free:
+	if (mapp != &map)
+		kmem_free(mapp);
+
+	return error;
+}
+
+/*
+ * Get a buffer for the dir/attr block, fill in the contents.
+ */
+int
+xfs_da_read_buf(
+	struct xfs_trans	*trans,
+	struct xfs_inode	*dp,
+	xfs_dablk_t		bno,
+	xfs_daddr_t		mappedbno,
+	struct xfs_buf		**bpp,
+	int			whichfork)
+{
+	struct xfs_buf		*bp;
+	struct xfs_buf_map	map;
+	struct xfs_buf_map	*mapp;
+	int			nmap;
+	int			error;
+
+	*bpp = NULL;
+	mapp = &map;
+	nmap = 1;
+	error = xfs_dabuf_map(trans, dp, bno, mappedbno, whichfork,
+				&mapp, &nmap);
+	if (error) {
+		/* mapping a hole is not an error, but we don't continue */
+		if (error == -1)
+			error = 0;
+		goto out_free;
+	}
+
+	error = xfs_trans_read_buf_map(dp->i_mount, trans,
+					dp->i_mount->m_ddev_targp,
+					mapp, nmap, 0, &bp);
+	if (error)
+		goto out_free;
+
+	if (whichfork == XFS_ATTR_FORK)
+		xfs_buf_set_ref(bp, XFS_ATTR_BTREE_REF);
 	else
-		rbp = NULL;
+		xfs_buf_set_ref(bp, XFS_DIR_BTREE_REF);
+
 	/*
-	 * For read_buf, check the magic number.
+	 * This verification code will be moved to a CRC verification callback
+	 * function so just leave it here unchanged until then.
 	 */
-	if (caller == 1) {
-		xfs_dir2_data_hdr_t	*hdr = rbp->data;
-		xfs_dir2_free_t		*free = rbp->data;
-		xfs_da_blkinfo_t	*info = rbp->data;
+	{
+		xfs_dir2_data_hdr_t	*hdr = bp->b_addr;
+		xfs_dir2_free_t		*free = bp->b_addr;
+		xfs_da_blkinfo_t	*info = bp->b_addr;
 		uint			magic, magic1;
+		struct xfs_mount	*mp = dp->i_mount;
 
 		magic = be16_to_cpu(info->magic);
 		magic1 = be32_to_cpu(hdr->magic);
@@ -2123,66 +2187,20 @@ xfs_da_do_buf(
 			    (free->hdr.magic != cpu_to_be32(XFS_DIR2_FREE_MAGIC)),
 				mp, XFS_ERRTAG_DA_READ_BUF,
 				XFS_RANDOM_DA_READ_BUF))) {
-			trace_xfs_da_btree_corrupt(rbp->bps[0], _RET_IP_);
+			trace_xfs_da_btree_corrupt(bp, _RET_IP_);
 			XFS_CORRUPTION_ERROR("xfs_da_do_buf(2)",
 					     XFS_ERRLEVEL_LOW, mp, info);
 			error = XFS_ERROR(EFSCORRUPTED);
-			xfs_da_brelse(trans, rbp);
-			nbplist = 0;
-			goto exit1;
+			xfs_trans_brelse(trans, bp);
+			goto out_free;
 		}
 	}
-	if (bplist) {
-		kmem_free(bplist);
-	}
-	if (mapp != &map) {
-		kmem_free(mapp);
-	}
-	if (bpp)
-		*bpp = rbp;
-	return 0;
-exit1:
-	if (bplist) {
-		for (i = 0; i < nbplist; i++)
-			xfs_trans_brelse(trans, bplist[i]);
-		kmem_free(bplist);
-	}
-exit0:
+	*bpp = bp;
+out_free:
 	if (mapp != &map)
 		kmem_free(mapp);
-	if (bpp)
-		*bpp = NULL;
-	return error;
-}
-
-/*
- * Get a buffer for the dir/attr block.
- */
-int
-xfs_da_get_buf(
-	xfs_trans_t	*trans,
-	xfs_inode_t	*dp,
-	xfs_dablk_t	bno,
-	xfs_daddr_t	mappedbno,
-	xfs_dabuf_t	**bpp,
-	int		whichfork)
-{
-	return xfs_da_do_buf(trans, dp, bno, &mappedbno, bpp, whichfork, 0);
-}
-
-/*
- * Get a buffer for the dir/attr block, fill in the contents.
- */
-int
-xfs_da_read_buf(
-	xfs_trans_t	*trans,
-	xfs_inode_t	*dp,
-	xfs_dablk_t	bno,
-	xfs_daddr_t	mappedbno,
-	xfs_dabuf_t	**bpp,
-	int		whichfork)
-{
-	return xfs_da_do_buf(trans, dp, bno, &mappedbno, bpp, whichfork, 1);
+	return error;
 }
 
 /*
@@ -2190,22 +2208,41 @@ xfs_da_read_buf(
  */
 xfs_daddr_t
 xfs_da_reada_buf(
-        xfs_trans_t     *trans,
-        xfs_inode_t     *dp,
-        xfs_dablk_t     bno,
-        int             whichfork)
+        struct xfs_trans        *trans,
+        struct xfs_inode        *dp,
+        xfs_dablk_t             bno,
+        int                     whichfork)
 {
-        xfs_daddr_t     rval;
+        xfs_daddr_t             mappedbno = -1;
+        struct xfs_buf_map      map;
+        struct xfs_buf_map      *mapp;
+        int                     nmap;
+        int                     error;
+
+        mapp = &map;
+        nmap = 1;
+        error = xfs_dabuf_map(trans, dp, bno, -1, whichfork,
+                              &mapp, &nmap);
+        if (error) {
+                /* mapping a hole is not an error, but we don't continue */
+                if (error == -1)
+                        error = 0;
+                goto out_free;
+        }
 
-        rval = -1;
-        if (xfs_da_do_buf(trans, dp, bno, &rval, NULL, whichfork, 3))
+        mappedbno = mapp[0].bm_bn;
+        xfs_buf_readahead_map(dp->i_mount->m_ddev_targp, mapp, nmap);
+
+out_free:
+        if (mapp != &map)
+                kmem_free(mapp);
+
+        if (error)
                 return -1;
-        else
-                return rval;
+        return mappedbno;
 }
 
 kmem_zone_t *xfs_da_state_zone;         /* anchor for state struct zone */
-kmem_zone_t *xfs_dabuf_zone;            /* dabuf zone */
 
 /*
  * Allocate a dir-state structure.
@@ -2225,13 +2262,8 @@ xfs_da_state_kill_altpath(xfs_da_state_t *state)
 {
         int     i;
 
-        for (i = 0; i < state->altpath.active; i++) {
-                if (state->altpath.blk[i].bp) {
-                        if (state->altpath.blk[i].bp != state->path.blk[i].bp)
-                                xfs_da_buf_done(state->altpath.blk[i].bp);
-                        state->altpath.blk[i].bp = NULL;
-                }
-        }
+        for (i = 0; i < state->altpath.active; i++)
+                state->altpath.blk[i].bp = NULL;
         state->altpath.active = 0;
 }
 
@@ -2241,204 +2273,9 @@ xfs_da_state_kill_altpath(xfs_da_state_t *state)
 void
 xfs_da_state_free(xfs_da_state_t *state)
 {
-        int     i;
-
         xfs_da_state_kill_altpath(state);
-        for (i = 0; i < state->path.active; i++) {
-                if (state->path.blk[i].bp)
-                        xfs_da_buf_done(state->path.blk[i].bp);
-        }
-        if (state->extravalid && state->extrablk.bp)
-                xfs_da_buf_done(state->extrablk.bp);
 #ifdef DEBUG
         memset((char *)state, 0, sizeof(*state));
 #endif /* DEBUG */
         kmem_zone_free(xfs_da_state_zone, state);
 }
-
-/*
- * Create a dabuf.
- */
-/* ARGSUSED */
-STATIC xfs_dabuf_t *
-xfs_da_buf_make(int nbuf, xfs_buf_t **bps)
-{
-        xfs_buf_t       *bp;
-        xfs_dabuf_t     *dabuf;
-        int             i;
-        int             off;
-
-        if (nbuf == 1)
-                dabuf = kmem_zone_alloc(xfs_dabuf_zone, KM_NOFS);
-        else
-                dabuf = kmem_alloc(XFS_DA_BUF_SIZE(nbuf), KM_NOFS);
-        dabuf->dirty = 0;
-        if (nbuf == 1) {
-                dabuf->nbuf = 1;
-                bp = bps[0];
-                dabuf->bbcount = bp->b_length;
-                dabuf->data = bp->b_addr;
-                dabuf->bps[0] = bp;
-        } else {
-                dabuf->nbuf = nbuf;
-                for (i = 0, dabuf->bbcount = 0; i < nbuf; i++) {
-                        dabuf->bps[i] = bp = bps[i];
-                        dabuf->bbcount += bp->b_length;
-                }
-                dabuf->data = kmem_alloc(BBTOB(dabuf->bbcount), KM_SLEEP);
-                for (i = off = 0; i < nbuf; i++, off += BBTOB(bp->b_length)) {
-                        bp = bps[i];
-                        memcpy((char *)dabuf->data + off, bp->b_addr,
-                                BBTOB(bp->b_length));
-                }
-        }
-        return dabuf;
-}
-
-/*
- * Un-dirty a dabuf.
- */
-STATIC void
-xfs_da_buf_clean(xfs_dabuf_t *dabuf)
-{
-        xfs_buf_t       *bp;
-        int             i;
-        int             off;
-
-        if (dabuf->dirty) {
-                ASSERT(dabuf->nbuf > 1);
-                dabuf->dirty = 0;
-                for (i = off = 0; i < dabuf->nbuf;
-                                i++, off += BBTOB(bp->b_length)) {
-                        bp = dabuf->bps[i];
-                        memcpy(bp->b_addr, dabuf->data + off,
-                                BBTOB(bp->b_length));
-                }
-        }
-}
-
-/*
- * Release a dabuf.
- */
-void
-xfs_da_buf_done(xfs_dabuf_t *dabuf)
-{
-        ASSERT(dabuf);
-        ASSERT(dabuf->nbuf && dabuf->data && dabuf->bbcount && dabuf->bps[0]);
-        if (dabuf->dirty)
-                xfs_da_buf_clean(dabuf);
-        if (dabuf->nbuf > 1) {
-                kmem_free(dabuf->data);
-                kmem_free(dabuf);
-        } else {
-                kmem_zone_free(xfs_dabuf_zone, dabuf);
-        }
-}
-
-/*
- * Log transaction from a dabuf.
- */
-void
-xfs_da_log_buf(xfs_trans_t *tp, xfs_dabuf_t *dabuf, uint first, uint last)
-{
-        xfs_buf_t       *bp;
-        uint            f;
-        int             i;
-        uint            l;
-        int             off;
-
-        ASSERT(dabuf->nbuf && dabuf->data && dabuf->bbcount && dabuf->bps[0]);
-        if (dabuf->nbuf == 1) {
-                ASSERT(dabuf->data == dabuf->bps[0]->b_addr);
-                xfs_trans_log_buf(tp, dabuf->bps[0], first, last);
-                return;
-        }
-        dabuf->dirty = 1;
-        ASSERT(first <= last);
-        for (i = off = 0; i < dabuf->nbuf; i++, off += BBTOB(bp->b_length)) {
-                bp = dabuf->bps[i];
-                f = off;
-                l = f + BBTOB(bp->b_length) - 1;
-                if (f < first)
-                        f = first;
-                if (l > last)
-                        l = last;
-                if (f <= l)
-                        xfs_trans_log_buf(tp, bp, f - off, l - off);
-                /*
-                 * B_DONE is set by xfs_trans_log buf.
-                 * If we don't set it on a new buffer (get not read)
-                 * then if we don't put anything in the buffer it won't
-                 * be set, and at commit it it released into the cache,
-                 * and then a read will fail.
-                 */
-                else if (!(XFS_BUF_ISDONE(bp)))
-                        XFS_BUF_DONE(bp);
-        }
-        ASSERT(last < off);
-}
-
-/*
- * Release dabuf from a transaction.
- * Have to free up the dabuf before the buffers are released,
- * since the synchronization on the dabuf is really the lock on the buffer.
- */
-void
-xfs_da_brelse(xfs_trans_t *tp, xfs_dabuf_t *dabuf)
-{
-        xfs_buf_t       *bp;
-        xfs_buf_t       **bplist;
-        int             i;
-        int             nbuf;
-
-        ASSERT(dabuf->nbuf && dabuf->data && dabuf->bbcount && dabuf->bps[0]);
-        if ((nbuf = dabuf->nbuf) == 1) {
-                bplist = &bp;
-                bp = dabuf->bps[0];
-        } else {
-                bplist = kmem_alloc(nbuf * sizeof(*bplist), KM_SLEEP);
-                memcpy(bplist, dabuf->bps, nbuf * sizeof(*bplist));
-        }
-        xfs_da_buf_done(dabuf);
-        for (i = 0; i < nbuf; i++)
-                xfs_trans_brelse(tp, bplist[i]);
-        if (bplist != &bp)
-                kmem_free(bplist);
-}
-
-/*
- * Invalidate dabuf from a transaction.
- */
-void
-xfs_da_binval(xfs_trans_t *tp, xfs_dabuf_t *dabuf)
-{
-        xfs_buf_t       *bp;
-        xfs_buf_t       **bplist;
-        int             i;
-        int             nbuf;
-
-        ASSERT(dabuf->nbuf && dabuf->data && dabuf->bbcount && dabuf->bps[0]);
-        if ((nbuf = dabuf->nbuf) == 1) {
-                bplist = &bp;
-                bp = dabuf->bps[0];
-        } else {
-                bplist = kmem_alloc(nbuf * sizeof(*bplist), KM_SLEEP);
-                memcpy(bplist, dabuf->bps, nbuf * sizeof(*bplist));
-        }
-        xfs_da_buf_done(dabuf);
-        for (i = 0; i < nbuf; i++)
-                xfs_trans_binval(tp, bplist[i]);
-        if (bplist != &bp)
-                kmem_free(bplist);
-}
-
-/*
- * Get the first daddr from a dabuf.
- */
-xfs_daddr_t
-xfs_da_blkno(xfs_dabuf_t *dabuf)
-{
-        ASSERT(dabuf->nbuf);
-        ASSERT(dabuf->data);
-        return XFS_BUF_ADDR(dabuf->bps[0]);
-}