Diffstat (limited to 'fs/xfs/libxfs/xfs_btree.c')
-rw-r--r--  fs/xfs/libxfs/xfs_btree.c  63
1 file changed, 45 insertions(+), 18 deletions(-)
diff --git a/fs/xfs/libxfs/xfs_btree.c b/fs/xfs/libxfs/xfs_btree.c
index 2aa300f7461f..2eecc49fc1b2 100644
--- a/fs/xfs/libxfs/xfs_btree.c
+++ b/fs/xfs/libxfs/xfs_btree.c
@@ -51,16 +51,31 @@ xfs_btree_magic(
return magic;
}
-static xfs_failaddr_t
+/*
+ * These sibling pointer checks are optimised for null sibling pointers. This
+ * happens a lot, and we don't need to byte swap at runtime if the sibling
+ * pointer is NULL.
+ *
+ * These are explicitly marked as inline because the cost of calling them as
+ * functions instead of inlining them is about 36 bytes extra code per call site
+ * on x86-64. Yes, gcc-11 fails to inline them, and explicit inlining of these
+ * two sibling check functions reduces the compiled code size by over 300
+ * bytes.
+ */
+static inline xfs_failaddr_t
xfs_btree_check_lblock_siblings(
struct xfs_mount *mp,
struct xfs_btree_cur *cur,
int level,
xfs_fsblock_t fsb,
- xfs_fsblock_t sibling)
+ __be64 dsibling)
{
- if (sibling == NULLFSBLOCK)
+ xfs_fsblock_t sibling;
+
+ if (dsibling == cpu_to_be64(NULLFSBLOCK))
return NULL;
+
+ sibling = be64_to_cpu(dsibling);
if (sibling == fsb)
return __this_address;
if (level >= 0) {
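
The trick in this hunk generalises: when a check only has to compare an on-disk big-endian field against a known constant, swap the constant at compile time instead of swapping the field at run time, so the common null-pointer case is a single integer compare. A minimal userspace sketch of the idea, using glibc's <endian.h> htobe64()/be64toh() as stand-ins for cpu_to_be64()/be64_to_cpu(); struct disk_block, NULL_SIBLING and check_sibling() are hypothetical names, not XFS code:

#include <endian.h>		/* htobe64()/be64toh(): userspace stand-ins */
#include <stdbool.h>
#include <stdint.h>

#define NULL_SIBLING	((uint64_t)-1)	/* hypothetical "no sibling" marker */

struct disk_block {
	uint64_t	left_sib;	/* stored big-endian on disk */
	uint64_t	right_sib;	/* stored big-endian on disk */
};

/*
 * An optimising compiler folds htobe64(NULL_SIBLING) to a constant, so
 * the common null-sibling case is one integer compare with no runtime
 * byte swap; only non-null pointers pay for the conversion to CPU order.
 */
static inline bool check_sibling(uint64_t dsibling, uint64_t self)
{
	uint64_t sibling;

	if (dsibling == htobe64(NULL_SIBLING))
		return true;			/* no sibling to validate */

	sibling = be64toh(dsibling);		/* swap only when we must */
	return sibling != self;			/* a block must not point at itself */
}
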
@@ -74,17 +89,21 @@ xfs_btree_check_lblock_siblings(
return NULL;
}
-static xfs_failaddr_t
+static inline xfs_failaddr_t
xfs_btree_check_sblock_siblings(
struct xfs_mount *mp,
struct xfs_btree_cur *cur,
int level,
xfs_agnumber_t agno,
xfs_agblock_t agbno,
- xfs_agblock_t sibling)
+ __be32 dsibling)
{
- if (sibling == NULLAGBLOCK)
+ xfs_agblock_t sibling;
+
+ if (dsibling == cpu_to_be32(NULLAGBLOCK))
return NULL;
+
+ sibling = be32_to_cpu(dsibling);
if (sibling == agbno)
return __this_address;
if (level >= 0) {
@@ -136,10 +155,10 @@ __xfs_btree_check_lblock(
fsb = XFS_DADDR_TO_FSB(mp, xfs_buf_daddr(bp));
fa = xfs_btree_check_lblock_siblings(mp, cur, level, fsb,
- be64_to_cpu(block->bb_u.l.bb_leftsib));
+ block->bb_u.l.bb_leftsib);
if (!fa)
fa = xfs_btree_check_lblock_siblings(mp, cur, level, fsb,
- be64_to_cpu(block->bb_u.l.bb_rightsib));
+ block->bb_u.l.bb_rightsib);
return fa;
}
@@ -204,10 +223,10 @@ __xfs_btree_check_sblock(
}
fa = xfs_btree_check_sblock_siblings(mp, cur, level, agno, agbno,
- be32_to_cpu(block->bb_u.s.bb_leftsib));
+ block->bb_u.s.bb_leftsib);
if (!fa)
fa = xfs_btree_check_sblock_siblings(mp, cur, level, agno,
- agbno, be32_to_cpu(block->bb_u.s.bb_rightsib));
+ agbno, block->bb_u.s.bb_rightsib);
return fa;
}
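
At the call sites the change is mechanical: the raw big-endian field is handed over untouched and the helper decides whether a byte swap is needed. Continuing the hypothetical sketch above (struct disk_block and check_sibling() are the made-up names introduced there):

/*
 * Hypothetical caller mirroring the two hunks above: no byte swap at the
 * call site any more; the on-disk fields go straight to the helper.
 */
static bool block_siblings_ok(const struct disk_block *blk, uint64_t self)
{
	return check_sibling(blk->left_sib, self) &&
	       check_sibling(blk->right_sib, self);
}
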
@@ -426,8 +445,14 @@ xfs_btree_del_cursor(
break;
}
+ /*
+ * If we are doing a BMBT update, the number of unaccounted blocks
+ * allocated during this cursor lifetime should be zero. If it's not
+ * zero, then we should be shut down or on our way to shutdown due to
+ * cancelling a dirty transaction on error.
+ */
ASSERT(cur->bc_btnum != XFS_BTNUM_BMAP || cur->bc_ino.allocated == 0 ||
- xfs_is_shutdown(cur->bc_mp));
+ xfs_is_shutdown(cur->bc_mp) || error != 0);
if (unlikely(cur->bc_flags & XFS_BTREE_STAGING))
kmem_free(cur->bc_ops);
if (!(cur->bc_flags & XFS_BTREE_LONG_PTRS) && cur->bc_ag.pag)
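
The relaxed assertion encodes a three-way invariant: unaccounted blocks at cursor teardown are only acceptable if the filesystem is shut down or the caller is cancelling a failed operation and says so by passing a non-zero error. A self-contained sketch of that invariant under hypothetical names (struct cursor and del_cursor() are stand-ins, not the XFS types):

#include <assert.h>
#include <stdbool.h>

/* Hypothetical cursor: 'allocated' counts blocks not yet accounted back. */
struct cursor {
	int	allocated;
	bool	fs_shut_down;
};

/*
 * Mirrors the relaxed ASSERT above: leftover allocations are only legal
 * when the filesystem is shutting down, or when the caller is tearing
 * down after a failure and signals that by passing error != 0.
 */
static void del_cursor(struct cursor *cur, int error)
{
	assert(cur->allocated == 0 || cur->fs_shut_down || error != 0);
	/* ...release the cursor here... */
}

A caller that fails mid-update therefore passes its error into the teardown, del_cursor(cur, error), rather than claiming success with del_cursor(cur, 0).
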
@@ -3247,7 +3272,7 @@ xfs_btree_insrec(
struct xfs_btree_block *block; /* btree block */
struct xfs_buf *bp; /* buffer for block */
union xfs_btree_ptr nptr; /* new block ptr */
- struct xfs_btree_cur *ncur; /* new btree cursor */
+ struct xfs_btree_cur *ncur = NULL; /* new btree cursor */
union xfs_btree_key nkey; /* new block key */
union xfs_btree_key *lkey;
int optr; /* old key/record index */
@@ -3327,7 +3352,7 @@ xfs_btree_insrec(
#ifdef DEBUG
error = xfs_btree_check_block(cur, block, level, bp);
if (error)
- return error;
+ goto error0;
#endif
/*
@@ -3347,7 +3372,7 @@ xfs_btree_insrec(
for (i = numrecs - ptr; i >= 0; i--) {
error = xfs_btree_debug_check_ptr(cur, pp, i, level);
if (error)
- return error;
+ goto error0;
}
xfs_btree_shift_keys(cur, kp, 1, numrecs - ptr + 1);
@@ -3432,6 +3457,8 @@ xfs_btree_insrec(
return 0;
error0:
+ if (ncur)
+ xfs_btree_del_cursor(ncur, error);
return error;
}
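
The shape of the leak fix is the classic NULL-initialised resource with a single exit label: the optional second cursor starts out NULL, every failure path, before or after the possible allocation, funnels through one label, and the label frees only what actually exists. A generic sketch under hypothetical names (struct res, check() and do_insert() are made up, not XFS code):

#include <stdlib.h>

/* Hypothetical resource standing in for the second ("new") btree cursor. */
struct res {
	int	dummy;
};

/* Stand-in for a debug check that can fail; always succeeds here. */
static int check(void)
{
	return 0;
}

/*
 * Same shape as the xfs_btree_insrec() fix: the optional resource starts
 * out NULL, all failure paths funnel through one label, and the label
 * frees only what was actually allocated (free(NULL) is a no-op).
 */
static int do_insert(struct res **out)
{
	struct res	*extra = NULL;	/* may or may not be allocated below */
	int		error;

	error = check();
	if (error)
		goto out_free;		/* early failure: nothing allocated yet */

	extra = malloc(sizeof(*extra));	/* the "split" path allocates a resource */
	if (!extra)
		return -1;

	error = check();
	if (error)
		goto out_free;		/* late failure: must not leak 'extra' */

	*out = extra;			/* success: hand the resource to the caller */
	return 0;

out_free:
	free(extra);
	return error;
}
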
@@ -4523,10 +4550,10 @@ xfs_btree_lblock_verify(
/* sibling pointer verification */
fsb = XFS_DADDR_TO_FSB(mp, xfs_buf_daddr(bp));
fa = xfs_btree_check_lblock_siblings(mp, NULL, -1, fsb,
- be64_to_cpu(block->bb_u.l.bb_leftsib));
+ block->bb_u.l.bb_leftsib);
if (!fa)
fa = xfs_btree_check_lblock_siblings(mp, NULL, -1, fsb,
- be64_to_cpu(block->bb_u.l.bb_rightsib));
+ block->bb_u.l.bb_rightsib);
return fa;
}
@@ -4580,10 +4607,10 @@ xfs_btree_sblock_verify(
agno = xfs_daddr_to_agno(mp, xfs_buf_daddr(bp));
agbno = xfs_daddr_to_agbno(mp, xfs_buf_daddr(bp));
fa = xfs_btree_check_sblock_siblings(mp, NULL, -1, agno, agbno,
- be32_to_cpu(block->bb_u.s.bb_leftsib));
+ block->bb_u.s.bb_leftsib);
if (!fa)
fa = xfs_btree_check_sblock_siblings(mp, NULL, -1, agno, agbno,
- be32_to_cpu(block->bb_u.s.bb_rightsib));
+ block->bb_u.s.bb_rightsib);
return fa;
}