author		Linus Torvalds <torvalds@linux-foundation.org>	2022-06-02 03:23:53 +0300
committer	Linus Torvalds <torvalds@linux-foundation.org>	2022-06-02 03:23:53 +0300
commit		0e5ab8dd87c29640a46aee9e38bc3ba7645b1db0
tree		f708fa5a7c6641af21bbf8fb20d6a310f3f7621e /fs/xfs/libxfs/xfs_btree.c
parent		54eb8462f21fb170a05ad64620f0d8d0cf2b7fb5
parent		7146bda743e6f543af60584fad2cfbb6ce83d8ac
Merge tag 'xfs-5.19-for-linus-2' of git://git.kernel.org/pub/scm/fs/xfs/xfs-linux
Pull more xfs updates from Dave Chinner:
 "This update is largely bug fixes and cleanups for all the code merged
  in the first pull request. The majority of them are to the new logged
  attribute code, but there are also a couple of fixes for other log
  recovery and memory leaks that have recently been found.

  Summary:

   - fix refcount leak in xfs_ifree()
   - fix xfs_buf_cancel structure leaks in log recovery
   - fix dquot leak after failed quota check
   - fix a couple of problematic ASSERTs
   - fix small aim7 perf regression from new btree sibling validation
   - clean up log incompat feature marking for the new logged attribute
     feature
   - disallow logged attributes on legacy V4 filesystem formats
   - fix da state leak when freeing attr intents
   - improve validation of the attr log items in recovery
   - use slab caches for commonly used attr structures
   - fix leaks of attr name/value buffer and reduce copying overhead
     during intent logging
   - remove some dead debug code from log recovery"

* tag 'xfs-5.19-for-linus-2' of git://git.kernel.org/pub/scm/fs/xfs/xfs-linux: (33 commits)
  xfs: fix xfs_ifree() error handling to not leak perag ref
  xfs: move xfs_attr_use_log_assist usage out of libxfs
  xfs: move xfs_attr_use_log_assist out of xfs_log.c
  xfs: warn about LARP once per mount
  xfs: implement per-mount warnings for scrub and shrink usage
  xfs: don't log every time we clear the log incompat flags
  xfs: convert buf_cancel_table allocation to kmalloc_array
  xfs: don't leak xfs_buf_cancel structures when recovery fails
  xfs: refactor buffer cancellation table allocation
  xfs: don't leak btree cursor when insrec fails after a split
  xfs: purge dquots after inode walk fails during quotacheck
  xfs: assert in xfs_btree_del_cursor should take into account error
  xfs: don't assert fail on perag references on teardown
  xfs: avoid unnecessary runtime sibling pointer endian conversions
  xfs: share xattr name and value buffers when logging xattr updates
  xfs: do not use logged xattr updates on V4 filesystems
  xfs: Remove duplicate include
  xfs: reduce IOCB_NOWAIT judgment for retry exclusive unaligned DIO
  xfs: Remove dead code
  xfs: fix typo in comment
  ...
Diffstat (limited to 'fs/xfs/libxfs/xfs_btree.c')
-rw-r--r--	fs/xfs/libxfs/xfs_btree.c | 63 +++++++++++++++++++++++++++++++++++++++++++++------------------
1 file changed, 45 insertions(+), 18 deletions(-)
diff --git a/fs/xfs/libxfs/xfs_btree.c b/fs/xfs/libxfs/xfs_btree.c
index 2aa300f7461f..2eecc49fc1b2 100644
--- a/fs/xfs/libxfs/xfs_btree.c
+++ b/fs/xfs/libxfs/xfs_btree.c
@@ -51,16 +51,31 @@ xfs_btree_magic(
return magic;
}
-static xfs_failaddr_t
+/*
+ * These sibling pointer checks are optimised for null sibling pointers. This
+ * happens a lot, and we don't need to byte swap at runtime if the sibling
+ * pointer is NULL.
+ *
+ * These are explicitly marked as inline because the cost of calling them as
+ * functions instead of inlining them is about 36 bytes extra code per call site
+ * on x86-64. Yes, gcc-11 fails to inline them, and explicit inlining of these
+ * two sibling check functions reduces the compiled code size by over 300
+ * bytes.
+ */
+static inline xfs_failaddr_t
xfs_btree_check_lblock_siblings(
struct xfs_mount *mp,
struct xfs_btree_cur *cur,
int level,
xfs_fsblock_t fsb,
- xfs_fsblock_t sibling)
+ __be64 dsibling)
{
- if (sibling == NULLFSBLOCK)
+ xfs_fsblock_t sibling;
+
+ if (dsibling == cpu_to_be64(NULLFSBLOCK))
return NULL;
+
+ sibling = be64_to_cpu(dsibling);
if (sibling == fsb)
return __this_address;
if (level >= 0) {
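A minimal userspace sketch of the optimisation this hunk applies, assuming glibc's htobe64()/be64toh() in place of the kernel's cpu_to_be64()/be64_to_cpu(), and an illustrative all-ones NULLFSBLOCK. The comparison constant is byte swapped at compile time, so the common null-sibling case pays no runtime conversion:

#include <endian.h>	/* htobe64/be64toh: stand-ins for the kernel helpers */
#include <stdint.h>
#include <stdio.h>

#define NULLFSBLOCK ((uint64_t)-1)	/* all ones, as in XFS; illustrative */

/* Returns 1 if the sibling pointer looks sane, 0 if it is corrupt. */
static inline int sibling_ok(uint64_t fsb, uint64_t dsibling_be)
{
	uint64_t sibling;

	/*
	 * Compare the raw big-endian on-disk value against a constant the
	 * compiler byte swaps at build time: a NULL sibling needs no
	 * runtime conversion at all.
	 */
	if (dsibling_be == htobe64(NULLFSBLOCK))
		return 1;

	/* Only byte swap once we know the value is actually needed. */
	sibling = be64toh(dsibling_be);
	return sibling != fsb;	/* a block must not be its own sibling */
}

int main(void)
{
	printf("null sibling: %d\n", sibling_ok(7, htobe64(NULLFSBLOCK)));
	printf("self sibling: %d\n", sibling_ok(7, htobe64(7)));
	return 0;
}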
@@ -74,17 +89,21 @@ xfs_btree_check_lblock_siblings(
return NULL;
}
-static xfs_failaddr_t
+static inline xfs_failaddr_t
xfs_btree_check_sblock_siblings(
struct xfs_mount *mp,
struct xfs_btree_cur *cur,
int level,
xfs_agnumber_t agno,
xfs_agblock_t agbno,
- xfs_agblock_t sibling)
+ __be32 dsibling)
{
- if (sibling == NULLAGBLOCK)
+ xfs_agblock_t sibling;
+
+ if (dsibling == cpu_to_be32(NULLAGBLOCK))
return NULL;
+
+ sibling = be32_to_cpu(dsibling);
if (sibling == agbno)
return __this_address;
if (level >= 0) {
@@ -136,10 +155,10 @@ __xfs_btree_check_lblock(
fsb = XFS_DADDR_TO_FSB(mp, xfs_buf_daddr(bp));
fa = xfs_btree_check_lblock_siblings(mp, cur, level, fsb,
- be64_to_cpu(block->bb_u.l.bb_leftsib));
+ block->bb_u.l.bb_leftsib);
if (!fa)
fa = xfs_btree_check_lblock_siblings(mp, cur, level, fsb,
- be64_to_cpu(block->bb_u.l.bb_rightsib));
+ block->bb_u.l.bb_rightsib);
return fa;
}
@@ -204,10 +223,10 @@ __xfs_btree_check_sblock(
}
fa = xfs_btree_check_sblock_siblings(mp, cur, level, agno, agbno,
- be32_to_cpu(block->bb_u.s.bb_leftsib));
+ block->bb_u.s.bb_leftsib);
if (!fa)
fa = xfs_btree_check_sblock_siblings(mp, cur, level, agno,
- agbno, be32_to_cpu(block->bb_u.s.bb_rightsib));
+ agbno, block->bb_u.s.bb_rightsib);
return fa;
}
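The two hunks above are the caller side of the same change: __xfs_btree_check_lblock() and __xfs_btree_check_sblock() now pass the raw on-disk __be64/__be32 sibling fields through untouched, so the byte swap happens at most once, inside the checker, and only on the non-null path. This is the btree sibling validation perf fix called out in the merge summary.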
@@ -426,8 +445,14 @@ xfs_btree_del_cursor(
break;
}
+ /*
+ * If we are doing a BMBT update, the number of unaccounted blocks
+ * allocated during this cursor life time should be zero. If it's not
+ * zero, then we should be shut down or on our way to shutdown due to
+ * cancelling a dirty transaction on error.
+ */
ASSERT(cur->bc_btnum != XFS_BTNUM_BMAP || cur->bc_ino.allocated == 0 ||
- xfs_is_shutdown(cur->bc_mp));
+ xfs_is_shutdown(cur->bc_mp) || error != 0);
if (unlikely(cur->bc_flags & XFS_BTREE_STAGING))
kmem_free(cur->bc_ops);
if (!(cur->bc_flags & XFS_BTREE_LONG_PTRS) && cur->bc_ag.pag)
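The new comment and the extra "error != 0" clause relax the assertion exactly as the comment describes: unaccounted BMBT blocks are tolerable only when the filesystem is shut down or the caller is unwinding a failed transaction. A minimal userspace sketch of that invariant, with assert() standing in for ASSERT(), a plain bool for xfs_is_shutdown(), and toy types throughout:

#include <assert.h>
#include <stdbool.h>

struct toy_cur {
	bool	bmap;		/* cursor drives the BMBT */
	int	allocated;	/* blocks allocated but not yet accounted */
};

static void toy_del_cursor(struct toy_cur *cur, int error, bool shutdown)
{
	/* Leftover allocations are only legal on shutdown or error unwind. */
	assert(!cur->bmap || cur->allocated == 0 || shutdown || error != 0);
	/* ... free the cursor here ... */
}

int main(void)
{
	struct toy_cur cur = { .bmap = true, .allocated = 2 };

	toy_del_cursor(&cur, -5, false);	/* error path: must not trip */
	return 0;
}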
@@ -3247,7 +3272,7 @@ xfs_btree_insrec(
struct xfs_btree_block *block; /* btree block */
struct xfs_buf *bp; /* buffer for block */
union xfs_btree_ptr nptr; /* new block ptr */
- struct xfs_btree_cur *ncur; /* new btree cursor */
+ struct xfs_btree_cur *ncur = NULL; /* new btree cursor */
union xfs_btree_key nkey; /* new block key */
union xfs_btree_key *lkey;
int optr; /* old key/record index */
@@ -3327,7 +3352,7 @@ xfs_btree_insrec(
#ifdef DEBUG
error = xfs_btree_check_block(cur, block, level, bp);
if (error)
- return error;
+ goto error0;
#endif
/*
@@ -3347,7 +3372,7 @@ xfs_btree_insrec(
for (i = numrecs - ptr; i >= 0; i--) {
error = xfs_btree_debug_check_ptr(cur, pp, i, level);
if (error)
- return error;
+ goto error0;
}
xfs_btree_shift_keys(cur, kp, 1, numrecs - ptr + 1);
@@ -3432,6 +3457,8 @@ xfs_btree_insrec(
return 0;
error0:
+ if (ncur)
+ xfs_btree_del_cursor(ncur, error);
return error;
}
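This error0 change is the cursor-leak fix named in the merge summary: a block split part-way through insertion allocates a second cursor, and the mid-function "return error" paths redirected above used to leak it. A minimal userspace sketch of the goto-unwind pattern, with every name illustrative rather than taken from the kernel:

#include <stdio.h>
#include <stdlib.h>

struct cursor { int level; };

/* Stand-in for a late debug check that can fail after the split. */
static int check_block(int fail) { return fail ? -1 : 0; }

static int do_insert(int fail)
{
	struct cursor *ncur = NULL;	/* NULL until a split allocates it */
	int error;

	ncur = malloc(sizeof(*ncur));	/* the split created a new cursor */
	if (!ncur)
		return -1;

	error = check_block(fail);
	if (error)
		goto error0;		/* was "return error": leaked ncur */

	free(ncur);			/* normal handover/teardown path */
	return 0;

error0:
	free(ncur);			/* unified unwind frees the cursor */
	return error;
}

int main(void)
{
	printf("ok=%d fail=%d\n", do_insert(0), do_insert(1));
	return 0;
}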
@@ -4523,10 +4550,10 @@ xfs_btree_lblock_verify(
/* sibling pointer verification */
fsb = XFS_DADDR_TO_FSB(mp, xfs_buf_daddr(bp));
fa = xfs_btree_check_lblock_siblings(mp, NULL, -1, fsb,
- be64_to_cpu(block->bb_u.l.bb_leftsib));
+ block->bb_u.l.bb_leftsib);
if (!fa)
fa = xfs_btree_check_lblock_siblings(mp, NULL, -1, fsb,
- be64_to_cpu(block->bb_u.l.bb_rightsib));
+ block->bb_u.l.bb_rightsib);
return fa;
}
@@ -4580,10 +4607,10 @@ xfs_btree_sblock_verify(
agno = xfs_daddr_to_agno(mp, xfs_buf_daddr(bp));
agbno = xfs_daddr_to_agbno(mp, xfs_buf_daddr(bp));
fa = xfs_btree_check_sblock_siblings(mp, NULL, -1, agno, agbno,
- be32_to_cpu(block->bb_u.s.bb_leftsib));
+ block->bb_u.s.bb_leftsib);
if (!fa)
fa = xfs_btree_check_sblock_siblings(mp, NULL, -1, agno, agbno,
- be32_to_cpu(block->bb_u.s.bb_rightsib));
+ block->bb_u.s.bb_rightsib);
return fa;
}
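A closing note on these last two hunks: the standalone verifiers xfs_btree_lblock_verify() and xfs_btree_sblock_verify() call the sibling checks with cur == NULL and level == -1, and the "level >= 0" guard visible inside the checkers skips the level-dependent tests when no cursor context exists. These call sites therefore get the same deferred-byteswap treatment as the cursor-based ones without needing any extra plumbing.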