path: root/fs/xfs/libxfs/xfs_btree.c
author	Darrick J. Wong <darrick.wong@oracle.com>	2020-03-11 20:51:50 +0300
committer	Darrick J. Wong <darrick.wong@oracle.com>	2020-03-18 18:12:23 +0300
commit	60e3d7070749554227fbb636a69a4282ab930f86 (patch)
tree	824eba74a6ace7294350af322ef03e48bd0c627a	/fs/xfs/libxfs/xfs_btree.c
parent	349e1c0380dbb7f552e4ea61b479c293eb076b3f (diff)
download	linux-60e3d7070749554227fbb636a69a4282ab930f86.tar.xz
xfs: support bulk loading of staged btrees
Add a new btree function that enables us to bulk load a btree cursor. This will be used by the upcoming online repair patches to generate new btrees. This avoids the programmatic inefficiency of calling xfs_btree_insert in a loop (which generates a lot of log traffic) in favor of stamping out new btree blocks with ordered buffers, and then committing both the new root and scheduling the removal of the old btree blocks in a single transaction commit.

The design of this new generic code is based on the btree rebuilding code in phase 5 of xfs_repair, with the explicit goal of sharing that code between scrub and repair. It has the additional feature of being able to control btree block loading factors.

Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>
Reviewed-by: Brian Foster <bfoster@redhat.com>
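As a rough sketch of the intended call pattern (not part of this diff, which only un-statics low-level helpers for the bulk loader): a repair caller computes the new btree's geometry, reserves that many blocks, and then bulk loads pre-sorted records bottom-up. The xfs_btree_bload_compute_geometry()/xfs_btree_bload() entry points and struct xfs_btree_bload come from the staging patches elsewhere in this series; my_get_record() and my_claim_block() are hypothetical caller-supplied callbacks.

/*
 * Usage sketch, assuming the xfs_btree_bload* API added by the
 * xfs_btree_staging patches in this series.  my_get_record() and
 * my_claim_block() are hypothetical placeholders for the callbacks a
 * real caller would supply.
 */
static int
rebuild_staged_btree(
	struct xfs_btree_cur	*cur,		/* cursor for a staged btree */
	uint64_t		nr_records,	/* pre-sorted records to load */
	void			*priv)
{
	struct xfs_btree_bload	bbl = {
		.get_record	= my_get_record,	/* copy next record into cursor */
		.claim_block	= my_claim_block,	/* hand out a preallocated block */
		.leaf_slack	= -1,			/* negative == default loading factor */
		.node_slack	= -1,
	};
	int			error;

	/* Figure out how many blocks and levels the new btree needs. */
	error = xfs_btree_bload_compute_geometry(cur, &bbl, nr_records);
	if (error)
		return error;

	/* ... caller reserves bbl.nr_blocks worth of free space here ... */

	/* Stamp out the new btree bottom-up with ordered buffers. */
	return xfs_btree_bload(cur, &bbl, priv);
}

The leaf_slack/node_slack knobs are the loading-factor control mentioned above: they set how many free record slots each newly built block keeps, so that inserts made soon after the rebuild do not immediately split the freshly loaded tree.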
Diffstat (limited to 'fs/xfs/libxfs/xfs_btree.c')
-rw-r--r--	fs/xfs/libxfs/xfs_btree.c	14
1 file changed, 7 insertions(+), 7 deletions(-)
diff --git a/fs/xfs/libxfs/xfs_btree.c b/fs/xfs/libxfs/xfs_btree.c
index 88223c3cc751..2d25bab68764 100644
--- a/fs/xfs/libxfs/xfs_btree.c
+++ b/fs/xfs/libxfs/xfs_btree.c
@@ -1027,7 +1027,7 @@ xfs_btree_ptr_is_null(
return ptr->s == cpu_to_be32(NULLAGBLOCK);
}
-STATIC void
+void
xfs_btree_set_ptr_null(
struct xfs_btree_cur *cur,
union xfs_btree_ptr *ptr)
@@ -1063,7 +1063,7 @@ xfs_btree_get_sibling(
}
}
-STATIC void
+void
xfs_btree_set_sibling(
struct xfs_btree_cur *cur,
struct xfs_btree_block *block,
@@ -1141,7 +1141,7 @@ xfs_btree_init_block(
btnum, level, numrecs, owner, 0);
}
-STATIC void
+void
xfs_btree_init_block_cur(
struct xfs_btree_cur *cur,
struct xfs_buf *bp,
@@ -1233,7 +1233,7 @@ xfs_btree_set_refs(
}
}
-STATIC int
+int
xfs_btree_get_buf_block(
struct xfs_btree_cur *cur,
union xfs_btree_ptr *ptr,
@@ -1293,7 +1293,7 @@ xfs_btree_read_buf_block(
/*
* Copy keys from one btree block to another.
*/
-STATIC void
+void
xfs_btree_copy_keys(
struct xfs_btree_cur *cur,
union xfs_btree_key *dst_key,
@@ -1321,11 +1321,11 @@ xfs_btree_copy_recs(
/*
* Copy block pointers from one btree block to another.
*/
-STATIC void
+void
xfs_btree_copy_ptrs(
struct xfs_btree_cur *cur,
union xfs_btree_ptr *dst_ptr,
- union xfs_btree_ptr *src_ptr,
+ const union xfs_btree_ptr *src_ptr,
int numptrs)
{
ASSERT(numptrs >= 0);