author     Kent Overstreet <kent.overstreet@linux.dev>  2023-09-13 00:16:02 +0300
committer  Kent Overstreet <kent.overstreet@linux.dev>  2023-10-23 00:10:13 +0300
commit     6bd68ec266ad71827ef940151067b67b62fb8fed (patch)
tree       158da84712ff58061a2bfbbe6f0e858b58c6140d /fs/bcachefs/fs-io-buffered.c
parent     96dea3d599dbc31f59eb786af2ac5079122beb88 (diff)
download   linux-6bd68ec266ad71827ef940151067b67b62fb8fed.tar.xz
bcachefs: Heap allocate btree_trans
We're using more stack than we'd like in a number of functions, and btree_trans is the biggest object that we stack allocate. But we have to do a heap allocation to initialize it anyway, so there's no real downside to heap allocating the entire thing.

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
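Concretely, the patch trades the on-stack transaction plus bch2_trans_init()/bch2_trans_exit() pair for a heap-allocated transaction returned by bch2_trans_get() and released by bch2_trans_put(). A minimal sketch of the two lifecycles, following the hunks below (c is the usual struct bch_fs pointer):

	/* Before: transaction object lives on the caller's stack */
	struct btree_trans trans;

	bch2_trans_init(&trans, c, 0, 0);
	/* ... btree operations take &trans ... */
	bch2_trans_exit(&trans);

	/* After: transaction is heap allocated and passed by pointer */
	struct btree_trans *trans = bch2_trans_get(c);
	/* ... btree operations take trans ... */
	bch2_trans_put(trans);

Callers that previously declared a struct btree_trans only need the pointer, which is what lets the declarations in bch2_readahead() and __bchfs_readfolio() shrink.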
Diffstat (limited to 'fs/bcachefs/fs-io-buffered.c')
-rw-r--r--  fs/bcachefs/fs-io-buffered.c  16
1 file changed, 5 insertions(+), 11 deletions(-)
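The last hunk also switches __bchfs_readfolio() to the bch2_trans_run() helper, which bundles the get/put pair around a single expression. A plausible sketch of that helper, assuming it follows the same pattern as this patch (the real definition lives in the bcachefs btree headers):

	#define bch2_trans_run(_c, _do)						\
	({									\
		struct btree_trans *trans = bch2_trans_get(_c);			\
		int _ret = (_do);						\
		bch2_trans_put(trans);						\
		_ret;								\
	})

That shape explains the comma expression at the call site below: bchfs_read() returns void, so the caller wraps it as (bchfs_read(...), 0) to give the macro an int result.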
diff --git a/fs/bcachefs/fs-io-buffered.c b/fs/bcachefs/fs-io-buffered.c
index 7650d8b3122a..58ccc7b91ac7 100644
--- a/fs/bcachefs/fs-io-buffered.c
+++ b/fs/bcachefs/fs-io-buffered.c
@@ -270,7 +270,7 @@ void bch2_readahead(struct readahead_control *ractl)
struct bch_inode_info *inode = to_bch_ei(ractl->mapping->host);
struct bch_fs *c = inode->v.i_sb->s_fs_info;
struct bch_io_opts opts;
- struct btree_trans trans;
+ struct btree_trans *trans = bch2_trans_get(c);
struct folio *folio;
struct readpages_iter readpages_iter;
int ret;
@@ -280,8 +280,6 @@ void bch2_readahead(struct readahead_control *ractl)
ret = readpages_iter_init(&readpages_iter, ractl);
BUG_ON(ret);
- bch2_trans_init(&trans, c, 0, 0);
-
bch2_pagecache_add_get(inode);
while ((folio = readpage_iter_peek(&readpages_iter))) {
@@ -300,31 +298,27 @@ void bch2_readahead(struct readahead_control *ractl)
rbio->bio.bi_end_io = bch2_readpages_end_io;
BUG_ON(!bio_add_folio(&rbio->bio, folio, folio_size(folio), 0));
- bchfs_read(&trans, rbio, inode_inum(inode),
+ bchfs_read(trans, rbio, inode_inum(inode),
&readpages_iter);
- bch2_trans_unlock(&trans);
+ bch2_trans_unlock(trans);
}
bch2_pagecache_add_put(inode);
- bch2_trans_exit(&trans);
+ bch2_trans_put(trans);
darray_exit(&readpages_iter.folios);
}
static void __bchfs_readfolio(struct bch_fs *c, struct bch_read_bio *rbio,
subvol_inum inum, struct folio *folio)
{
- struct btree_trans trans;
-
bch2_folio_create(folio, __GFP_NOFAIL);
rbio->bio.bi_opf = REQ_OP_READ|REQ_SYNC;
rbio->bio.bi_iter.bi_sector = folio_sector(folio);
BUG_ON(!bio_add_folio(&rbio->bio, folio, folio_size(folio), 0));
- bch2_trans_init(&trans, c, 0, 0);
- bchfs_read(&trans, rbio, inum, NULL);
- bch2_trans_exit(&trans);
+ bch2_trans_run(c, (bchfs_read(trans, rbio, inum, NULL), 0));
}
static void bch2_read_single_folio_end_io(struct bio *bio)