author    Kent Overstreet <kent.overstreet@gmail.com>  2019-10-20 02:03:23 +0300
committer Kent Overstreet <kent.overstreet@linux.dev>  2023-10-23 00:08:30 +0300
commit    2a9101a9898920a04e77f70f7bbee84d2c76c527 (patch)
tree      afa365a07a12fdd45d2fb49ac81933b4706e5b3c /fs/bcachefs
parent    8f1965391cc421ad4e50b4dfe5e06aae661f8870 (diff)
download  linux-2a9101a9898920a04e77f70f7bbee84d2c76c527.tar.xz
bcachefs: Refactor bch2_trans_commit() path
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
Diffstat (limited to 'fs/bcachefs')
-rw-r--r--  fs/bcachefs/bcachefs.h          |   1
-rw-r--r--  fs/bcachefs/btree_io.h          |   6
-rw-r--r--  fs/bcachefs/btree_iter.h        |   5
-rw-r--r--  fs/bcachefs/btree_types.h       |  10
-rw-r--r--  fs/bcachefs/btree_update.h      |  27
-rw-r--r--  fs/bcachefs/btree_update_leaf.c | 402
-rw-r--r--  fs/bcachefs/fs-io.c             |  28
-rw-r--r--  fs/bcachefs/io.c                |   1
-rw-r--r--  fs/bcachefs/journal.h           |   2
-rw-r--r--  fs/bcachefs/reflink.c           |   5
10 files changed, 259 insertions, 228 deletions
diff --git a/fs/bcachefs/bcachefs.h b/fs/bcachefs/bcachefs.h
index c5c98aae8bdb..093dc906353d 100644
--- a/fs/bcachefs/bcachefs.h
+++ b/fs/bcachefs/bcachefs.h
@@ -301,7 +301,6 @@ do { \
x(btree_node_sort) \
x(btree_node_read) \
x(btree_gc) \
- x(btree_update) \
x(btree_lock_contended_read) \
x(btree_lock_contended_intent) \
x(btree_lock_contended_write) \
diff --git a/fs/bcachefs/btree_io.h b/fs/bcachefs/btree_io.h
index 3fb0aa20b340..69516ec34b89 100644
--- a/fs/bcachefs/btree_io.h
+++ b/fs/bcachefs/btree_io.h
@@ -62,10 +62,10 @@ bool __bch2_compact_whiteouts(struct bch_fs *, struct btree *, enum compact_mode
static inline unsigned should_compact_bset_lazy(struct btree *b, struct bset_tree *t)
{
- unsigned bset_u64s = le16_to_cpu(bset(b, t)->u64s);
- unsigned dead_u64s = bset_u64s - b->nr.bset_u64s[t - b->set];
+ unsigned total_u64s = bset_u64s(t);
+ unsigned dead_u64s = total_u64s - b->nr.bset_u64s[t - b->set];
- return dead_u64s > 128 && dead_u64s * 3 > bset_u64s;
+ return dead_u64s > 64 && dead_u64s * 3 > total_u64s;
}
static inline bool bch2_maybe_compact_whiteouts(struct bch_fs *c, struct btree *b)
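[Note: the hunk above lowers the lazy-compaction trigger from 128 to 64 dead u64s and measures the dead fraction against the whole bset. A minimal numeric sketch of the new predicate, in plain C with hypothetical numbers, not kernel code:]

#include <stdbool.h>
#include <stdio.h>

/* compact once more than 64 u64s are dead and dead keys exceed a third of the bset */
static bool should_compact(unsigned total_u64s, unsigned live_u64s)
{
	unsigned dead_u64s = total_u64s - live_u64s;

	return dead_u64s > 64 && dead_u64s * 3 > total_u64s;
}

int main(void)
{
	printf("%d\n", should_compact(300, 180)); /* dead = 120: 120 > 64 and 360 > 300 -> compact */
	printf("%d\n", should_compact(300, 250)); /* dead = 50: below the 64 threshold -> leave it */
	return 0;
}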
diff --git a/fs/bcachefs/btree_iter.h b/fs/bcachefs/btree_iter.h
index 6f81be26e674..1b7262d7e284 100644
--- a/fs/bcachefs/btree_iter.h
+++ b/fs/bcachefs/btree_iter.h
@@ -48,6 +48,11 @@ static inline int btree_iter_err(const struct btree_iter *iter)
/* Iterate over iters within a transaction: */
+#define trans_for_each_iter_all(_trans, _iter) \
+ for (_iter = (_trans)->iters; \
+ _iter < (_trans)->iters + (_trans)->nr_iters; \
+ _iter++)
+
static inline struct btree_iter *
__trans_next_iter(struct btree_trans *trans, unsigned idx)
{
diff --git a/fs/bcachefs/btree_types.h b/fs/bcachefs/btree_types.h
index 48ebc886aaa2..3a26a8802e86 100644
--- a/fs/bcachefs/btree_types.h
+++ b/fs/bcachefs/btree_types.h
@@ -255,7 +255,6 @@ struct btree_insert_entry {
struct btree_trans {
struct bch_fs *c;
unsigned long ip;
- u64 commit_start;
u64 iters_linked;
u64 iters_live;
@@ -283,12 +282,11 @@ struct btree_trans {
struct disk_reservation *disk_res;
unsigned flags;
unsigned journal_u64s;
+ struct replicas_delta_list *fs_usage_deltas;
struct btree_iter iters_onstack[2];
struct btree_insert_entry updates_onstack[6];
u8 updates_sorted_onstack[6];
-
- struct replicas_delta_list *fs_usage_deltas;
};
#define BTREE_FLAG(flag) \
@@ -420,6 +418,12 @@ static inline unsigned btree_bkey_first_offset(const struct bset_tree *t)
__btree_node_offset_to_key(_b, (_t)->end_offset); \
})
+static inline unsigned bset_u64s(struct bset_tree *t)
+{
+ return t->end_offset - t->data_offset -
+ sizeof(struct bset) / sizeof(u64);
+}
+
static inline unsigned bset_byte_offset(struct btree *b, void *i)
{
return i - (void *) b->data;
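[Note: a worked sketch of what the new bset_u64s() helper computes. Plain C; the struct sizes and offsets below are placeholders, not the real on-disk struct bset. The offsets are in u64s from the start of the node, so subtracting the bset header leaves just the key u64s, without a le16_to_cpu() load of bset->u64s:]

#include <stdint.h>
#include <stdio.h>

struct fake_bset { uint64_t hdr[4]; };                   /* placeholder header: 4 u64s */
struct fake_bset_tree { uint16_t data_offset, end_offset; };

static unsigned fake_bset_u64s(const struct fake_bset_tree *t)
{
	return t->end_offset - t->data_offset -
		sizeof(struct fake_bset) / sizeof(uint64_t);
}

int main(void)
{
	struct fake_bset_tree t = { .data_offset = 8, .end_offset = 140 }; /* hypothetical */

	printf("%u\n", fake_bset_u64s(&t)); /* 140 - 8 - 4 = 128 key u64s */
	return 0;
}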
diff --git a/fs/bcachefs/btree_update.h b/fs/bcachefs/btree_update.h
index db18527a239f..ad8cbf3fb778 100644
--- a/fs/bcachefs/btree_update.h
+++ b/fs/bcachefs/btree_update.h
@@ -93,9 +93,30 @@ int bch2_btree_node_rewrite(struct bch_fs *c, struct btree_iter *,
int bch2_btree_node_update_key(struct bch_fs *, struct btree_iter *,
struct btree *, struct bkey_i_btree_ptr *);
-int bch2_trans_commit(struct btree_trans *,
- struct disk_reservation *,
- u64 *, unsigned);
+int __bch2_trans_commit(struct btree_trans *);
+
+/**
+ * bch2_trans_commit - insert keys at given iterator positions
+ *
+ * This is main entry point for btree updates.
+ *
+ * Return values:
+ * -EINTR: locking changed, this function should be called again. Only returned
+ * if passed BTREE_INSERT_ATOMIC.
+ * -EROFS: filesystem read only
+ * -EIO: journal or btree node IO error
+ */
+static inline int bch2_trans_commit(struct btree_trans *trans,
+ struct disk_reservation *disk_res,
+ u64 *journal_seq,
+ unsigned flags)
+{
+ trans->disk_res = disk_res;
+ trans->journal_seq = journal_seq;
+ trans->flags = flags;
+
+ return __bch2_trans_commit(trans);
+}
static inline void bch2_trans_update(struct btree_trans *trans,
struct btree_iter *iter,
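[Note: the new wrapper just stashes disk_res/journal_seq/flags in the transaction and calls __bch2_trans_commit(). A hedged caller fragment, not compilable on its own: bch2_trans_begin() and the iterator setup are assumptions, only the bch2_trans_update()/bch2_trans_commit() calls follow the signatures shown in this patch. With BTREE_INSERT_ATOMIC, -EINTR means locking changed and the whole set of updates should be rebuilt and retried:]

	do {
		bch2_trans_begin(&trans);                 /* assumed reset helper */
		iter = bch2_trans_get_iter(&trans, ...);  /* hypothetical iterator setup */
		bch2_trans_update(&trans, iter, &new_key);

		ret = bch2_trans_commit(&trans, &disk_res, &journal_seq,
					BTREE_INSERT_ATOMIC|
					BTREE_INSERT_NOFAIL);
	} while (ret == -EINTR);                          /* only returned with _ATOMIC */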
diff --git a/fs/bcachefs/btree_update_leaf.c b/fs/bcachefs/btree_update_leaf.c
index 0b9b573a0d72..38a27d3a3b40 100644
--- a/fs/bcachefs/btree_update_leaf.c
+++ b/fs/bcachefs/btree_update_leaf.c
@@ -20,16 +20,11 @@
#include <linux/sort.h>
static inline bool same_leaf_as_prev(struct btree_trans *trans,
- unsigned sorted_idx)
+ unsigned idx)
{
- struct btree_insert_entry *i = trans->updates +
- trans->updates_sorted[sorted_idx];
- struct btree_insert_entry *prev = sorted_idx
- ? trans->updates + trans->updates_sorted[sorted_idx - 1]
- : NULL;
-
- return prev &&
- i->iter->l[0].b == prev->iter->l[0].b;
+ return idx &&
+ trans->updates[trans->updates_sorted[idx]].iter->l[0].b ==
+ trans->updates[trans->updates_sorted[idx - 1]].iter->l[0].b;
}
#define trans_for_each_update_sorted(_trans, _i, _iter) \
@@ -92,8 +87,6 @@ static inline void btree_trans_sort_updates(struct btree_trans *trans)
trans->updates_sorted[pos] = l - trans->updates;
nr++;
}
-
- BUG_ON(nr != trans->nr_updates);
}
/* Inserting into a given leaf node (last stage of insert): */
@@ -266,8 +259,8 @@ static void bch2_insert_fixup_key(struct btree_trans *trans,
EBUG_ON(insert->k->k.u64s >
bch_btree_keys_u64s_remaining(trans->c, l->b));
- if (bch2_btree_bset_insert_key(iter, l->b, &l->iter,
- insert->k))
+ if (likely(bch2_btree_bset_insert_key(iter, l->b, &l->iter,
+ insert->k)))
bch2_btree_journal_key(trans, iter, insert->k);
}
@@ -280,7 +273,8 @@ static void btree_insert_key_leaf(struct btree_trans *trans,
struct bch_fs *c = trans->c;
struct btree_iter *iter = insert->iter;
struct btree *b = iter->l[0].b;
- int old_u64s = le16_to_cpu(btree_bset_last(b)->u64s);
+ struct bset_tree *t = bset_tree_last(b);
+ int old_u64s = bset_u64s(t);
int old_live_u64s = b->nr.live_u64s;
int live_u64s_added, u64s_added;
@@ -290,7 +284,7 @@ static void btree_insert_key_leaf(struct btree_trans *trans,
bch2_insert_fixup_extent(trans, insert);
live_u64s_added = (int) b->nr.live_u64s - old_live_u64s;
- u64s_added = (int) le16_to_cpu(btree_bset_last(b)->u64s) - old_u64s;
+ u64s_added = (int) bset_u64s(t) - old_u64s;
if (b->sib_u64s[0] != U16_MAX && live_u64s_added < 0)
b->sib_u64s[0] = max(0, (int) b->sib_u64s[0] + live_u64s_added);
@@ -323,26 +317,12 @@ static inline void btree_insert_entry_checks(struct btree_trans *trans,
bch2_bkey_invalid(c, bkey_i_to_s_c(i->k), i->iter->btree_id));
}
-static int bch2_trans_journal_preres_get(struct btree_trans *trans)
+static noinline int
+bch2_trans_journal_preres_get_cold(struct btree_trans *trans, unsigned u64s)
{
struct bch_fs *c = trans->c;
- struct btree_insert_entry *i;
- unsigned u64s = 0;
int ret;
- trans_for_each_update(trans, i)
- if (0)
- u64s += jset_u64s(i->k->k.u64s);
-
- if (!u64s)
- return 0;
-
- ret = bch2_journal_preres_get(&c->journal,
- &trans->journal_preres, u64s,
- JOURNAL_RES_GET_NONBLOCK);
- if (ret != -EAGAIN)
- return ret;
-
bch2_trans_unlock(trans);
ret = bch2_journal_preres_get(&c->journal,
@@ -358,8 +338,8 @@ static int bch2_trans_journal_preres_get(struct btree_trans *trans)
return 0;
}
-static int bch2_trans_journal_res_get(struct btree_trans *trans,
- unsigned flags)
+static inline int bch2_trans_journal_res_get(struct btree_trans *trans,
+ unsigned flags)
{
struct bch_fs *c = trans->c;
int ret;
@@ -438,63 +418,43 @@ static inline bool update_has_nontrans_triggers(struct btree_insert_entry *i)
(1U << i->iter->btree_id);
}
-/*
- * Get journal reservation, take write locks, and attempt to do btree update(s):
- */
-static inline int do_btree_insert_at(struct btree_trans *trans,
- struct btree_insert_entry **stopped_at)
+static noinline void bch2_btree_iter_unlock_noinline(struct btree_iter *iter)
+{
+ __bch2_btree_iter_unlock(iter);
+}
+
+static noinline void bch2_trans_mark_gc(struct btree_trans *trans)
{
struct bch_fs *c = trans->c;
- struct bch_fs_usage_online *fs_usage = NULL;
struct btree_insert_entry *i;
- struct btree_iter *iter;
unsigned mark_flags = trans->flags & BTREE_INSERT_BUCKET_INVALIDATE
? BCH_BUCKET_MARK_BUCKET_INVALIDATE
: 0;
- int ret;
- trans_for_each_update(trans, i)
- BUG_ON(i->iter->uptodate >= BTREE_ITER_NEED_RELOCK);
+ if (unlikely(trans->flags & BTREE_INSERT_NOMARK))
+ return;
- /*
- * note: running triggers will append more updates to the list of
- * updates as we're walking it:
- */
trans_for_each_update(trans, i)
- if (likely(!(trans->flags & BTREE_INSERT_NOMARK)) &&
- update_has_trans_triggers(i)) {
- ret = bch2_trans_mark_update(trans, i->iter, i->k);
- if (ret == -EINTR)
- trace_trans_restart_mark(trans->ip);
- if (ret)
- goto out_clear_replicas;
- }
-
- trans_for_each_iter(trans, iter) {
- if (iter->nodes_locked != iter->nodes_intent_locked) {
- BUG_ON(iter->flags & BTREE_ITER_KEEP_UNTIL_COMMIT);
- BUG_ON(trans->iters_live & (1ULL << iter->idx));
- __bch2_btree_iter_unlock(iter);
- }
- }
-
- if (IS_ENABLED(CONFIG_BCACHEFS_DEBUG))
- trans_for_each_update(trans, i)
- btree_insert_entry_checks(trans, i);
- bch2_btree_trans_verify_locks(trans);
-
- /*
- * No more updates can be added - sort updates so we can take write
- * locks in the correct order:
- */
- btree_trans_sort_updates(trans);
+ if (gc_visited(c, gc_pos_btree_node(i->iter->l[0].b)))
+ bch2_mark_update(trans, i, NULL,
+ mark_flags|BCH_BUCKET_MARK_GC);
+}
- btree_trans_lock_write(trans, true);
+static inline int
+bch2_trans_commit_write_locked(struct btree_trans *trans,
+ struct btree_insert_entry **stopped_at)
+{
+ struct bch_fs *c = trans->c;
+ struct bch_fs_usage_online *fs_usage = NULL;
+ struct btree_insert_entry *i;
+ unsigned mark_flags = trans->flags & BTREE_INSERT_BUCKET_INVALIDATE
+ ? BCH_BUCKET_MARK_BUCKET_INVALIDATE
+ : 0;
+ int ret;
if (race_fault()) {
- ret = -EINTR;
trace_trans_restart_fault_inject(trans->ip);
- goto out;
+ return -EINTR;
}
/*
@@ -504,7 +464,7 @@ static inline int do_btree_insert_at(struct btree_trans *trans,
*/
ret = btree_trans_check_can_insert(trans, stopped_at);
if (ret)
- goto out;
+ return ret;
trans_for_each_update(trans, i) {
if (!btree_node_type_needs_gc(i->iter->btree_id))
@@ -515,10 +475,11 @@ static inline int do_btree_insert_at(struct btree_trans *trans,
fs_usage = bch2_fs_usage_scratch_get(c);
}
+ /* Must be called under mark_lock: */
if (!bch2_bkey_replicas_marked_locked(c,
bkey_i_to_s_c(i->k), true)) {
ret = BTREE_INSERT_NEED_MARK_REPLICAS;
- goto out;
+ goto err;
}
}
@@ -527,16 +488,17 @@ static inline int do_btree_insert_at(struct btree_trans *trans,
* succeed:
*/
if (likely(!(trans->flags & BTREE_INSERT_JOURNAL_REPLAY))) {
- trans->journal_u64s = 0;
-
- trans_for_each_update(trans, i)
- trans->journal_u64s += jset_u64s(i->k->k.u64s);
-
- ret = bch2_trans_journal_res_get(trans, JOURNAL_RES_GET_NONBLOCK);
+ ret = bch2_trans_journal_res_get(trans,
+ JOURNAL_RES_GET_NONBLOCK);
if (ret)
- goto out;
+ goto err;
}
+ /*
+ * Not allowed to fail after we've gotten our journal reservation - we
+ * have to use it:
+ */
+
if (!(trans->flags & BTREE_INSERT_JOURNAL_REPLAY)) {
if (journal_seq_verify(c))
trans_for_each_update(trans, i)
@@ -558,39 +520,122 @@ static inline int do_btree_insert_at(struct btree_trans *trans,
if (fs_usage)
bch2_trans_fs_usage_apply(trans, fs_usage);
- if (likely(!(trans->flags & BTREE_INSERT_NOMARK)) &&
- unlikely(c->gc_pos.phase))
- trans_for_each_update(trans, i)
- if (gc_visited(c, gc_pos_btree_node(i->iter->l[0].b)))
- bch2_mark_update(trans, i, NULL,
- mark_flags|
- BCH_BUCKET_MARK_GC);
+ if (unlikely(c->gc_pos.phase))
+ bch2_trans_mark_gc(trans);
trans_for_each_update(trans, i)
do_btree_insert_one(trans, i);
-out:
- BUG_ON(ret &&
- (trans->flags & BTREE_INSERT_JOURNAL_RESERVED) &&
- trans->journal_res.ref);
-
- btree_trans_lock_write(trans, false);
-
+err:
if (fs_usage) {
bch2_fs_usage_scratch_put(c, fs_usage);
percpu_up_read(&c->mark_lock);
}
- bch2_journal_res_put(&c->journal, &trans->journal_res);
-out_clear_replicas:
- if (trans->fs_usage_deltas) {
- trans->fs_usage_deltas->used = 0;
- memset((void *) trans->fs_usage_deltas +
- offsetof(struct replicas_delta_list, memset_start), 0,
- (void *) &trans->fs_usage_deltas->memset_end -
- (void *) &trans->fs_usage_deltas->memset_start);
+ return ret;
+}
+
+/*
+ * Get journal reservation, take write locks, and attempt to do btree update(s):
+ */
+static inline int do_bch2_trans_commit(struct btree_trans *trans,
+ struct btree_insert_entry **stopped_at)
+{
+ struct btree_insert_entry *i;
+ struct btree_iter *iter;
+ unsigned idx, u64s, journal_preres_u64s = 0;
+ int ret;
+
+ /*
+ * note: running triggers will append more updates to the list of
+ * updates as we're walking it:
+ */
+ trans_for_each_update(trans, i) {
+ /* we know trans->nounlock won't be set here: */
+ if (unlikely(!(i->iter->locks_want < 1
+ ? __bch2_btree_iter_upgrade(i->iter, 1)
+ : i->iter->uptodate <= BTREE_ITER_NEED_PEEK))) {
+ trace_trans_restart_upgrade(trans->ip);
+ return -EINTR;
+ }
+
+ if (likely(!(trans->flags & BTREE_INSERT_NOMARK)) &&
+ update_has_trans_triggers(i)) {
+ ret = bch2_trans_mark_update(trans, i->iter, i->k);
+ if (unlikely(ret)) {
+ if (ret == -EINTR)
+ trace_trans_restart_mark(trans->ip);
+ return ret;
+ }
+ }
+
+ u64s = jset_u64s(i->k->k.u64s);
+ if (0)
+ journal_preres_u64s += u64s;
+ trans->journal_u64s += u64s;
}
- return ret;
+ ret = bch2_journal_preres_get(&trans->c->journal,
+ &trans->journal_preres, journal_preres_u64s,
+ JOURNAL_RES_GET_NONBLOCK);
+ if (unlikely(ret == -EAGAIN))
+ ret = bch2_trans_journal_preres_get_cold(trans,
+ journal_preres_u64s);
+ if (unlikely(ret))
+ return ret;
+
+ /*
+ * Can't be holding any read locks when we go to take write locks:
+ *
+ * note - this must be done after bch2_trans_journal_preres_get_cold()
+ * or anything else that might call bch2_trans_relock(), since that
+ * would just retake the read locks:
+ */
+ trans_for_each_iter_all(trans, iter) {
+ if (iter->nodes_locked != iter->nodes_intent_locked) {
+ EBUG_ON(iter->flags & BTREE_ITER_KEEP_UNTIL_COMMIT);
+ EBUG_ON(trans->iters_live & (1ULL << iter->idx));
+ bch2_btree_iter_unlock_noinline(iter);
+ }
+ }
+
+ if (IS_ENABLED(CONFIG_BCACHEFS_DEBUG))
+ trans_for_each_update(trans, i)
+ btree_insert_entry_checks(trans, i);
+ bch2_btree_trans_verify_locks(trans);
+
+ /*
+ * No more updates can be added - sort updates so we can take write
+ * locks in the correct order:
+ */
+ btree_trans_sort_updates(trans);
+
+ btree_trans_lock_write(trans, true);
+ ret = bch2_trans_commit_write_locked(trans, stopped_at);
+ btree_trans_lock_write(trans, false);
+
+ /*
+ * Drop journal reservation after dropping write locks, since dropping
+ * the journal reservation may kick off a journal write:
+ */
+ bch2_journal_res_put(&trans->c->journal, &trans->journal_res);
+
+ if (unlikely(ret))
+ return ret;
+
+ if (trans->flags & BTREE_INSERT_NOUNLOCK)
+ trans->nounlock = true;
+
+ trans_for_each_update_sorted(trans, i, idx)
+ if (!same_leaf_as_prev(trans, idx))
+ bch2_foreground_maybe_merge(trans->c, i->iter,
+ 0, trans->flags);
+
+ trans->nounlock = false;
+
+ trans_for_each_update(trans, i)
+ bch2_btree_iter_downgrade(i->iter);
+
+ return 0;
}
static noinline
@@ -698,66 +743,27 @@ int bch2_trans_commit_error(struct btree_trans *trans,
return ret;
}
-/**
- * __bch_btree_insert_at - insert keys at given iterator positions
- *
- * This is main entry point for btree updates.
- *
- * Return values:
- * -EINTR: locking changed, this function should be called again. Only returned
- * if passed BTREE_INSERT_ATOMIC.
- * -EROFS: filesystem read only
- * -EIO: journal or btree node IO error
- */
-static int __bch2_trans_commit(struct btree_trans *trans,
- struct btree_insert_entry **stopped_at)
+static noinline int
+bch2_trans_commit_get_rw_cold(struct btree_trans *trans)
{
struct bch_fs *c = trans->c;
- struct btree_insert_entry *i;
- unsigned iter;
int ret;
- trans_for_each_update(trans, i) {
- if (!bch2_btree_iter_upgrade(i->iter, 1)) {
- trace_trans_restart_upgrade(trans->ip);
- ret = -EINTR;
- goto err;
- }
-
- ret = btree_iter_err(i->iter);
- if (ret)
- goto err;
- }
+ if (likely(!(trans->flags & BTREE_INSERT_LAZY_RW)))
+ return -EROFS;
- ret = do_btree_insert_at(trans, stopped_at);
- if (unlikely(ret))
- goto err;
-
- if (trans->flags & BTREE_INSERT_NOUNLOCK)
- trans->nounlock = true;
-
- trans_for_each_update_sorted(trans, i, iter)
- if (!same_leaf_as_prev(trans, iter))
- bch2_foreground_maybe_merge(c, i->iter,
- 0, trans->flags);
-
- trans->nounlock = false;
+ bch2_trans_unlock(trans);
- trans_for_each_update(trans, i)
- bch2_btree_iter_downgrade(i->iter);
-err:
- /* make sure we didn't drop or screw up locks: */
- bch2_btree_trans_verify_locks(trans);
+ ret = bch2_fs_read_write_early(c);
+ if (ret)
+ return ret;
- return ret;
+ percpu_ref_get(&c->writes);
+ return 0;
}
-int bch2_trans_commit(struct btree_trans *trans,
- struct disk_reservation *disk_res,
- u64 *journal_seq,
- unsigned flags)
+int __bch2_trans_commit(struct btree_trans *trans)
{
- struct bch_fs *c = trans->c;
struct btree_insert_entry *i = NULL;
struct btree_iter *iter;
unsigned orig_nr_updates = trans->nr_updates;
@@ -768,61 +774,47 @@ int bch2_trans_commit(struct btree_trans *trans,
goto out_noupdates;
/* for the sake of sanity: */
- BUG_ON(trans->nr_updates > 1 && !(flags & BTREE_INSERT_ATOMIC));
-
- if (flags & BTREE_INSERT_GC_LOCK_HELD)
- lockdep_assert_held(&c->gc_lock);
+ EBUG_ON(trans->nr_updates > 1 && !(trans->flags & BTREE_INSERT_ATOMIC));
- if (!trans->commit_start)
- trans->commit_start = local_clock();
+ if (trans->flags & BTREE_INSERT_GC_LOCK_HELD)
+ lockdep_assert_held(&trans->c->gc_lock);
- memset(&trans->journal_res, 0, sizeof(trans->journal_res));
memset(&trans->journal_preres, 0, sizeof(trans->journal_preres));
- trans->disk_res = disk_res;
- trans->journal_seq = journal_seq;
- trans->flags = flags;
- if (unlikely(!(trans->flags & BTREE_INSERT_NOCHECK_RW) &&
- !percpu_ref_tryget(&c->writes))) {
- if (likely(!(trans->flags & BTREE_INSERT_LAZY_RW)))
- return -EROFS;
-
- bch2_trans_unlock(trans);
-
- ret = bch2_fs_read_write_early(c);
+ if (!(trans->flags & BTREE_INSERT_NOCHECK_RW) &&
+ unlikely(!percpu_ref_tryget(&trans->c->writes))) {
+ ret = bch2_trans_commit_get_rw_cold(trans);
if (ret)
return ret;
+ }
+retry:
+ memset(&trans->journal_res, 0, sizeof(trans->journal_res));
+ trans->journal_u64s = 0;
- percpu_ref_get(&c->writes);
+ ret = do_bch2_trans_commit(trans, &i);
- if (!bch2_trans_relock(trans)) {
- ret = -EINTR;
- goto err;
- }
+ if (trans->fs_usage_deltas) {
+ trans->fs_usage_deltas->used = 0;
+ memset((void *) trans->fs_usage_deltas +
+ offsetof(struct replicas_delta_list, memset_start), 0,
+ (void *) &trans->fs_usage_deltas->memset_end -
+ (void *) &trans->fs_usage_deltas->memset_start);
}
-retry:
- ret = bch2_trans_journal_preres_get(trans);
- if (ret)
- goto err;
- ret = __bch2_trans_commit(trans, &i);
+ /* make sure we didn't drop or screw up locks: */
+ bch2_btree_trans_verify_locks(trans);
+
if (ret)
goto err;
out:
- bch2_journal_preres_put(&c->journal, &trans->journal_preres);
+ bch2_journal_preres_put(&trans->c->journal, &trans->journal_preres);
- if (unlikely(!(trans->flags & BTREE_INSERT_NOCHECK_RW)))
- percpu_ref_put(&c->writes);
+ if (likely(!(trans->flags & BTREE_INSERT_NOCHECK_RW)))
+ percpu_ref_put(&trans->c->writes);
out_noupdates:
- if (!ret && trans->commit_start) {
- bch2_time_stats_update(&c->times[BCH_TIME_btree_update],
- trans->commit_start);
- trans->commit_start = 0;
- }
-
- BUG_ON(!(trans->flags & BTREE_INSERT_ATOMIC) && ret == -EINTR);
+ EBUG_ON(!(trans->flags & BTREE_INSERT_ATOMIC) && ret == -EINTR);
- trans_for_each_iter(trans, iter)
+ trans_for_each_iter_all(trans, iter)
iter->flags &= ~BTREE_ITER_KEEP_UNTIL_COMMIT;
if (!ret) {
@@ -836,18 +828,16 @@ out_noupdates:
err:
ret = bch2_trans_commit_error(trans, i, ret);
- /* free updates and memory used by triggers, they'll be reexecuted: */
- trans->nr_updates = orig_nr_updates;
- trans->mem_top = orig_mem_top;
-
/* can't loop if it was passed in and we changed it: */
if (unlikely(trans->flags & BTREE_INSERT_NO_CLEAR_REPLICAS) && !ret)
ret = -EINTR;
+ if (ret)
+ goto out;
- if (!ret)
- goto retry;
-
- goto out;
+ /* free updates and memory used by triggers, they'll be reexecuted: */
+ trans->nr_updates = orig_nr_updates;
+ trans->mem_top = orig_mem_top;
+ goto retry;
}
/**
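[Note: a simplified userspace model, stubs only and not kernel code, of the ordering the refactor establishes across do_bch2_trans_commit(), bch2_trans_commit_write_locked() and __bch2_trans_commit(), as read from the hunks above:]

static int  run_triggers_and_get_journal_preres(void) { return 0; } /* may relock/block */
static void drop_read_only_locks(void) {}
static void sort_updates(void) {}
static void set_write_locks(int lock) { (void) lock; }
static int  commit_write_locked(void) { return 0; } /* journal res + btree insert */
static void put_journal_res(void) {}                /* may kick off a journal write */
static int  handle_commit_error(int ret) { return ret; } /* 0 means "retry" */

static int model_trans_commit(void)
{
	int ret;
retry:
	ret = run_triggers_and_get_journal_preres();
	if (ret)
		goto err;

	drop_read_only_locks();  /* only after anything that might relock */
	sort_updates();          /* so write locks are taken in a fixed order */

	set_write_locks(1);
	ret = commit_write_locked();  /* must not fail once the journal
					 reservation has been taken */
	set_write_locks(0);

	put_journal_res();            /* after dropping write locks */
	if (ret)
		goto err;
	return 0;
err:
	ret = handle_commit_error(ret);
	if (ret)
		return ret;
	goto retry;
}

int main(void)
{
	return model_trans_commit();
}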
diff --git a/fs/bcachefs/fs-io.c b/fs/bcachefs/fs-io.c
index a3fb60383725..c539ed3aa48d 100644
--- a/fs/bcachefs/fs-io.c
+++ b/fs/bcachefs/fs-io.c
@@ -2720,20 +2720,26 @@ long bch2_fallocate_dispatch(struct file *file, int mode,
loff_t offset, loff_t len)
{
struct bch_inode_info *inode = file_bch_inode(file);
+ struct bch_fs *c = inode->v.i_sb->s_fs_info;
+ long ret;
- if (!(mode & ~(FALLOC_FL_KEEP_SIZE|FALLOC_FL_ZERO_RANGE)))
- return bchfs_fallocate(inode, mode, offset, len);
-
- if (mode == (FALLOC_FL_PUNCH_HOLE|FALLOC_FL_KEEP_SIZE))
- return bchfs_fpunch(inode, offset, len);
-
- if (mode == FALLOC_FL_INSERT_RANGE)
- return bchfs_fcollapse_finsert(inode, offset, len, true);
+ if (!percpu_ref_tryget(&c->writes))
+ return -EROFS;
- if (mode == FALLOC_FL_COLLAPSE_RANGE)
- return bchfs_fcollapse_finsert(inode, offset, len, false);
+ if (!(mode & ~(FALLOC_FL_KEEP_SIZE|FALLOC_FL_ZERO_RANGE)))
+ ret = bchfs_fallocate(inode, mode, offset, len);
+ else if (mode == (FALLOC_FL_PUNCH_HOLE|FALLOC_FL_KEEP_SIZE))
+ ret = bchfs_fpunch(inode, offset, len);
+ else if (mode == FALLOC_FL_INSERT_RANGE)
+ ret = bchfs_fcollapse_finsert(inode, offset, len, true);
+ else if (mode == FALLOC_FL_COLLAPSE_RANGE)
+ ret = bchfs_fcollapse_finsert(inode, offset, len, false);
+ else
+ ret = -EOPNOTSUPP;
+
+ percpu_ref_put(&c->writes);
- return -EOPNOTSUPP;
+ return ret;
}
static void mark_range_unallocated(struct bch_inode_info *inode,
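[Note: a sketch, with userspace stand-ins rather than kernel code, of the c->writes guard the hunk above adds around bch2_fallocate_dispatch(): take the reference up front, fail with -EROFS if the filesystem has gone read-only, and drop it on every exit path. Holding it appears to be what allows the io.c commit below to pass BTREE_INSERT_NOCHECK_RW:]

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

static bool writes_tryget(void) { return true; } /* stands in for percpu_ref_tryget() */
static void writes_put(void)    { }              /* stands in for percpu_ref_put() */
static long do_fallocate_mode(int mode) { return mode >= 0 ? 0 : -EOPNOTSUPP; }

static long dispatch(int mode)
{
	long ret;

	if (!writes_tryget())
		return -EROFS;

	ret = do_fallocate_mode(mode);

	writes_put();
	return ret;
}

int main(void)
{
	printf("%ld\n", dispatch(0));
	return 0;
}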
diff --git a/fs/bcachefs/io.c b/fs/bcachefs/io.c
index 6d416f71f055..a343393115d8 100644
--- a/fs/bcachefs/io.c
+++ b/fs/bcachefs/io.c
@@ -310,6 +310,7 @@ int bch2_extent_update(struct btree_trans *trans,
bch2_trans_update(trans, iter, k);
ret = bch2_trans_commit(trans, disk_res, journal_seq,
+ BTREE_INSERT_NOCHECK_RW|
BTREE_INSERT_NOFAIL|
BTREE_INSERT_ATOMIC|
BTREE_INSERT_USE_RESERVE);
diff --git a/fs/bcachefs/journal.h b/fs/bcachefs/journal.h
index 36066ea7de7a..f8867f86318a 100644
--- a/fs/bcachefs/journal.h
+++ b/fs/bcachefs/journal.h
@@ -271,7 +271,7 @@ static inline void bch2_journal_res_put(struct journal *j,
if (!res->ref)
return;
- lock_release(&j->res_map, _RET_IP_);
+ lock_release(&j->res_map, _THIS_IP_);
while (res->u64s)
bch2_journal_add_entry(j, res,
diff --git a/fs/bcachefs/reflink.c b/fs/bcachefs/reflink.c
index 4a4b17f93a2e..6d45ae24479d 100644
--- a/fs/bcachefs/reflink.c
+++ b/fs/bcachefs/reflink.c
@@ -166,6 +166,9 @@ s64 bch2_remap_range(struct bch_fs *c,
u64 src_done, dst_done;
int ret = 0, ret2 = 0;
+ if (!percpu_ref_tryget(&c->writes))
+ return -EROFS;
+
if (!(c->sb.features & (1ULL << BCH_FEATURE_REFLINK))) {
mutex_lock(&c->sb_lock);
if (!(c->sb.features & (1ULL << BCH_FEATURE_REFLINK))) {
@@ -295,5 +298,7 @@ err:
ret = bch2_trans_exit(&trans) ?: ret;
+ percpu_ref_put(&c->writes);
+
return dst_done ?: ret ?: ret2;
}