path: root/fs/bcachefs/move.c
author		Kent Overstreet <kent.overstreet@linux.dev>	2023-01-03 01:53:02 +0300
committer	Kent Overstreet <kent.overstreet@linux.dev>	2023-10-23 00:09:52 +0300
commit		7ffb6a7ec6712eb1ba84a80137c2f712e67c4171 (patch)
tree		0d05e725b41817a28164f1ba6ee19747d010bbf6 /fs/bcachefs/move.c
parent		dbe17f18838df6d0facf51b43cdc5efd372c28d6 (diff)
download	linux-7ffb6a7ec6712eb1ba84a80137c2f712e67c4171.tar.xz
bcachefs: Fix deadlock on nocow locks in data move path
The recent nocow locking rework introduced a deadlock in the data move
path: the new nocow locking scheme uses a hash table with a fixed size
array for chaining, meaning on hash collision we may have to wait for
other locks to be released before we can lock a bucket.

And since the data move path needs to submit writes from the same
thread that's taking nocow locks and submitting reads, this introduces
a deadlock.

This shouldn't happen often in practice, but since the data move path
can keep large numbers of IOs in flight simultaneously, it's something
we have to handle.

This patch makes move_ctxt_wait_event() available to
bch2_data_update_init() and uses it when appropriate, which is our
normal solution to this kind of thing.

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
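For context, a sketch of what the shared wait helper looks like once the
move.c-local helpers are exported: the body mirrors the
move_ctxt_wait_event() macro removed from move.c in the diff below, with
next_pending_write()/do_pending_writes() replaced by their new
bch2_moving_ctxt_* names. The exact placement (presumably a shared header
such as move.h) is an assumption and is not shown in this diff.

/*
 * Sketch, assuming the macro moves to a shared header: each pass first
 * issues any writes whose reads have completed, then sleeps until either
 * another read completes or the condition becomes true.  A thread with
 * many IOs in flight therefore never blocks on a nocow bucket lock that
 * can only be released once its own pending writes are submitted.
 */
#define move_ctxt_wait_event(_ctxt, _trans, _cond)			\
do {									\
	bch2_moving_ctxt_do_pending_writes(_ctxt, _trans);		\
									\
	if (_cond)							\
		break;							\
	__wait_event((_ctxt)->wait,					\
		     bch2_moving_ctxt_next_pending_write(_ctxt) ||	\
		     (_cond));						\
} while (1)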
Diffstat (limited to 'fs/bcachefs/move.c')
-rw-r--r--	fs/bcachefs/move.c	21
1 files changed, 6 insertions, 15 deletions
diff --git a/fs/bcachefs/move.c b/fs/bcachefs/move.c
index 9e453b8495e8..d0ce656755d7 100644
--- a/fs/bcachefs/move.c
+++ b/fs/bcachefs/move.c
@@ -91,7 +91,7 @@ static void move_write(struct moving_io *io)
 	bch2_data_update_read_done(&io->write, io->rbio.pick.crc);
 }
 
-static inline struct moving_io *next_pending_write(struct moving_context *ctxt)
+struct moving_io *bch2_moving_ctxt_next_pending_write(struct moving_context *ctxt)
 {
 	struct moving_io *io =
 		list_first_entry_or_null(&ctxt->reads, struct moving_io, list);
@@ -111,29 +111,20 @@ static void move_read_endio(struct bio *bio)
 	closure_put(&ctxt->cl);
 }
 
-static void do_pending_writes(struct moving_context *ctxt, struct btree_trans *trans)
+void bch2_moving_ctxt_do_pending_writes(struct moving_context *ctxt,
+					struct btree_trans *trans)
 {
 	struct moving_io *io;
 
 	if (trans)
 		bch2_trans_unlock(trans);
 
-	while ((io = next_pending_write(ctxt))) {
+	while ((io = bch2_moving_ctxt_next_pending_write(ctxt))) {
 		list_del(&io->list);
 		move_write(io);
 	}
 }
 
-#define move_ctxt_wait_event(_ctxt, _trans, _cond)			\
-do {									\
-	do_pending_writes(_ctxt, _trans);				\
-									\
-	if (_cond)							\
-		break;							\
-	__wait_event((_ctxt)->wait,					\
-		     next_pending_write(_ctxt) || (_cond));		\
-} while (1)
-
 static void bch2_move_ctxt_wait_for_io(struct moving_context *ctxt,
 				       struct btree_trans *trans)
 {
@@ -299,8 +290,8 @@ static int bch2_move_extent(struct btree_trans *trans,
 	io->rbio.bio.bi_iter.bi_sector	= bkey_start_offset(k.k);
 	io->rbio.bio.bi_end_io		= move_read_endio;
 
-	ret = bch2_data_update_init(c, &io->write, ctxt->wp, io_opts,
-				    data_opts, btree_id, k);
+	ret = bch2_data_update_init(trans, ctxt, &io->write, ctxt->wp,
+				    io_opts, data_opts, btree_id, k);
 	if (ret && ret != -BCH_ERR_unwritten_extent_update)
 		goto err_free_pages;
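Not shown in this diff is the call site the change enables. Below is a
hypothetical sketch of how bch2_data_update_init() (in data_update.c)
could wait for a contended nocow bucket lock with the shared macro; the
trylock helper, its arguments, the bucket variable, and the surrounding
control flow are illustrative assumptions, not taken from this patch.

	/*
	 * Hypothetical sketch: when called from the move path (ctxt set),
	 * wait with move_ctxt_wait_event() so completed reads keep turning
	 * into writes while we wait for the bucket lock; otherwise block
	 * directly as before.
	 */
	if (ctxt)
		move_ctxt_wait_event(ctxt, trans,
			bch2_bucket_nocow_trylock(&c->nocow_locks, bucket, 0));
	else
		bch2_bucket_nocow_lock(&c->nocow_locks, bucket, 0);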