summaryrefslogtreecommitdiff
path: root/fs/bcachefs/debug.c
diff options
context:
space:
mode:
authorKent Overstreet <kent.overstreet@linux.dev>2023-06-20 04:01:13 +0300
committerKent Overstreet <kent.overstreet@linux.dev>2023-10-23 00:10:04 +0300
commita5b696ee6e10103def82ea9abc18958912e81b00 (patch)
treec8b0dadf44dd7441a92c2081b7b78202386bd3bc /fs/bcachefs/debug.c
parent6547ebabdaac4407ccc978f63f4dc4d9f8936783 (diff)
downloadlinux-a5b696ee6e10103def82ea9abc18958912e81b00.tar.xz
bcachefs: seqmutex; fix a lockdep splat
We can't be holding btree_trans_lock while copying to user space, which might incur a page fault. To fix this, convert it to a seqmutex so we can unlock/relock.

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
Diffstat (limited to 'fs/bcachefs/debug.c')
-rw-r--r--fs/bcachefs/debug.c46
1 file changed, 35 insertions(+), 11 deletions(-)
diff --git a/fs/bcachefs/debug.c b/fs/bcachefs/debug.c
index 8981acc15098..df0e14dc96e6 100644
--- a/fs/bcachefs/debug.c
+++ b/fs/bcachefs/debug.c
@@ -627,19 +627,26 @@ static ssize_t bch2_btree_transactions_read(struct file *file, char __user *buf,
struct bch_fs *c = i->c;
struct btree_trans *trans;
ssize_t ret = 0;
+ u32 seq;
i->ubuf = buf;
i->size = size;
i->ret = 0;
-
- mutex_lock(&c->btree_trans_lock);
+restart:
+ seqmutex_lock(&c->btree_trans_lock);
list_for_each_entry(trans, &c->btree_trans_list, list) {
if (trans->locking_wait.task->pid <= i->iter)
continue;
+ closure_get(&trans->ref);
+ seq = seqmutex_seq(&c->btree_trans_lock);
+ seqmutex_unlock(&c->btree_trans_lock);
+
ret = flush_buf(i);
- if (ret)
- break;
+ if (ret) {
+ closure_put(&trans->ref);
+ goto unlocked;
+ }
bch2_btree_trans_to_text(&i->buf, trans);
@@ -651,9 +658,14 @@ static ssize_t bch2_btree_transactions_read(struct file *file, char __user *buf,
prt_newline(&i->buf);
i->iter = trans->locking_wait.task->pid;
- }
- mutex_unlock(&c->btree_trans_lock);
+ closure_put(&trans->ref);
+
+ if (!seqmutex_relock(&c->btree_trans_lock, seq))
+ goto restart;
+ }
+ seqmutex_unlock(&c->btree_trans_lock);
+unlocked:
if (i->buf.allocation_failure)
ret = -ENOMEM;
@@ -815,6 +827,7 @@ static ssize_t bch2_btree_deadlock_read(struct file *file, char __user *buf,
struct bch_fs *c = i->c;
struct btree_trans *trans;
ssize_t ret = 0;
+ u32 seq;
i->ubuf = buf;
i->size = size;
@@ -822,21 +835,32 @@ static ssize_t bch2_btree_deadlock_read(struct file *file, char __user *buf,
if (i->iter)
goto out;
-
- mutex_lock(&c->btree_trans_lock);
+restart:
+ seqmutex_lock(&c->btree_trans_lock);
list_for_each_entry(trans, &c->btree_trans_list, list) {
if (trans->locking_wait.task->pid <= i->iter)
continue;
+ closure_get(&trans->ref);
+ seq = seqmutex_seq(&c->btree_trans_lock);
+ seqmutex_unlock(&c->btree_trans_lock);
+
ret = flush_buf(i);
- if (ret)
- break;
+ if (ret) {
+ closure_put(&trans->ref);
+ goto out;
+ }
bch2_check_for_deadlock(trans, &i->buf);
i->iter = trans->locking_wait.task->pid;
+
+ closure_put(&trans->ref);
+
+ if (!seqmutex_relock(&c->btree_trans_lock, seq))
+ goto restart;
}
- mutex_unlock(&c->btree_trans_lock);
+ seqmutex_unlock(&c->btree_trans_lock);
out:
if (i->buf.allocation_failure)
ret = -ENOMEM;