summary | refs | log | tree | commit | diff
diff options
context:
space:
mode:
authorKent Overstreet <kent.overstreet@gmail.com>2019-04-16 22:13:16 +0300
committerKent Overstreet <kent.overstreet@linux.dev>2023-10-23 00:08:20 +0300
commitf80b4e64a4d79e78053a0e2ed4607f6af9dd2c89 (patch)
tree31182b84f007a7c10b4705edb6fabe424d65975e
parent53beb841623bcdb1fe619efe5f2c34ca3af08c78 (diff)
downloadlinux-f80b4e64a4d79e78053a0e2ed4607f6af9dd2c89.tar.xz
bcachefs: Fix hang while shutting down
If the allocator thread exited before bch2_dev_allocator_stop() was called (because of an error), bch2_dev_allocator_quiesce() could hang.

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
-rw-r--r--fs/bcachefs/alloc_background.c22
-rw-r--r--fs/bcachefs/bcachefs.h8
-rw-r--r--fs/bcachefs/movinggc.c2
3 files changed, 21 insertions, 11 deletions
diff --git a/fs/bcachefs/alloc_background.c b/fs/bcachefs/alloc_background.c
index c254c08af9d1..4a8f6fa3db1e 100644
--- a/fs/bcachefs/alloc_background.c
+++ b/fs/bcachefs/alloc_background.c
@@ -598,6 +598,9 @@ static int wait_buckets_available(struct bch_fs *c, struct bch_dev *ca)
unsigned long gc_count = c->gc_count;
int ret = 0;
+ ca->allocator_state = ALLOCATOR_BLOCKED;
+ closure_wake_up(&c->freelist_wait);
+
while (1) {
set_current_state(TASK_INTERRUPTIBLE);
if (kthread_should_stop()) {
@@ -620,6 +623,9 @@ static int wait_buckets_available(struct bch_fs *c, struct bch_dev *ca)
}
__set_current_state(TASK_RUNNING);
+ ca->allocator_state = ALLOCATOR_RUNNING;
+ closure_wake_up(&c->freelist_wait);
+
return ret;
}
@@ -1119,14 +1125,14 @@ static int push_invalidated_bucket(struct bch_fs *c, struct bch_dev *ca, size_t
fifo_pop(&ca->free_inc, bucket);
closure_wake_up(&c->freelist_wait);
- ca->allocator_blocked_full = false;
+ ca->allocator_state = ALLOCATOR_RUNNING;
spin_unlock(&c->freelist_lock);
goto out;
}
- if (!ca->allocator_blocked_full) {
- ca->allocator_blocked_full = true;
+ if (ca->allocator_state != ALLOCATOR_BLOCKED_FULL) {
+ ca->allocator_state = ALLOCATOR_BLOCKED_FULL;
closure_wake_up(&c->freelist_wait);
}
@@ -1184,6 +1190,7 @@ static int bch2_allocator_thread(void *arg)
int ret;
set_freezable();
+ ca->allocator_state = ALLOCATOR_RUNNING;
while (1) {
cond_resched();
@@ -1242,9 +1249,6 @@ static int bch2_allocator_thread(void *arg)
if (!nr ||
(nr < ALLOC_SCAN_BATCH(ca) &&
!fifo_full(&ca->free[RESERVE_MOVINGGC]))) {
- ca->allocator_blocked = true;
- closure_wake_up(&c->freelist_wait);
-
ret = wait_buckets_available(c, ca);
if (ret) {
up_read(&c->gc_lock);
@@ -1253,7 +1257,6 @@ static int bch2_allocator_thread(void *arg)
}
} while (!nr);
- ca->allocator_blocked = false;
up_read(&c->gc_lock);
pr_debug("%zu buckets to invalidate", nr);
@@ -1266,6 +1269,8 @@ static int bch2_allocator_thread(void *arg)
stop:
pr_debug("alloc thread stopping (ret %i)", ret);
+ ca->allocator_state = ALLOCATOR_STOPPED;
+ closure_wake_up(&c->freelist_wait);
return 0;
}
@@ -1457,7 +1462,8 @@ void bch2_dev_allocator_add(struct bch_fs *c, struct bch_dev *ca)
void bch2_dev_allocator_quiesce(struct bch_fs *c, struct bch_dev *ca)
{
if (ca->alloc_thread)
- closure_wait_event(&c->freelist_wait, ca->allocator_blocked_full);
+ closure_wait_event(&c->freelist_wait,
+ ca->allocator_state != ALLOCATOR_RUNNING);
}
/* stop allocator thread: */
diff --git a/fs/bcachefs/bcachefs.h b/fs/bcachefs/bcachefs.h
index 8acdc7ffeca3..72f9f5f9abe9 100644
--- a/fs/bcachefs/bcachefs.h
+++ b/fs/bcachefs/bcachefs.h
@@ -447,8 +447,12 @@ struct bch_dev {
* XXX: this should be an enum for allocator state, so as to include
* error state
*/
- bool allocator_blocked;
- bool allocator_blocked_full;
+ enum {
+ ALLOCATOR_STOPPED,
+ ALLOCATOR_RUNNING,
+ ALLOCATOR_BLOCKED,
+ ALLOCATOR_BLOCKED_FULL,
+ } allocator_state;
alloc_heap alloc_heap;
diff --git a/fs/bcachefs/movinggc.c b/fs/bcachefs/movinggc.c
index aba13e6ea4ff..d97be76da58f 100644
--- a/fs/bcachefs/movinggc.c
+++ b/fs/bcachefs/movinggc.c
@@ -116,7 +116,7 @@ static bool have_copygc_reserve(struct bch_dev *ca)
spin_lock(&ca->freelist_lock);
ret = fifo_full(&ca->free[RESERVE_MOVINGGC]) ||
- ca->allocator_blocked;
+ ca->allocator_state != ALLOCATOR_RUNNING;
spin_unlock(&ca->freelist_lock);
return ret;