author     Kent Overstreet <kent.overstreet@gmail.com>   2021-01-22 03:14:37 +0300
committer  Kent Overstreet <kent.overstreet@linux.dev>   2023-10-23 00:08:51 +0300
commit     e46b85573434b4e0c9f8eee4ac21d90643a97454 (patch)
tree       d43c1fb50c92d789f86436ffff77193bf7c52ea8 /fs/bcachefs/replicas.c
parent     b4725cc1a45fa859e6ff0966f5fa988d6402e5c8 (diff)
download   linux-e46b85573434b4e0c9f8eee4ac21d90643a97454.tar.xz
bcachefs: Switch replicas.c allocations to GFP_KERNEL
We're transitioning to memalloc_nofs_save/restore instead of GFP flags with the rest of the kernel, and GFP_NOIO was excessively strict, causing unnecessary allocation failures - these allocations are done with btree locks dropped.

Signed-off-by: Kent Overstreet <kent.overstreet@gmail.com>
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
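For reference, the memalloc_nofs_save()/memalloc_nofs_restore() pattern the message refers to looks roughly like the sketch below. This is an illustrative example only, not code from this commit; the function example_alloc_in_nofs_region() is hypothetical.

#include <linux/sched/mm.h>
#include <linux/slab.h>

/*
 * Hypothetical sketch: rather than threading GFP_NOFS/GFP_NOIO through
 * every allocation site, the caller marks the region in which reclaim
 * must not recurse into the filesystem; plain GFP_KERNEL allocations
 * inside that region are then implicitly restricted.
 */
static void *example_alloc_in_nofs_region(size_t bytes)
{
	unsigned int flags = memalloc_nofs_save();
	void *p = kmalloc(bytes, GFP_KERNEL);	/* behaves as GFP_NOFS here */

	memalloc_nofs_restore(flags);
	return p;
}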
Diffstat (limited to 'fs/bcachefs/replicas.c')
-rw-r--r--  fs/bcachefs/replicas.c  18
1 file changed, 9 insertions, 9 deletions
diff --git a/fs/bcachefs/replicas.c b/fs/bcachefs/replicas.c
index a0840e1c9f88..979e9c2b8c74 100644
--- a/fs/bcachefs/replicas.c
+++ b/fs/bcachefs/replicas.c
@@ -159,7 +159,7 @@ cpu_replicas_add_entry(struct bch_replicas_cpu *old,
 	BUG_ON(!new_entry->data_type);
 	verify_replicas_entry(new_entry);
 
-	new.entries = kcalloc(new.nr, new.entry_size, GFP_NOIO);
+	new.entries = kcalloc(new.nr, new.entry_size, GFP_KERNEL);
 	if (!new.entries)
 		return new;
@@ -284,20 +284,20 @@ static int replicas_table_update(struct bch_fs *c,
 	for (i = 0; i < ARRAY_SIZE(new_usage); i++)
 		if (!(new_usage[i] = __alloc_percpu_gfp(bytes,
-					sizeof(u64), GFP_NOIO)))
+					sizeof(u64), GFP_KERNEL)))
 			goto err;
 
 	memset(new_usage, 0, sizeof(new_usage));
 
 	for (i = 0; i < ARRAY_SIZE(new_usage); i++)
 		if (!(new_usage[i] = __alloc_percpu_gfp(bytes,
-					sizeof(u64), GFP_NOIO)))
+					sizeof(u64), GFP_KERNEL)))
 			goto err;
 
-	if (!(new_base = kzalloc(bytes, GFP_NOIO)) ||
-	    !(new_scratch = kmalloc(scratch_bytes, GFP_NOIO)) ||
+	if (!(new_base = kzalloc(bytes, GFP_KERNEL)) ||
+	    !(new_scratch = kmalloc(scratch_bytes, GFP_KERNEL)) ||
 	    (c->usage_gc &&
-	     !(new_gc = __alloc_percpu_gfp(bytes, sizeof(u64), GFP_NOIO))))
+	     !(new_gc = __alloc_percpu_gfp(bytes, sizeof(u64), GFP_KERNEL))))
 		goto err;
 
 	for (i = 0; i < ARRAY_SIZE(new_usage); i++)
for (i = 0; i < ARRAY_SIZE(new_usage); i++)
@@ -557,7 +557,7 @@ int bch2_replicas_gc_start(struct bch_fs *c, unsigned typemask)
 	c->replicas_gc.entries = kcalloc(c->replicas_gc.nr,
					 c->replicas_gc.entry_size,
-					 GFP_NOIO);
+					 GFP_KERNEL);
 	if (!c->replicas_gc.entries) {
 		mutex_unlock(&c->sb_lock);
 		bch_err(c, "error allocating c->replicas_gc");
@@ -680,7 +680,7 @@ __bch2_sb_replicas_to_cpu_replicas(struct bch_sb_field_replicas *sb_r,
 		nr++;
 	}
 
-	cpu_r->entries = kcalloc(nr, entry_size, GFP_NOIO);
+	cpu_r->entries = kcalloc(nr, entry_size, GFP_KERNEL);
 	if (!cpu_r->entries)
 		return -ENOMEM;
@@ -712,7 +712,7 @@ __bch2_sb_replicas_v0_to_cpu_replicas(struct bch_sb_field_replicas_v0 *sb_r,
 	entry_size += sizeof(struct bch_replicas_entry) -
 		sizeof(struct bch_replicas_entry_v0);
 
-	cpu_r->entries = kcalloc(nr, entry_size, GFP_NOIO);
+	cpu_r->entries = kcalloc(nr, entry_size, GFP_KERNEL);
 	if (!cpu_r->entries)
 		return -ENOMEM;