author		Kent Overstreet <kent.overstreet@gmail.com>	2020-03-29 19:33:41 +0300
committer	Kent Overstreet <kent.overstreet@linux.dev>	2023-10-23 00:08:37 +0300
commit		22f776985f34334b3bbba75b71ecca711f34e3f4 (patch)
tree		7d62678fef82a28e8bb6ff17a696f610f51339e3 /fs/bcachefs/compress.c
parent		5a655f06c94f541fa9223a9b7ef2ab8a909f1fea (diff)
bcachefs: Use kvpmalloc mempools for compression bounce
This fixes an issue where mounting would fail because of memory
fragmentation: previously, the compression bounce buffers were allocated
with get_free_pages(), which requires physically contiguous pages.

Signed-off-by: Kent Overstreet <kent.overstreet@gmail.com>
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
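For background on why this fixes the failure: kvpmalloc is a bcachefs
utility that tries a physically contiguous allocation first and falls back
to vmalloc when fragmentation prevents one. A minimal sketch of the idea,
with names modeled on bcachefs's util helpers (illustrative, not the
verbatim source):

static void *vpmalloc(size_t size, gfp_t gfp_mask)
{
	/* Prefer physically contiguous pages... */
	return (void *) __get_free_pages(gfp_mask|__GFP_NOWARN,
					 get_order(size)) ?:
		/* ...but fall back to vmalloc when they can't be found */
		__vmalloc(size, gfp_mask);
}

static void *kvpmalloc(size_t size, gfp_t gfp_mask)
{
	/* Small allocations go straight to kmalloc */
	return size < PAGE_SIZE
		? kmalloc(size, gfp_mask)
		: vpmalloc(size, gfp_mask);
}

A mempool backed by this allocator can refill itself even on a badly
fragmented machine, which is what makes the mount-time failure go away.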
Diffstat (limited to 'fs/bcachefs/compress.c')
-rw-r--r--	fs/bcachefs/compress.c	25
1 file changed, 5 insertions, 20 deletions
diff --git a/fs/bcachefs/compress.c b/fs/bcachefs/compress.c
index 117afac3db1a..89eb03a988f1 100644
--- a/fs/bcachefs/compress.c
+++ b/fs/bcachefs/compress.c
@@ -17,7 +17,6 @@ struct bbuf {
BB_NONE,
BB_VMAP,
BB_KMALLOC,
- BB_VMALLOC,
BB_MEMPOOL,
} type;
int rw;
@@ -33,17 +32,7 @@ static struct bbuf __bounce_alloc(struct bch_fs *c, unsigned size, int rw)
if (b)
return (struct bbuf) { .b = b, .type = BB_KMALLOC, .rw = rw };

- b = mempool_alloc(&c->compression_bounce[rw], GFP_NOWAIT);
- b = b ? page_address(b) : NULL;
- if (b)
- return (struct bbuf) { .b = b, .type = BB_MEMPOOL, .rw = rw };
-
- b = vmalloc(size);
- if (b)
- return (struct bbuf) { .b = b, .type = BB_VMALLOC, .rw = rw };
-
b = mempool_alloc(&c->compression_bounce[rw], GFP_NOIO);
- b = b ? page_address(b) : NULL;
if (b)
return (struct bbuf) { .b = b, .type = BB_MEMPOOL, .rw = rw };

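With the vmalloc and GFP_NOWAIT branches gone, __bounce_alloc() reduces to
a two-step fallback. Reconstructed for readability (a sketch: the kmalloc
flags and the trailing BUG() are assumed from the pre-patch function, which
this hunk does not show in full):

static struct bbuf __bounce_alloc(struct bch_fs *c, unsigned size, int rw)
{
	void *b;

	/* Fast path: plain kmalloc, silent on failure */
	b = kmalloc(size, GFP_NOIO|__GFP_NOWARN);
	if (b)
		return (struct bbuf) { .b = b, .type = BB_KMALLOC, .rw = rw };

	/*
	 * Fallback: the kvpmalloc mempool. With GFP_NOIO this may block
	 * until an element is freed back to the pool, but it won't fail
	 * because of fragmentation.
	 */
	b = mempool_alloc(&c->compression_bounce[rw], GFP_NOIO);
	if (b)
		return (struct bbuf) { .b = b, .type = BB_MEMPOOL, .rw = rw };

	BUG();
}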
@@ -129,12 +118,8 @@ static void bio_unmap_or_unbounce(struct bch_fs *c, struct bbuf buf)
case BB_KMALLOC:
kfree(buf.b);
break;
- case BB_VMALLOC:
- vfree(buf.b);
- break;
case BB_MEMPOOL:
- mempool_free(virt_to_page(buf.b),
- &c->compression_bounce[buf.rw]);
+ mempool_free(buf.b, &c->compression_bounce[buf.rw]);
break;
}
}
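Note that mempool_free() now takes the buffer's virtual address directly.
The old page-pool elements were struct page pointers, hence the
virt_to_page() dance; kvpmalloc elements may live in vmalloc space, where
virt_to_page() on the pointer would be invalid anyway. The matching
free-side helpers, again sketched after bcachefs's util code (the
vpfree/kvpfree names are assumptions):

static void vpfree(void *p, size_t size)
{
	/* Undo whichever branch vpmalloc() took */
	if (is_vmalloc_addr(p))
		vfree(p);
	else
		free_pages((unsigned long) p, get_order(size));
}

static void kvpfree(void *p, size_t size)
{
	if (size < PAGE_SIZE)
		kfree(p);
	else
		vpfree(p, size);
}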
@@ -561,15 +546,15 @@ static int __bch2_fs_compress_init(struct bch_fs *c, u64 features)
have_compressed:

if (!mempool_initialized(&c->compression_bounce[READ])) {
- ret = mempool_init_page_pool(&c->compression_bounce[READ],
- 1, order);
+ ret = mempool_init_kvpmalloc_pool(&c->compression_bounce[READ],
+ 1, order);
if (ret)
goto out;
}

if (!mempool_initialized(&c->compression_bounce[WRITE])) {
- ret = mempool_init_page_pool(&c->compression_bounce[WRITE],
- 1, order);
+ ret = mempool_init_kvpmalloc_pool(&c->compression_bounce[WRITE],
+ 1, order);
if (ret)
goto out;
}
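The two init calls above use mempool_init_kvpmalloc_pool(), a bcachefs
helper rather than a core mempool API. A plausible sketch on top of
mempool_init() with custom element callbacks, reusing the kvpmalloc() and
kvpfree() sketches above (the callback names and the reading of the last
argument as an element size are assumptions):

static void *kvpmalloc_pool_alloc(gfp_t gfp_mask, void *pool_data)
{
	/* pool_data carries the element size for this pool */
	size_t size = (size_t) pool_data;

	return kvpmalloc(size, gfp_mask);
}

static void kvpmalloc_pool_free(void *element, void *pool_data)
{
	size_t size = (size_t) pool_data;

	kvpfree(element, size);
}

int mempool_init_kvpmalloc_pool(mempool_t *pool, int min_nr, size_t size)
{
	return mempool_init(pool, min_nr,
			    kvpmalloc_pool_alloc,
			    kvpmalloc_pool_free,
			    (void *) size);
}

mempool_init() with custom callbacks is the standard way to build a pool
over an arbitrary allocator; pool_data is threaded through to the
callbacks unchanged, here carrying the element size.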