author		Christoph Hellwig <hch@lst.de>	2022-02-28 14:25:02 +0300
committer	Song Liu <song@kernel.org>	2022-03-09 09:55:09 +0300
commit		89f94b64408b576c88e407d87061bb7b9bc2111d (patch)
tree		bddffec274b94ed4ca22b269a56fecd7be7923e3 /drivers/md
parent		0dd00cba99c352dc9afd62979f350d808c215cb9 (diff)
raid5-cache: statically allocate the recovery ra bio
There is no need to preallocate the bio and reset it on each use. Just allocate it on-stack and use a bvec array placed next to the pages used for it.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Song Liu <song@kernel.org>
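The pattern here is the one the block layer provides for short-lived, fixed-size I/O: initialize a bio on the stack with bio_init(), backed by a caller-owned bio_vec table, and tear it down with bio_uninit() instead of bio_put(). Below is a minimal sketch of that pattern, assuming the bio_init() signature that takes the block device and op flags (introduced in the same era as this patch); the device, page count, and demo_read_pages() helper are illustrative assumptions, not the raid5-cache code:

	#include <linux/bio.h>
	#include <linux/blkdev.h>
	#include <linux/mm.h>

	#define DEMO_NR_PAGES 4	/* illustrative size, standing in for R5L_RECOVERY_PAGE_POOL_SIZE */

	/* Hypothetical helper: synchronously read @nr pages from @bdev at @sector. */
	static int demo_read_pages(struct block_device *bdev, sector_t sector,
				   struct page **pages, int nr)
	{
		struct bio_vec bvec[DEMO_NR_PAGES];	/* bvec table lives next to the bio */
		struct bio bio;
		int i, ret;

		/* No bioset allocation and no bio_reset(): the bio is set up in place. */
		bio_init(&bio, bdev, bvec, DEMO_NR_PAGES, REQ_OP_READ);
		bio.bi_iter.bi_sector = sector;

		/*
		 * __bio_add_page() skips the merge/limit checks that bio_add_page()
		 * performs; that is safe here because the bvec table is sized for
		 * exactly these pages.
		 */
		for (i = 0; i < nr; i++)
			__bio_add_page(&bio, pages[i], PAGE_SIZE, 0);

		ret = submit_bio_wait(&bio);
		bio_uninit(&bio);	/* pairs with bio_init(); replaces bio_put() */
		return ret;
	}

Because nothing is allocated, the error paths need no cleanup beyond bio_uninit(), which is what lets the patch below drop the bio_put() calls from the pool allocation and free paths.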
Diffstat (limited to 'drivers/md')
-rw-r--r--	drivers/md/raid5-cache.c	28
1 file changed, 13 insertions(+), 15 deletions(-)
diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c
index 8907d18c225a..a7d50ff9020a 100644
--- a/drivers/md/raid5-cache.c
+++ b/drivers/md/raid5-cache.c
@@ -1623,10 +1623,10 @@ struct r5l_recovery_ctx {
* just copy data from the pool.
*/
struct page *ra_pool[R5L_RECOVERY_PAGE_POOL_SIZE];
+ struct bio_vec ra_bvec[R5L_RECOVERY_PAGE_POOL_SIZE];
sector_t pool_offset; /* offset of first page in the pool */
int total_pages; /* total allocated pages */
int valid_pages; /* pages with valid data */
- struct bio *ra_bio; /* bio to do the read ahead */
};
static int r5l_recovery_allocate_ra_pool(struct r5l_log *log,
@@ -1634,11 +1634,6 @@ static int r5l_recovery_allocate_ra_pool(struct r5l_log *log,
{
struct page *page;
- ctx->ra_bio = bio_alloc_bioset(NULL, BIO_MAX_VECS, 0, GFP_KERNEL,
- &log->bs);
- if (!ctx->ra_bio)
- return -ENOMEM;
-
ctx->valid_pages = 0;
ctx->total_pages = 0;
while (ctx->total_pages < R5L_RECOVERY_PAGE_POOL_SIZE) {
@@ -1650,10 +1645,8 @@ static int r5l_recovery_allocate_ra_pool(struct r5l_log *log,
ctx->total_pages += 1;
}
- if (ctx->total_pages == 0) {
- bio_put(ctx->ra_bio);
+ if (ctx->total_pages == 0)
return -ENOMEM;
- }
ctx->pool_offset = 0;
return 0;
@@ -1666,7 +1659,6 @@ static void r5l_recovery_free_ra_pool(struct r5l_log *log,
for (i = 0; i < ctx->total_pages; ++i)
put_page(ctx->ra_pool[i]);
- bio_put(ctx->ra_bio);
}
/*
@@ -1679,15 +1671,19 @@ static int r5l_recovery_fetch_ra_pool(struct r5l_log *log,
struct r5l_recovery_ctx *ctx,
sector_t offset)
{
- bio_reset(ctx->ra_bio, log->rdev->bdev, REQ_OP_READ);
- ctx->ra_bio->bi_iter.bi_sector = log->rdev->data_offset + offset;
+ struct bio bio;
+ int ret;
+
+ bio_init(&bio, log->rdev->bdev, ctx->ra_bvec,
+ R5L_RECOVERY_PAGE_POOL_SIZE, REQ_OP_READ);
+ bio.bi_iter.bi_sector = log->rdev->data_offset + offset;
ctx->valid_pages = 0;
ctx->pool_offset = offset;
while (ctx->valid_pages < ctx->total_pages) {
- bio_add_page(ctx->ra_bio,
- ctx->ra_pool[ctx->valid_pages], PAGE_SIZE, 0);
+ __bio_add_page(&bio, ctx->ra_pool[ctx->valid_pages], PAGE_SIZE,
+ 0);
ctx->valid_pages += 1;
offset = r5l_ring_add(log, offset, BLOCK_SECTORS);
@@ -1696,7 +1692,9 @@ static int r5l_recovery_fetch_ra_pool(struct r5l_log *log,
break;
}
- return submit_bio_wait(ctx->ra_bio);
+ ret = submit_bio_wait(&bio);
+ bio_uninit(&bio);
+ return ret;
}
/*
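For reference, stitching the hunks together gives roughly the following post-patch r5l_recovery_fetch_ra_pool(). The loop-exit test is elided by the hunk context above; the discontinuity check shown is a reconstruction from the surrounding file, not part of this diff:

	static int r5l_recovery_fetch_ra_pool(struct r5l_log *log,
					      struct r5l_recovery_ctx *ctx,
					      sector_t offset)
	{
		struct bio bio;
		int ret;

		bio_init(&bio, log->rdev->bdev, ctx->ra_bvec,
			 R5L_RECOVERY_PAGE_POOL_SIZE, REQ_OP_READ);
		bio.bi_iter.bi_sector = log->rdev->data_offset + offset;

		ctx->valid_pages = 0;
		ctx->pool_offset = offset;

		while (ctx->valid_pages < ctx->total_pages) {
			__bio_add_page(&bio, ctx->ra_pool[ctx->valid_pages], PAGE_SIZE,
				       0);
			ctx->valid_pages += 1;
			offset = r5l_ring_add(log, offset, BLOCK_SECTORS);

			if (offset == 0)	/* discontinuity: reconstructed, elided above */
				break;
		}

		ret = submit_bio_wait(&bio);
		bio_uninit(&bio);
		return ret;
	}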