path: root/block/bio.c
author     Ming Lei <ming.lei@redhat.com>            2019-03-17 13:01:08 +0300
committer  Jens Axboe <axboe@kernel.dk>              2019-04-01 21:11:34 +0300
commit     190470871ae28da7bdb3909f6124385c8472fc97 (patch)
tree       6af7ea3cda825066245aedd136a40cd4a31b3c64 /block/bio.c
parent     5919482e222908d40279a616b1fe6400549e32b4 (diff)
download   linux-190470871ae28da7bdb3909f6124385c8472fc97.tar.xz
block: put the same page when adding it to bio
When the page being added is merged into the last bvec (which references the same page) in bio_add_pc_page(), the caller may need to put the page to avoid leaking its reference. bio_map_user_iov() needs this kind of handling and currently does it by itself in a hackish way.

Move the put-page handling into __bio_add_pc_page(), so bio_map_user_iov() can be simplified a bit, and more users may benefit from this change.

Cc: Omar Sandoval <osandov@fb.com>
Cc: Christoph Hellwig <hch@lst.de>
Signed-off-by: Ming Lei <ming.lei@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
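For context, a minimal caller-side sketch (not part of this patch): the helper name and surrounding setup are hypothetical, and it assumes the post-patch __bio_add_pc_page() signature. Each page is assumed to carry a reference taken at pin time (e.g. via get_user_pages()); passing put_same_page == true lets __bio_add_pc_page() drop that reference itself when the page merges into the previous bvec, replacing the old before/after bi_vcnt comparison in the caller.

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/bio.h>
#include <linux/blkdev.h>

/* Hypothetical helper: add already-pinned pages to a passthrough bio. */
static int add_pinned_pages(struct request_queue *q, struct bio *bio,
			    struct page **pages, int npages,
			    unsigned int offs, unsigned int bytes)
{
	int i;

	for (i = 0; i < npages && bytes; i++) {
		unsigned int n = min_t(unsigned int, PAGE_SIZE - offs, bytes);

		/*
		 * put_same_page == true: if this page merges into the
		 * previous bvec, __bio_add_pc_page() puts the pin
		 * reference for us, so the caller no longer has to check
		 * whether bi_vcnt changed across the call.
		 */
		if (!__bio_add_pc_page(q, bio, pages[i], n, offs, true))
			return -EINVAL;

		bytes -= n;
		offs = 0;
	}
	return 0;
}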
Diffstat (limited to 'block/bio.c')
-rw-r--r--	block/bio.c	28
1 file changed, 16 insertions(+), 12 deletions(-)
diff --git a/block/bio.c b/block/bio.c
index 7ab7060a0e6c..26853e072cd7 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -666,12 +666,13 @@ static inline bool page_is_mergeable(const struct bio_vec *bv,
}
/**
- * bio_add_pc_page - attempt to add page to passthrough bio
+ * __bio_add_pc_page - attempt to add page to passthrough bio
* @q: the target queue
* @bio: destination bio
* @page: page to add
* @len: vec entry length
* @offset: vec entry offset
+ * @put_same_page: put the page if it is same with last added page
*
* Attempt to add a page to the bio_vec maplist. This can fail for a
* number of reasons, such as the bio being full or target block device
@@ -680,8 +681,9 @@ static inline bool page_is_mergeable(const struct bio_vec *bv,
*
* This should only be used by passthrough bios.
*/
-int bio_add_pc_page(struct request_queue *q, struct bio *bio, struct page
- *page, unsigned int len, unsigned int offset)
+int __bio_add_pc_page(struct request_queue *q, struct bio *bio,
+ struct page *page, unsigned int len, unsigned int offset,
+ bool put_same_page)
{
int retried_segments = 0;
struct bio_vec *bvec;
@@ -705,6 +707,8 @@ int bio_add_pc_page(struct request_queue *q, struct bio *bio, struct page
if (page == bvec->bv_page &&
offset == bvec->bv_offset + bvec->bv_len) {
+ if (put_same_page)
+ put_page(page);
bvec->bv_len += len;
bio->bi_iter.bi_size += len;
goto done;
@@ -763,6 +767,13 @@ int bio_add_pc_page(struct request_queue *q, struct bio *bio, struct page
blk_recount_segments(q, bio);
return 0;
}
+EXPORT_SYMBOL(__bio_add_pc_page);
+
+int bio_add_pc_page(struct request_queue *q, struct bio *bio,
+ struct page *page, unsigned int len, unsigned int offset)
+{
+ return __bio_add_pc_page(q, bio, page, len, offset, false);
+}
EXPORT_SYMBOL(bio_add_pc_page);
/**
@@ -1397,21 +1408,14 @@ struct bio *bio_map_user_iov(struct request_queue *q,
for (j = 0; j < npages; j++) {
struct page *page = pages[j];
unsigned int n = PAGE_SIZE - offs;
- unsigned short prev_bi_vcnt = bio->bi_vcnt;
if (n > bytes)
n = bytes;
- if (!bio_add_pc_page(q, bio, page, n, offs))
+ if (!__bio_add_pc_page(q, bio, page, n, offs,
+ true))
break;
- /*
- * check if vector was merged with previous
- * drop page reference if needed
- */
- if (bio->bi_vcnt == prev_bi_vcnt)
- put_page(page);
-
added += n;
bytes -= n;
offs = 0;