author      Christoph Hellwig <hch@lst.de>   2021-09-20 15:33:26 +0300
committer   Jens Axboe <axboe@kernel.dk>     2021-10-18 15:17:02 +0300
commit      badf7f64378796d460c79eb0f182fa7282eb65d5 (patch)
tree        dc8f945a732d8a64eaddd22dc6ff508f7f8bf4af /block/blk-merge.c
parent      b81e0c2372e65e5627864ba034433b64b2fc73f5 (diff)
download    linux-badf7f64378796d460c79eb0f182fa7282eb65d5.tar.xz
block: move a few merge helpers out of <linux/blkdev.h>
These are block-layer internal helpers, so move them to block/blk.h and
block/blk-merge.c. Also update a comment a bit to use better grammar.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
Link: https://lore.kernel.org/r/20210920123328.1399408-16-hch@lst.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Diffstat (limited to 'block/blk-merge.c')
-rw-r--r--  block/blk-merge.c | 24
1 file changed, 24 insertions(+), 0 deletions(-)
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 7a5c81c02c80..39f210da399a 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -558,6 +558,23 @@ static inline unsigned int blk_rq_get_max_segments(struct request *rq)
         return queue_max_segments(rq->q);
 }
 
+static inline unsigned int blk_rq_get_max_sectors(struct request *rq,
+                                                  sector_t offset)
+{
+        struct request_queue *q = rq->q;
+
+        if (blk_rq_is_passthrough(rq))
+                return q->limits.max_hw_sectors;
+
+        if (!q->limits.chunk_sectors ||
+            req_op(rq) == REQ_OP_DISCARD ||
+            req_op(rq) == REQ_OP_SECURE_ERASE)
+                return blk_queue_get_max_sectors(q, req_op(rq));
+
+        return min(blk_max_size_offset(q, offset, 0),
+                   blk_queue_get_max_sectors(q, req_op(rq)));
+}
+
 static inline int ll_new_hw_segment(struct request *req, struct bio *bio,
                 unsigned int nr_phys_segs)
 {
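Note: the helper moved in the hunk above picks a request's size cap in three tiers: passthrough requests are limited only by the hardware maximum, discards and secure erases (or queues without chunk_sectors) use the per-operation queue limit, and everything else is additionally clipped so it does not cross a chunk_sectors boundary at the given offset. The following is a minimal, self-contained sketch of that same decision order, not kernel code: fake_limits, fake_request, get_max_sectors and max_size_at_offset are made-up stand-ins for struct request_queue, struct queue_limits and the blk_* helpers, and the per-operation limit is simplified to a single max_sectors value.

    #include <stdbool.h>
    #include <stdio.h>

    /* Hypothetical stand-ins for struct queue_limits and struct request;
     * only the fields the decision needs are modelled. */
    struct fake_limits {
            unsigned int max_hw_sectors;    /* hardware ceiling (passthrough) */
            unsigned int max_sectors;       /* normal per-request ceiling */
            unsigned int chunk_sectors;     /* 0 means no chunk boundary */
    };

    enum fake_op { OP_READ, OP_WRITE, OP_DISCARD, OP_SECURE_ERASE };

    struct fake_request {
            const struct fake_limits *limits;
            enum fake_op op;
            bool passthrough;
    };

    /* Rough analogue of blk_max_size_offset(): sectors that fit before the
     * next chunk_sectors boundary, clipped to max_sectors. */
    static unsigned int max_size_at_offset(const struct fake_limits *l,
                                           unsigned long long offset)
    {
            unsigned int to_boundary = l->chunk_sectors - (offset % l->chunk_sectors);

            return to_boundary < l->max_sectors ? to_boundary : l->max_sectors;
    }

    /* Same three-tier decision as blk_rq_get_max_sectors() in the hunk above. */
    static unsigned int get_max_sectors(const struct fake_request *rq,
                                        unsigned long long offset)
    {
            const struct fake_limits *l = rq->limits;

            /* Passthrough requests see only the hardware maximum. */
            if (rq->passthrough)
                    return l->max_hw_sectors;

            /* No chunk boundary, or ops allowed to span chunks:
             * use the per-operation limit (simplified to max_sectors here). */
            if (!l->chunk_sectors || rq->op == OP_DISCARD || rq->op == OP_SECURE_ERASE)
                    return l->max_sectors;

            /* Otherwise also clip the request at the next chunk boundary. */
            return max_size_at_offset(l, offset);
    }

    int main(void)
    {
            const struct fake_limits l = {
                    .max_hw_sectors = 2048, .max_sectors = 1280, .chunk_sectors = 256,
            };
            const struct fake_request rq = { .limits = &l, .op = OP_WRITE };

            /* 100 sectors into a 256-sector chunk: only 156 sectors fit. */
            printf("cap at offset 100: %u\n", get_max_sectors(&rq, 100));
            return 0;
    }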
@@ -718,6 +735,13 @@ static enum elv_merge blk_try_req_merge(struct request *req,
         return ELEVATOR_NO_MERGE;
 }
 
+static inline bool blk_write_same_mergeable(struct bio *a, struct bio *b)
+{
+        if (bio_page(a) == bio_page(b) && bio_offset(a) == bio_offset(b))
+                return true;
+        return false;
+}
+
 /*
  * For non-mq, this has to be called with the request spinlock acquired.
  * For mq with scheduling, the appropriate queue wide lock should be held.
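Note: the second helper moved by this patch governs merging of WRITE SAME bios, which carry their payload as a single page describing the pattern to write; two such bios may only be merged when they reference the same page at the same offset, i.e. the identical pattern buffer. Below is a minimal sketch of that check, where fake_bio and write_same_mergeable are made-up stand-ins for struct bio and the helper in the hunk above.

    #include <stdbool.h>
    #include <stdio.h>

    /* Stand-in for a bio whose payload is a single repeated page (as with
     * WRITE SAME); only the fields the check needs are modelled. */
    struct fake_bio {
            void *page;             /* page holding the pattern */
            unsigned int offset;    /* offset of the pattern within that page */
    };

    /* Same test as blk_write_same_mergeable(): the bios may only be merged
     * when they describe the identical pattern buffer. */
    static bool write_same_mergeable(const struct fake_bio *a, const struct fake_bio *b)
    {
            return a->page == b->page && a->offset == b->offset;
    }

    int main(void)
    {
            char pattern[4096];
            struct fake_bio a = { .page = pattern, .offset = 0 };
            struct fake_bio b = { .page = pattern, .offset = 0 };
            struct fake_bio c = { .page = pattern, .offset = 512 };

            printf("a+b mergeable: %d\n", write_same_mergeable(&a, &b)); /* 1 */
            printf("a+c mergeable: %d\n", write_same_mergeable(&a, &c)); /* 0 */
            return 0;
    }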