author		Bart Van Assche <bvanassche@acm.org>	2023-05-17 20:42:23 +0300
committer	Jens Axboe <axboe@kernel.dk>	2023-05-19 04:47:49 +0300
commit		19821fee3ed42e5b294e95814892d0ad6a9890c9 (patch)
tree		a5fe612c330d4792f3cdce1308ce291a06e0c179 /include/linux/blk-mq.h
parent		a370798201b537f78288e4ef5e0f7fc70889e7ee (diff)
download	linux-19821fee3ed42e5b294e95814892d0ad6a9890c9.tar.xz
block: Introduce blk_rq_is_seq_zoned_write()
Introduce the function blk_rq_is_seq_zoned_write(). This function will be
used in later patches to preserve the order of zoned writes that require
write serialization.

This patch includes an optimization: instead of using
rq->q->disk->part0->bd_queue to check whether or not the queue is associated
with a zoned block device, use rq->q->disk->queue.

Cc: Christoph Hellwig <hch@lst.de>
Cc: Damien Le Moal <dlemoal@kernel.org>
Cc: Ming Lei <ming.lei@redhat.com>
Signed-off-by: Bart Van Assche <bvanassche@acm.org>
Reviewed-by: Damien Le Moal <dlemoal@kernel.org>
Link: https://lore.kernel.org/r/20230517174230.897144-6-bvanassche@acm.org
Signed-off-by: Jens Axboe <axboe@kernel.dk>
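For illustration only (not part of this patch): a requeue path could consult
the new helper to decide whether a request's submission order must be
preserved. The function example_requeue() and the policy it applies are
hypothetical; blk_mq_requeue_request() is an existing blk-mq helper.

#include <linux/blk-mq.h>

/*
 * Hypothetical sketch: requeue a request and only kick the requeue list
 * immediately when reordering is harmless. Sequential zoned writes must
 * keep their submission order, so the kick is deferred for those.
 */
static void example_requeue(struct request *rq)
{
	bool kick = !blk_rq_is_seq_zoned_write(rq);

	blk_mq_requeue_request(rq, kick);
}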
Diffstat (limited to 'include/linux/blk-mq.h')
-rw-r--r--	include/linux/blk-mq.h	17
1 file changed, 17 insertions(+), 0 deletions(-)
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index e4a211957db6..49d14b1acfa5 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -1164,6 +1164,18 @@ static inline unsigned int blk_rq_zone_is_seq(struct request *rq)
 	return disk_zone_is_seq(rq->q->disk, blk_rq_pos(rq));
 }
+/**
+ * blk_rq_is_seq_zoned_write() - Check if @rq requires write serialization.
+ * @rq: Request to examine.
+ *
+ * Note: REQ_OP_ZONE_APPEND requests do not require serialization.
+ */
+static inline bool blk_rq_is_seq_zoned_write(struct request *rq)
+{
+	return op_needs_zoned_write_locking(req_op(rq)) &&
+		blk_rq_zone_is_seq(rq);
+}
+
 bool blk_req_needs_zone_write_lock(struct request *rq);
 bool blk_req_zone_write_trylock(struct request *rq);
 void __blk_req_zone_write_lock(struct request *rq);
@@ -1194,6 +1206,11 @@ static inline bool blk_req_can_dispatch_to_zone(struct request *rq)
 	return !blk_req_zone_is_write_locked(rq);
 }
 #else /* CONFIG_BLK_DEV_ZONED */
+static inline bool blk_rq_is_seq_zoned_write(struct request *rq)
+{
+	return false;
+}
+
 static inline bool blk_req_needs_zone_write_lock(struct request *rq)
 {
 	return false;