author		Christoph Hellwig <hch@lst.de>	2023-04-13 09:40:55 +0300
committer	Jens Axboe <axboe@kernel.dk>	2023-04-13 15:52:30 +0300
commit		2b5976134bfbc753dec6281da0890c5f194c00c9 (patch)
tree		38c83c687aa41a7d202076fb5e40fe8db394e7aa /block/blk-mq.c
parent		710fa3789ed94ceee9675f8e189aaf3e7525269a (diff)
download	linux-2b5976134bfbc753dec6281da0890c5f194c00c9.tar.xz
blk-mq: pass a flags argument to blk_mq_request_bypass_insert
Replace the boolean at_head argument with the same flags that are
already passed to blk_mq_insert_request.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Bart Van Assche <bvanassche@acm.org>
Reviewed-by: Damien Le Moal <dlemoal@kernel.org>
Link: https://lore.kernel.org/r/20230413064057.707578-19-hch@lst.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>
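For context, the flag type used in this patch is the one already taken by
blk_mq_insert_request (introduced by the parent commit in this series). A
minimal sketch of the relevant definitions, paraphrased from block/blk-mq.h
rather than quoted verbatim:

	/* Paraphrased from block/blk-mq.h in this series, not a verbatim
	 * quote: blk_insert_t is a sparse __bitwise flag word, so flag
	 * values are defined with a __force cast. */
	typedef unsigned int __bitwise blk_insert_t;

	/* Insert the request at the head of the target list, not the tail. */
	#define BLK_MQ_INSERT_AT_HEAD	((__force blk_insert_t)0x01)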
Diffstat (limited to 'block/blk-mq.c')
-rw-r--r--	block/blk-mq.c	18
1 file changed, 9 insertions(+), 9 deletions(-)
diff --git a/block/blk-mq.c b/block/blk-mq.c
index ba64c4621e29..ff74559d7da1 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1447,7 +1447,7 @@ static void blk_mq_requeue_work(struct work_struct *work)
if (rq->rq_flags & RQF_DONTPREP) {
rq->rq_flags &= ~RQF_SOFTBARRIER;
list_del_init(&rq->queuelist);
- blk_mq_request_bypass_insert(rq, false);
+ blk_mq_request_bypass_insert(rq, 0);
} else if (rq->rq_flags & RQF_SOFTBARRIER) {
rq->rq_flags &= ~RQF_SOFTBARRIER;
list_del_init(&rq->queuelist);
@@ -2457,17 +2457,17 @@ static void blk_mq_run_work_fn(struct work_struct *work)
/**
* blk_mq_request_bypass_insert - Insert a request at dispatch list.
* @rq: Pointer to request to be inserted.
- * @at_head: true if the request should be inserted at the head of the list.
+ * @flags: BLK_MQ_INSERT_*
*
* Should only be used carefully, when the caller knows we want to
* bypass a potential IO scheduler on the target device.
*/
-void blk_mq_request_bypass_insert(struct request *rq, bool at_head)
+void blk_mq_request_bypass_insert(struct request *rq, blk_insert_t flags)
{
struct blk_mq_hw_ctx *hctx = rq->mq_hctx;

spin_lock(&hctx->lock);
- if (at_head)
+ if (flags & BLK_MQ_INSERT_AT_HEAD)
list_add(&rq->queuelist, &hctx->dispatch);
else
list_add_tail(&rq->queuelist, &hctx->dispatch);
@@ -2526,7 +2526,7 @@ static void blk_mq_insert_request(struct request *rq, blk_insert_t flags)
* and it is added to the scheduler queue, there is no chance to
* dispatch it given we prioritize requests in hctx->dispatch.
*/
- blk_mq_request_bypass_insert(rq, flags & BLK_MQ_INSERT_AT_HEAD);
+ blk_mq_request_bypass_insert(rq, flags);
} else if (rq->rq_flags & RQF_FLUSH_SEQ) {
/*
* Firstly normal IO request is inserted to scheduler queue or
@@ -2549,7 +2549,7 @@ static void blk_mq_insert_request(struct request *rq, blk_insert_t flags)
* Simply queue flush rq to the front of hctx->dispatch so that
* intensive flush workloads can benefit in case of NCQ HW.
*/
- blk_mq_request_bypass_insert(rq, true);
+ blk_mq_request_bypass_insert(rq, BLK_MQ_INSERT_AT_HEAD);
} else if (q->elevator) {
LIST_HEAD(list);

@@ -2670,7 +2670,7 @@ static void blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
break;
case BLK_STS_RESOURCE:
case BLK_STS_DEV_RESOURCE:
- blk_mq_request_bypass_insert(rq, false);
+ blk_mq_request_bypass_insert(rq, 0);
blk_mq_run_hw_queue(hctx, false);
break;
default:
@@ -2718,7 +2718,7 @@ static void blk_mq_plug_issue_direct(struct blk_plug *plug)
break;
case BLK_STS_RESOURCE:
case BLK_STS_DEV_RESOURCE:
- blk_mq_request_bypass_insert(rq, false);
+ blk_mq_request_bypass_insert(rq, 0);
blk_mq_run_hw_queue(hctx, false);
goto out;
default:
@@ -2837,7 +2837,7 @@ static void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
break;
case BLK_STS_RESOURCE:
case BLK_STS_DEV_RESOURCE:
- blk_mq_request_bypass_insert(rq, false);
+ blk_mq_request_bypass_insert(rq, 0);
if (list_empty(list))
blk_mq_run_hw_queue(hctx, false);
goto out;
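The kernel-doc above notes that blk_mq_request_bypass_insert() should only be
used when the caller deliberately wants to skip a potential I/O scheduler;
after this patch, head-vs-tail placement is expressed with the same flag word
at every such call site. A hypothetical illustration of the convention, not
part of this patch:

	/* Hypothetical call sites, for illustration only:
	 * tail insertion passes no flags... */
	blk_mq_request_bypass_insert(rq, 0);
	/* ...while head insertion, e.g. for a flush that should run
	 * before queued requests, passes BLK_MQ_INSERT_AT_HEAD. */
	blk_mq_request_bypass_insert(rq, BLK_MQ_INSERT_AT_HEAD);

Using a typed flag word instead of a bool keeps call sites self-documenting
and leaves room for further BLK_MQ_INSERT_* flags without another signature
change.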