author     Christoph Hellwig <hch@lst.de>  2018-12-02 19:46:26 +0300
committer  Jens Axboe <axboe@kernel.dk>    2018-12-04 21:38:19 +0300
commit     529262d56dbebe6a26df5d2fd24cc0e1bc8579e5 (patch)
tree       6040d2d4f2faa36d324559ea7fd2916b73130e1c /block/blk-mq.c
parent     9d6610b76fa374eae3deb93bcbace4a06c2e3b95 (diff)
download   linux-529262d56dbebe6a26df5d2fd24cc0e1bc8579e5.tar.xz
block: remove ->poll_fn
This was intended to support users like nvme multipath, but is just
getting in the way and adding another indirect call.

Reviewed-by: Sagi Grimberg <sagi@grimberg.me>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
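With ->poll_fn gone, in-kernel users reap completions by calling the now-exported
blk_poll() directly, passing the blk_qc_t cookie they saved at submission time.
The following caller-side sketch is not part of this commit: the wait_polled()
helper and the *done flag are hypothetical stand-ins for real users such as
__blkdev_direct_IO(), which pass the cookie returned by submit_bio().

#include <linux/blkdev.h>
#include <linux/sched.h>

/*
 * Hypothetical helper: spin on blk_poll() until the submitter's completion
 * flag flips, sleeping only when no completions were reaped.
 */
static void wait_polled(struct request_queue *q, blk_qc_t cookie, bool *done)
{
	for (;;) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (READ_ONCE(*done))
			break;

		/*
		 * blk_poll() returns the number of completed entries found;
		 * spin == true keeps it iterating until at least one shows up
		 * (or the task needs to reschedule).
		 */
		if (!blk_poll(q, cookie, true))
			io_schedule();
	}
	__set_current_state(TASK_RUNNING);
}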
Diffstat (limited to 'block/blk-mq.c')
-rw-r--r--  block/blk-mq.c | 24
1 file changed, 19 insertions(+), 5 deletions(-)
diff --git a/block/blk-mq.c b/block/blk-mq.c
index e09d7f500077..50d529602e05 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -38,7 +38,6 @@
 #include "blk-mq-sched.h"
 #include "blk-rq-qos.h"
 
-static int blk_mq_poll(struct request_queue *q, blk_qc_t cookie, bool spin);
 static void blk_mq_poll_stats_start(struct request_queue *q);
 static void blk_mq_poll_stats_fn(struct blk_stat_callback *cb);
 
@@ -2838,8 +2837,6 @@ struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
 	spin_lock_init(&q->requeue_lock);
 
 	blk_queue_make_request(q, blk_mq_make_request);
-	if (q->mq_ops->poll)
-		q->poll_fn = blk_mq_poll;
 
 	/*
 	 * Do this after blk_queue_make_request() overrides it...
@@ -3400,14 +3397,30 @@ static bool blk_mq_poll_hybrid(struct request_queue *q,
 	return blk_mq_poll_hybrid_sleep(q, hctx, rq);
 }
 
-static int blk_mq_poll(struct request_queue *q, blk_qc_t cookie, bool spin)
+/**
+ * blk_poll - poll for IO completions
+ * @q: the queue
+ * @cookie: cookie passed back at IO submission time
+ * @spin: whether to spin for completions
+ *
+ * Description:
+ *    Poll for completions on the passed in queue. Returns number of
+ *    completed entries found. If @spin is true, then blk_poll will continue
+ *    looping until at least one completion is found, unless the task is
+ *    otherwise marked running (or we need to reschedule).
+ */
+int blk_poll(struct request_queue *q, blk_qc_t cookie, bool spin)
 {
 	struct blk_mq_hw_ctx *hctx;
 	long state;
 
-	if (!test_bit(QUEUE_FLAG_POLL, &q->queue_flags))
+	if (!blk_qc_t_valid(cookie) ||
+	    !test_bit(QUEUE_FLAG_POLL, &q->queue_flags))
 		return 0;
 
+	if (current->plug)
+		blk_flush_plug_list(current->plug, false);
+
 	hctx = q->queue_hw_ctx[blk_qc_t_to_queue_num(cookie)];
 
 	/*
@@ -3448,6 +3461,7 @@ static int blk_mq_poll(struct request_queue *q, blk_qc_t cookie, bool spin)
 	__set_current_state(TASK_RUNNING);
 	return 0;
 }
+EXPORT_SYMBOL_GPL(blk_poll);
 
 unsigned int blk_mq_rq_cpu(struct request *rq)
 {