author     Christoph Hellwig <hch@lst.de>      2021-10-12 14:12:19 +0300
committer  Jens Axboe <axboe@kernel.dk>        2021-10-18 15:17:36 +0300
commit     ef99b2d37666b7a600baab9e1c4944436652b0a2 (patch)
tree       81457d0c9620f8c7311bf76d532670fa5d53074f /block
parent     28a1ae6b9daba6ac65700eeb38479bd6fadec089 (diff)
download   linux-ef99b2d37666b7a600baab9e1c4944436652b0a2.tar.xz
block: replace the spin argument to blk_poll with a flags argument
Switch the boolean spin argument to blk_poll to passing a set of flags
instead. This will allow controlling polling behavior in a more
fine-grained way.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Tested-by: Mark Wunderlich <mark.wunderlich@intel.com>
Link: https://lore.kernel.org/r/20211012111226.760968-10-hch@lst.de
[axboe: adapt to changed io_uring iopoll]
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Diffstat (limited to 'block')
-rw-r--r--  block/blk-exec.c   2
-rw-r--r--  block/blk-mq.c    17
-rw-r--r--  block/fops.c       8
3 files changed, 12 insertions, 15 deletions
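In effect this is a calling-convention change for blk_poll(). A minimal before/after sketch, assuming a request queue q and a submission cookie qc as in the callers converted below; note that BLK_POLL_ONESHOT is referenced by the blk-mq.c hunk but defined elsewhere in the series, not in this diff:

    /* Before this patch: the third argument was a bool ("spin"). */
    blk_poll(q, qc, true);   /* keep polling until a completion is found */
    blk_poll(q, qc, false);  /* poll once, then return */

    /* After this patch: the third argument is a flags word. */
    blk_poll(q, qc, 0);                  /* keep polling (old spin == true) */
    blk_poll(q, qc, BLK_POLL_ONESHOT);   /* poll once (old spin == false) */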
diff --git a/block/blk-exec.c b/block/blk-exec.c
index d6cd501c0d34..1fa7f25e5726 100644
--- a/block/blk-exec.c
+++ b/block/blk-exec.c
@@ -71,7 +71,7 @@ static bool blk_rq_is_poll(struct request *rq)
 static void blk_rq_poll_completion(struct request *rq, struct completion *wait)
 {
         do {
-                blk_poll(rq->q, request_to_qc_t(rq->mq_hctx, rq), true);
+                blk_poll(rq->q, request_to_qc_t(rq->mq_hctx, rq), 0);
                 cond_resched();
         } while (!completion_done(wait));
 }
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 7d0d947921a6..6609e10657a8 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -4052,7 +4052,7 @@ static bool blk_mq_poll_hybrid(struct request_queue *q, blk_qc_t qc)
 }
 
 static int blk_mq_poll_classic(struct request_queue *q, blk_qc_t cookie,
-                bool spin)
+                unsigned int flags)
 {
         struct blk_mq_hw_ctx *hctx = blk_qc_to_hctx(q, cookie);
         long state = get_current_state();
@@ -4075,7 +4075,7 @@ static int blk_mq_poll_classic(struct request_queue *q, blk_qc_t cookie,
                 if (task_is_running(current))
                         return 1;
 
-                if (ret < 0 || !spin)
+                if (ret < 0 || (flags & BLK_POLL_ONESHOT))
                         break;
                 cpu_relax();
         } while (!need_resched());
@@ -4088,15 +4088,13 @@ static int blk_mq_poll_classic(struct request_queue *q, blk_qc_t cookie,
  * blk_poll - poll for IO completions
  * @q: the queue
  * @cookie: cookie passed back at IO submission time
- * @spin: whether to spin for completions
+ * @flags: BLK_POLL_* flags that control the behavior
  *
  * Description:
  *    Poll for completions on the passed in queue. Returns number of
- *    completed entries found. If @spin is true, then blk_poll will continue
- *    looping until at least one completion is found, unless the task is
- *    otherwise marked running (or we need to reschedule).
+ *    completed entries found.
  */
-int blk_poll(struct request_queue *q, blk_qc_t cookie, bool spin)
+int blk_poll(struct request_queue *q, blk_qc_t cookie, unsigned int flags)
 {
         if (cookie == BLK_QC_T_NONE ||
             !test_bit(QUEUE_FLAG_POLL, &q->queue_flags))
@@ -4105,12 +4103,11 @@ int blk_poll(struct request_queue *q, blk_qc_t cookie, bool spin)
         if (current->plug)
                 blk_flush_plug_list(current->plug, false);
 
-        /* If specified not to spin, we also should not sleep. */
-        if (spin && q->poll_nsec != BLK_MQ_POLL_CLASSIC) {
+        if (q->poll_nsec != BLK_MQ_POLL_CLASSIC) {
                 if (blk_mq_poll_hybrid(q, cookie))
                         return 1;
         }
-        return blk_mq_poll_classic(q, cookie, spin);
+        return blk_mq_poll_classic(q, cookie, flags);
 }
 EXPORT_SYMBOL_GPL(blk_poll);
diff --git a/block/fops.c b/block/fops.c
index 15324f2e5a91..db8f2fe68dd2 100644
--- a/block/fops.c
+++ b/block/fops.c
@@ -108,7 +108,7 @@ static ssize_t __blkdev_direct_IO_simple(struct kiocb *iocb,
                 if (!READ_ONCE(bio.bi_private))
                         break;
                 if (!(iocb->ki_flags & IOCB_HIPRI) ||
-                    !blk_poll(bdev_get_queue(bdev), qc, true))
+                    !blk_poll(bdev_get_queue(bdev), qc, 0))
                         blk_io_schedule();
         }
         __set_current_state(TASK_RUNNING);
@@ -141,12 +141,12 @@ struct blkdev_dio {
 
 static struct bio_set blkdev_dio_pool;
 
-static int blkdev_iopoll(struct kiocb *kiocb, bool wait)
+static int blkdev_iopoll(struct kiocb *kiocb, unsigned int flags)
 {
         struct block_device *bdev = I_BDEV(kiocb->ki_filp->f_mapping->host);
         struct request_queue *q = bdev_get_queue(bdev);
 
-        return blk_poll(q, READ_ONCE(kiocb->ki_cookie), wait);
+        return blk_poll(q, READ_ONCE(kiocb->ki_cookie), flags);
 }
 
 static void blkdev_bio_end_io(struct bio *bio)
@@ -297,7 +297,7 @@ static ssize_t __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
                 if (!READ_ONCE(dio->waiter))
                         break;
 
-                if (!do_poll || !blk_poll(bdev_get_queue(bdev), qc, true))
+                if (!do_poll || !blk_poll(bdev_get_queue(bdev), qc, 0))
                         blk_io_schedule();
         }
         __set_current_state(TASK_RUNNING);