author		Jens Axboe <axboe@kernel.dk>	2018-11-26 18:24:43 +0300
committer	Jens Axboe <axboe@kernel.dk>	2018-11-26 18:25:53 +0300
commit		0a1b8b87d064a47fad9ec475316002da28559207 (patch)
tree		9bc87a52b3fcc1f476d52ae94d6bb7e69e2bfd94 /fs/block_dev.c
parent		e7d943910719b44738e86f91a26a64e3b61ae419 (diff)
download	linux-0a1b8b87d064a47fad9ec475316002da28559207.tar.xz
block: make blk_poll() take a parameter on whether to spin or not
blk_poll() has always kept spinning until it found an IO. This is fine for SYNC polling, since we need to find one request we have pending, but in preparation for ASYNC polling it can be beneficial to just check if we have any entries available or not.

Existing callers are converted to pass in 'spin == true', to retain the old behavior.

Signed-off-by: Jens Axboe <axboe@kernel.dk>
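For context, the interface this commit moves toward looks roughly like the sketch below: blk_poll() gains a third bool argument, and a non-spinning caller can use it to do a single completion check instead of busy-waiting. The helper and its return-value interpretation are assumptions for illustration, not code from this patch; the real prototype lives in include/linux/blkdev.h.

/*
 * Sketch only (not part of this commit): the changed interface and how a
 * hypothetical non-spinning (async-style) caller could use it.
 *
 *   int blk_poll(struct request_queue *q, blk_qc_t cookie, bool spin);
 *
 * spin == true  -> keep polling until the cookie's IO is found
 *                  (old behavior, what the converted callers below pass).
 * spin == false -> check for available completions once and return.
 */

#include <linux/blkdev.h>

/*
 * Hypothetical helper: poll once for 'qc' without spinning and report
 * whether anything completed.  An async poller would call this from its
 * own loop instead of letting blk_poll() spin on its behalf.  The "> 0
 * means progress" assumption reflects how the return value is used by
 * the synchronous callers in this patch.
 */
static bool example_poll_once(struct block_device *bdev, blk_qc_t qc)
{
	/* spin == false: just check, do not busy-wait */
	return blk_poll(bdev_get_queue(bdev), qc, false) > 0;
}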
Diffstat (limited to 'fs/block_dev.c')
-rw-r--r--	fs/block_dev.c	4
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/fs/block_dev.c b/fs/block_dev.c
index 64ba27b8b754..d233a59ea364 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -243,7 +243,7 @@ __blkdev_direct_IO_simple(struct kiocb *iocb, struct iov_iter *iter,
 			break;
 
 		if (!(iocb->ki_flags & IOCB_HIPRI) ||
-		    !blk_poll(bdev_get_queue(bdev), qc))
+		    !blk_poll(bdev_get_queue(bdev), qc, true))
 			io_schedule();
 	}
 	__set_current_state(TASK_RUNNING);
@@ -423,7 +423,7 @@ __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages)
 			break;
 
 		if (!(iocb->ki_flags & IOCB_HIPRI) ||
-		    !blk_poll(bdev_get_queue(bdev), qc))
+		    !blk_poll(bdev_get_queue(bdev), qc, true))
 			io_schedule();
 	}
 	__set_current_state(TASK_RUNNING);