author		Christoph Hellwig <hch@lst.de>	2015-12-07 17:41:11 +0300
committer	Christoph Hellwig <hch@lst.de>	2015-12-11 22:52:26 +0300
commit		ea51190c03150fce4d9e428bfb608abbe0991db8 (patch)
tree		80c61fdc0817b5f1c5667155cf7256f688b71c63 /lib/irq_poll.c
parent		78d0264eb7a938f1eaf59fcb2d3f7da2567369d3 (diff)
download	linux-ea51190c03150fce4d9e428bfb608abbe0991db8.tar.xz
irq_poll: fold irq_poll_sched_prep into irq_poll_sched
There is no good reason to keep them apart, and this makes using the API a
bit simpler.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Bart Van Assche <bart.vanassche@sandisk.com>
Diffstat (limited to 'lib/irq_poll.c')
-rw-r--r--	lib/irq_poll.c	10
1 file changed, 7 insertions, 3 deletions
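To illustrate the caller-side simplification the commit message describes, here is a
minimal, hypothetical driver sketch (struct my_dev and my_dev_irq are illustrative names,
not part of this patch). Before the fold, a driver had to get a successful return from
irq_poll_sched_prep() before calling irq_poll_sched(); after it, the checks live inside
irq_poll_sched() itself. Note that irq_poll_sched_prep() returned true only when
test_and_set_bit() returned 0, so the negated test added in the hunk below returns before
scheduling in the common case; the negation was subsequently removed upstream.

#include <linux/interrupt.h>
#include <linux/irq_poll.h>

struct my_dev {				/* hypothetical per-device state */
	struct irq_poll iop;
};

static irqreturn_t my_dev_irq(int irq, void *data)
{
	struct my_dev *dev = data;

	/*
	 * Old API: the caller performed the prep check itself, e.g.
	 *
	 *	if (irq_poll_sched_prep(&dev->iop))
	 *		irq_poll_sched(&dev->iop);
	 *
	 * New API: the disabled/already-scheduled checks happen inside
	 * irq_poll_sched(), so a single unconditional call is enough.
	 */
	irq_poll_sched(&dev->iop);

	return IRQ_HANDLED;
}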
diff --git a/lib/irq_poll.c b/lib/irq_poll.c
index 88af87971e8c..43a3370a09fd 100644
--- a/lib/irq_poll.c
+++ b/lib/irq_poll.c
@@ -21,13 +21,17 @@ static DEFINE_PER_CPU(struct list_head, blk_cpu_iopoll);
  *
  * Description:
  *     Add this irq_poll structure to the pending poll list and trigger the
- *     raise of the blk iopoll softirq. The driver must already have gotten a
- *     successful return from irq_poll_sched_prep() before calling this.
+ *     raise of the blk iopoll softirq.
  **/
 void irq_poll_sched(struct irq_poll *iop)
 {
 	unsigned long flags;
 
+	if (test_bit(IRQ_POLL_F_DISABLE, &iop->state))
+		return;
+	if (!test_and_set_bit(IRQ_POLL_F_SCHED, &iop->state))
+		return;
+
 	local_irq_save(flags);
 	list_add_tail(&iop->list, this_cpu_ptr(&blk_cpu_iopoll));
 	__raise_softirq_irqoff(IRQ_POLL_SOFTIRQ);
@@ -58,7 +62,7 @@ EXPORT_SYMBOL(__irq_poll_complete);
  * Description:
  *     If a driver consumes less than the assigned budget in its run of the
  *     iopoll handler, it'll end the polled mode by calling this function. The
- *     iopoll handler will not be invoked again before irq_poll_sched_prep()
+ *     iopoll handler will not be invoked again before irq_poll_sched()
  *     is called.
  **/
 void irq_poll_complete(struct irq_poll *iop)
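For context on the other half of the API touched by the second hunk, here is a
hypothetical poll-handler sketch (my_dev_poll, my_dev_process_completions and
MY_DEV_POLL_WEIGHT are illustrative names, not from this patch, and it reuses the
struct my_dev from the sketch above): the softirq invokes the handler with a budget,
and the handler calls irq_poll_complete() once it does less work than that budget,
after which it is not invoked again until the next irq_poll_sched().

#include <linux/irq_poll.h>
#include <linux/kernel.h>

#define MY_DEV_POLL_WEIGHT	64	/* illustrative per-run budget */

/* Placeholder for the device-specific completion processing. */
static int my_dev_process_completions(struct my_dev *dev, int budget)
{
	return 0;	/* pretend no completions were found */
}

static int my_dev_poll(struct irq_poll *iop, int budget)
{
	struct my_dev *dev = container_of(iop, struct my_dev, iop);
	int done = my_dev_process_completions(dev, budget);

	if (done < budget)
		/*
		 * Under budget: leave polled mode.  The handler will not
		 * run again until irq_poll_sched() is called once more.
		 */
		irq_poll_complete(iop);

	return done;
}

static void my_dev_poll_setup(struct my_dev *dev)
{
	irq_poll_init(&dev->iop, MY_DEV_POLL_WEIGHT, my_dev_poll);
}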