author     Jens Axboe <jaxboe@fusionio.com>  2011-03-10 10:58:35 +0300
committer  Jens Axboe <jaxboe@fusionio.com>  2011-03-10 10:58:35 +0300
commit     4c63f5646e405b5010cc9499419060bf2e838f5b (patch)
tree       df91ba315032c8ec4aafeb3ab96fdfa7c6c656e1
parent     cafb0bfca1a73efd6d8a4a6a6a716e6134b96c24 (diff)
parent     69d60eb96ae8a73cf9b79cf28051caf973006011 (diff)
download   linux-4c63f5646e405b5010cc9499419060bf2e838f5b.tar.xz
Merge branch 'for-2.6.39/stack-plug' into for-2.6.39/core
Conflicts:
	block/blk-core.c
	block/blk-flush.c
	drivers/md/raid1.c
	drivers/md/raid10.c
	drivers/md/raid5.c
	fs/nilfs2/btnode.c
	fs/nilfs2/mdt.c

Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
-rw-r--r--  Documentation/block/biodoc.txt | 5
-rw-r--r--  block/blk-core.c | 540
-rw-r--r--  block/blk-exec.c | 4
-rw-r--r--  block/blk-flush.c | 6
-rw-r--r--  block/blk-settings.c | 8
-rw-r--r--  block/blk-throttle.c | 4
-rw-r--r--  block/blk.h | 2
-rw-r--r--  block/cfq-iosched.c | 8
-rw-r--r--  block/deadline-iosched.c | 9
-rw-r--r--  block/elevator.c | 49
-rw-r--r--  block/noop-iosched.c | 8
-rw-r--r--  drivers/block/cciss.c | 6
-rw-r--r--  drivers/block/cpqarray.c | 3
-rw-r--r--  drivers/block/drbd/drbd_actlog.c | 4
-rw-r--r--  drivers/block/drbd/drbd_bitmap.c | 1
-rw-r--r--  drivers/block/drbd/drbd_int.h | 16
-rw-r--r--  drivers/block/drbd/drbd_main.c | 36
-rw-r--r--  drivers/block/drbd/drbd_receiver.c | 29
-rw-r--r--  drivers/block/drbd/drbd_req.c | 4
-rw-r--r--  drivers/block/drbd/drbd_worker.c | 1
-rw-r--r--  drivers/block/drbd/drbd_wrappers.h | 18
-rw-r--r--  drivers/block/floppy.c | 1
-rw-r--r--  drivers/block/loop.c | 13
-rw-r--r--  drivers/block/pktcdvd.c | 2
-rw-r--r--  drivers/block/umem.c | 16
-rw-r--r--  drivers/ide/ide-atapi.c | 3
-rw-r--r--  drivers/ide/ide-cd.c | 13
-rw-r--r--  drivers/ide/ide-io.c | 4
-rw-r--r--  drivers/ide/ide-park.c | 2
-rw-r--r--  drivers/md/bitmap.c | 5
-rw-r--r--  drivers/md/dm-crypt.c | 9
-rw-r--r--  drivers/md/dm-io.c | 2
-rw-r--r--  drivers/md/dm-kcopyd.c | 55
-rw-r--r--  drivers/md/dm-raid.c | 2
-rw-r--r--  drivers/md/dm-raid1.c | 2
-rw-r--r--  drivers/md/dm-table.c | 24
-rw-r--r--  drivers/md/dm.c | 33
-rw-r--r--  drivers/md/linear.c | 17
-rw-r--r--  drivers/md/md.c | 12
-rw-r--r--  drivers/md/multipath.c | 31
-rw-r--r--  drivers/md/raid0.c | 16
-rw-r--r--  drivers/md/raid1.c | 86
-rw-r--r--  drivers/md/raid10.c | 90
-rw-r--r--  drivers/md/raid5.c | 63
-rw-r--r--  drivers/md/raid5.h | 2
-rw-r--r--  drivers/message/i2o/i2o_block.c | 6
-rw-r--r--  drivers/mmc/card/queue.c | 3
-rw-r--r--  drivers/s390/block/dasd.c | 2
-rw-r--r--  drivers/s390/char/tape_block.c | 1
-rw-r--r--  drivers/scsi/scsi_lib.c | 44
-rw-r--r--  drivers/scsi/scsi_transport_fc.c | 2
-rw-r--r--  drivers/scsi/scsi_transport_sas.c | 6
-rw-r--r--  drivers/target/target_core_iblock.c | 7
-rw-r--r--  fs/adfs/inode.c | 1
-rw-r--r--  fs/affs/file.c | 2
-rw-r--r--  fs/aio.c | 77
-rw-r--r--  fs/befs/linuxvfs.c | 1
-rw-r--r--  fs/bfs/file.c | 1
-rw-r--r--  fs/block_dev.c | 1
-rw-r--r--  fs/btrfs/disk-io.c | 79
-rw-r--r--  fs/btrfs/extent_io.c | 2
-rw-r--r--  fs/btrfs/inode.c | 1
-rw-r--r--  fs/btrfs/volumes.c | 91
-rw-r--r--  fs/buffer.c | 45
-rw-r--r--  fs/cifs/file.c | 30
-rw-r--r--  fs/direct-io.c | 7
-rw-r--r--  fs/efs/inode.c | 1
-rw-r--r--  fs/exofs/inode.c | 1
-rw-r--r--  fs/ext2/inode.c | 2
-rw-r--r--  fs/ext3/inode.c | 3
-rw-r--r--  fs/ext4/inode.c | 4
-rw-r--r--  fs/ext4/page-io.c | 3
-rw-r--r--  fs/fat/inode.c | 1
-rw-r--r--  fs/freevxfs/vxfs_subr.c | 1
-rw-r--r--  fs/fuse/inode.c | 1
-rw-r--r--  fs/gfs2/aops.c | 3
-rw-r--r--  fs/gfs2/log.c | 4
-rw-r--r--  fs/gfs2/lops.c | 12
-rw-r--r--  fs/gfs2/meta_io.c | 3
-rw-r--r--  fs/hfs/inode.c | 2
-rw-r--r--  fs/hfsplus/inode.c | 2
-rw-r--r--  fs/hpfs/file.c | 1
-rw-r--r--  fs/isofs/inode.c | 1
-rw-r--r--  fs/jbd/commit.c | 2
-rw-r--r--  fs/jbd2/commit.c | 6
-rw-r--r--  fs/jfs/inode.c | 1
-rw-r--r--  fs/jfs/jfs_metapage.c | 1
-rw-r--r--  fs/logfs/dev_bdev.c | 2
-rw-r--r--  fs/minix/inode.c | 1
-rw-r--r--  fs/mpage.c | 8
-rw-r--r--  fs/nilfs2/btnode.c | 7
-rw-r--r--  fs/nilfs2/gcinode.c | 1
-rw-r--r--  fs/nilfs2/inode.c | 1
-rw-r--r--  fs/nilfs2/mdt.c | 9
-rw-r--r--  fs/nilfs2/page.c | 5
-rw-r--r--  fs/nilfs2/page.h | 3
-rw-r--r--  fs/nilfs2/segbuf.c | 2
-rw-r--r--  fs/ntfs/aops.c | 4
-rw-r--r--  fs/ntfs/compress.c | 3
-rw-r--r--  fs/ocfs2/aops.c | 1
-rw-r--r--  fs/ocfs2/cluster/heartbeat.c | 4
-rw-r--r--  fs/omfs/file.c | 1
-rw-r--r--  fs/qnx4/inode.c | 1
-rw-r--r--  fs/reiserfs/inode.c | 1
-rw-r--r--  fs/sysv/itree.c | 1
-rw-r--r--  fs/ubifs/super.c | 1
-rw-r--r--  fs/udf/file.c | 1
-rw-r--r--  fs/udf/inode.c | 1
-rw-r--r--  fs/ufs/inode.c | 1
-rw-r--r--  fs/ufs/truncate.c | 2
-rw-r--r--  fs/xfs/linux-2.6/xfs_aops.c | 4
-rw-r--r--  fs/xfs/linux-2.6/xfs_buf.c | 13
-rw-r--r--  include/linux/backing-dev.h | 16
-rw-r--r--  include/linux/blk_types.h | 4
-rw-r--r--  include/linux/blkdev.h | 73
-rw-r--r--  include/linux/buffer_head.h | 1
-rw-r--r--  include/linux/device-mapper.h | 5
-rw-r--r--  include/linux/elevator.h | 8
-rw-r--r--  include/linux/fs.h | 29
-rw-r--r--  include/linux/pagemap.h | 12
-rw-r--r--  include/linux/sched.h | 6
-rw-r--r--  include/linux/swap.h | 2
-rw-r--r--  kernel/exit.c | 1
-rw-r--r--  kernel/fork.c | 3
-rw-r--r--  kernel/power/block_io.c | 2
-rw-r--r--  kernel/sched.c | 12
-rw-r--r--  mm/backing-dev.c | 6
-rw-r--r--  mm/filemap.c | 74
-rw-r--r--  mm/memory-failure.c | 8
-rw-r--r--  mm/nommu.c | 4
-rw-r--r--  mm/page-writeback.c | 2
-rw-r--r--  mm/page_io.c | 2
-rw-r--r--  mm/readahead.c | 18
-rw-r--r--  mm/shmem.c | 1
-rw-r--r--  mm/swap_state.c | 5
-rw-r--r--  mm/swapfile.c | 37
-rw-r--r--  mm/vmscan.c | 2
137 files changed, 606 insertions(+), 1533 deletions(-)
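
For orientation before the diff itself: the "stack plug" branch replaces the old per-queue plug/unplug machinery with an on-stack plug owned by the submitting task (struct blk_plug, blk_start_plug()/blk_finish_plug(), added to block/blk-core.c below). A minimal usage sketch, illustrative only and not part of this commit; next_bio() is a hypothetical helper standing in for whatever produces the bios:

	struct blk_plug plug;
	struct bio *bio;

	blk_start_plug(&plug);			/* requests now collect on current->plug */
	while ((bio = next_bio()) != NULL)	/* hypothetical bio source */
		submit_bio(bio->bi_rw, bio);	/* can merge into the plugged list without the queue lock */
	blk_finish_plug(&plug);			/* sort by queue and dispatch everything at once */

While the plug is active, __make_request() links new requests onto current->plug instead of the request queue; blk_finish_plug() flushes them to the drivers, and the kernel/sched.c change in the diffstat hooks blk_flush_plug() into schedule() so that a plugged task going to sleep does not sit on its requests.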
diff --git a/Documentation/block/biodoc.txt b/Documentation/block/biodoc.txt
index b9a83dd24732..2a7b38c832c7 100644
--- a/Documentation/block/biodoc.txt
+++ b/Documentation/block/biodoc.txt
@@ -963,11 +963,6 @@ elevator_dispatch_fn* fills the dispatch queue with ready requests.
elevator_add_req_fn* called to add a new request into the scheduler
-elevator_queue_empty_fn returns true if the merge queue is empty.
- Drivers shouldn't use this, but rather check
- if elv_next_request is NULL (without losing the
- request if one exists!)
-
elevator_former_req_fn
elevator_latter_req_fn These return the request before or after the
one specified in disk sort order. Used by the
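
The removed paragraph pointed drivers at elv_next_request() rather than a queue-empty hook; in this tree the equivalent pattern is blk_peek_request()/blk_start_request(), as the cciss and cpqarray hunks below already use. A hedged sketch of that request_fn loop (example_request_fn is illustrative, and the hardware-issue step is elided):

	static void example_request_fn(struct request_queue *q)
	{
		struct request *rq;

		/* called with q->queue_lock held */
		while ((rq = blk_peek_request(q)) != NULL) {
			blk_start_request(rq);
			/* ... hand rq to the hardware, complete it later ... */
		}
	}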
diff --git a/block/blk-core.c b/block/blk-core.c
index 74d496ccf4d7..e1fcf7a24668 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -27,6 +27,7 @@
#include <linux/writeback.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/fault-inject.h>
+#include <linux/list_sort.h>
#define CREATE_TRACE_POINTS
#include <trace/events/block.h>
@@ -198,135 +199,43 @@ void blk_dump_rq_flags(struct request *rq, char *msg)
EXPORT_SYMBOL(blk_dump_rq_flags);
/*
- * "plug" the device if there are no outstanding requests: this will
- * force the transfer to start only after we have put all the requests
- * on the list.
- *
- * This is called with interrupts off and no requests on the queue and
- * with the queue lock held.
- */
-void blk_plug_device(struct request_queue *q)
+ * Make sure that plugs that were pending when this function was entered,
+ * are now complete and requests pushed to the queue.
+*/
+static inline void queue_sync_plugs(struct request_queue *q)
{
- WARN_ON(!irqs_disabled());
-
/*
- * don't plug a stopped queue, it must be paired with blk_start_queue()
- * which will restart the queueing
+ * If the current process is plugged and has barriers submitted,
+ * we will livelock if we don't unplug first.
*/
- if (blk_queue_stopped(q))
- return;
-
- if (!queue_flag_test_and_set(QUEUE_FLAG_PLUGGED, q)) {
- mod_timer(&q->unplug_timer, jiffies + q->unplug_delay);
- trace_block_plug(q);
- }
-}
-EXPORT_SYMBOL(blk_plug_device);
-
-/**
- * blk_plug_device_unlocked - plug a device without queue lock held
- * @q: The &struct request_queue to plug
- *
- * Description:
- * Like @blk_plug_device(), but grabs the queue lock and disables
- * interrupts.
- **/
-void blk_plug_device_unlocked(struct request_queue *q)
-{
- unsigned long flags;
-
- spin_lock_irqsave(q->queue_lock, flags);
- blk_plug_device(q);
- spin_unlock_irqrestore(q->queue_lock, flags);
-}
-EXPORT_SYMBOL(blk_plug_device_unlocked);
-
-/*
- * remove the queue from the plugged list, if present. called with
- * queue lock held and interrupts disabled.
- */
-int blk_remove_plug(struct request_queue *q)
-{
- WARN_ON(!irqs_disabled());
-
- if (!queue_flag_test_and_clear(QUEUE_FLAG_PLUGGED, q))
- return 0;
-
- del_timer(&q->unplug_timer);
- return 1;
+ blk_flush_plug(current);
}
-EXPORT_SYMBOL(blk_remove_plug);
-/*
- * remove the plug and let it rip..
- */
-void __generic_unplug_device(struct request_queue *q)
+static void blk_delay_work(struct work_struct *work)
{
- if (unlikely(blk_queue_stopped(q)))
- return;
- if (!blk_remove_plug(q) && !blk_queue_nonrot(q))
- return;
+ struct request_queue *q;
- q->request_fn(q);
+ q = container_of(work, struct request_queue, delay_work.work);
+ spin_lock_irq(q->queue_lock);
+ __blk_run_queue(q, false);
+ spin_unlock_irq(q->queue_lock);
}
/**
- * generic_unplug_device - fire a request queue
- * @q: The &struct request_queue in question
+ * blk_delay_queue - restart queueing after defined interval
+ * @q: The &struct request_queue in question
+ * @msecs: Delay in msecs
*
* Description:
- * Linux uses plugging to build bigger requests queues before letting
- * the device have at them. If a queue is plugged, the I/O scheduler
- * is still adding and merging requests on the queue. Once the queue
- * gets unplugged, the request_fn defined for the queue is invoked and
- * transfers started.
- **/
-void generic_unplug_device(struct request_queue *q)
-{
- if (blk_queue_plugged(q)) {
- spin_lock_irq(q->queue_lock);
- __generic_unplug_device(q);
- spin_unlock_irq(q->queue_lock);
- }
-}
-EXPORT_SYMBOL(generic_unplug_device);
-
-static void blk_backing_dev_unplug(struct backing_dev_info *bdi,
- struct page *page)
-{
- struct request_queue *q = bdi->unplug_io_data;
-
- blk_unplug(q);
-}
-
-void blk_unplug_work(struct work_struct *work)
-{
- struct request_queue *q =
- container_of(work, struct request_queue, unplug_work);
-
- trace_block_unplug_io(q);
- q->unplug_fn(q);
-}
-
-void blk_unplug_timeout(unsigned long data)
-{
- struct request_queue *q = (struct request_queue *)data;
-
- trace_block_unplug_timer(q);
- kblockd_schedule_work(q, &q->unplug_work);
-}
-
-void blk_unplug(struct request_queue *q)
+ * Sometimes queueing needs to be postponed for a little while, to allow
+ * resources to come back. This function will make sure that queueing is
+ * restarted around the specified time.
+ */
+void blk_delay_queue(struct request_queue *q, unsigned long msecs)
{
- /*
- * devices don't necessarily have an ->unplug_fn defined
- */
- if (q->unplug_fn) {
- trace_block_unplug_io(q);
- q->unplug_fn(q);
- }
+ schedule_delayed_work(&q->delay_work, msecs_to_jiffies(msecs));
}
-EXPORT_SYMBOL(blk_unplug);
+EXPORT_SYMBOL(blk_delay_queue);
/**
* blk_start_queue - restart a previously stopped queue
@@ -362,7 +271,7 @@ EXPORT_SYMBOL(blk_start_queue);
**/
void blk_stop_queue(struct request_queue *q)
{
- blk_remove_plug(q);
+ cancel_delayed_work(&q->delay_work);
queue_flag_set(QUEUE_FLAG_STOPPED, q);
}
EXPORT_SYMBOL(blk_stop_queue);
@@ -387,9 +296,9 @@ EXPORT_SYMBOL(blk_stop_queue);
*/
void blk_sync_queue(struct request_queue *q)
{
- del_timer_sync(&q->unplug_timer);
del_timer_sync(&q->timeout);
- cancel_work_sync(&q->unplug_work);
+ cancel_delayed_work_sync(&q->delay_work);
+ queue_sync_plugs(q);
}
EXPORT_SYMBOL(blk_sync_queue);
@@ -405,14 +314,9 @@ EXPORT_SYMBOL(blk_sync_queue);
*/
void __blk_run_queue(struct request_queue *q, bool force_kblockd)
{
- blk_remove_plug(q);
-
if (unlikely(blk_queue_stopped(q)))
return;
- if (elv_queue_empty(q))
- return;
-
/*
* Only recurse once to avoid overrunning the stack, let the unplug
* handling reinvoke the handler shortly if we already got there.
@@ -420,10 +324,8 @@ void __blk_run_queue(struct request_queue *q, bool force_kblockd)
if (!force_kblockd && !queue_flag_test_and_set(QUEUE_FLAG_REENTER, q)) {
q->request_fn(q);
queue_flag_clear(QUEUE_FLAG_REENTER, q);
- } else {
- queue_flag_set(QUEUE_FLAG_PLUGGED, q);
- kblockd_schedule_work(q, &q->unplug_work);
- }
+ } else
+ queue_delayed_work(kblockd_workqueue, &q->delay_work, 0);
}
EXPORT_SYMBOL(__blk_run_queue);
@@ -517,8 +419,6 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
if (!q)
return NULL;
- q->backing_dev_info.unplug_io_fn = blk_backing_dev_unplug;
- q->backing_dev_info.unplug_io_data = q;
q->backing_dev_info.ra_pages =
(VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE;
q->backing_dev_info.state = 0;
@@ -538,13 +438,12 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
setup_timer(&q->backing_dev_info.laptop_mode_wb_timer,
laptop_mode_timer_fn, (unsigned long) q);
- init_timer(&q->unplug_timer);
setup_timer(&q->timeout, blk_rq_timed_out_timer, (unsigned long) q);
INIT_LIST_HEAD(&q->timeout_list);
INIT_LIST_HEAD(&q->flush_queue[0]);
INIT_LIST_HEAD(&q->flush_queue[1]);
INIT_LIST_HEAD(&q->flush_data_in_flight);
- INIT_WORK(&q->unplug_work, blk_unplug_work);
+ INIT_DELAYED_WORK(&q->delay_work, blk_delay_work);
kobject_init(&q->kobj, &blk_queue_ktype);
@@ -639,7 +538,6 @@ blk_init_allocated_queue_node(struct request_queue *q, request_fn_proc *rfn,
q->request_fn = rfn;
q->prep_rq_fn = NULL;
q->unprep_rq_fn = NULL;
- q->unplug_fn = generic_unplug_device;
q->queue_flags = QUEUE_FLAG_DEFAULT;
/* Override internal queue lock with supplied lock pointer */
@@ -677,6 +575,8 @@ int blk_get_queue(struct request_queue *q)
static inline void blk_free_request(struct request_queue *q, struct request *rq)
{
+ BUG_ON(rq->cmd_flags & REQ_ON_PLUG);
+
if (rq->cmd_flags & REQ_ELVPRIV)
elv_put_request(q, rq);
mempool_free(rq, q->rq.rq_pool);
@@ -898,8 +798,8 @@ out:
}
/*
- * No available requests for this queue, unplug the device and wait for some
- * requests to become available.
+ * No available requests for this queue, wait for some requests to become
+ * available.
*
* Called with q->queue_lock held, and returns with it unlocked.
*/
@@ -920,7 +820,6 @@ static struct request *get_request_wait(struct request_queue *q, int rw_flags,
trace_block_sleeprq(q, bio, rw_flags & 1);
- __generic_unplug_device(q);
spin_unlock_irq(q->queue_lock);
io_schedule();
@@ -1042,6 +941,13 @@ void blk_requeue_request(struct request_queue *q, struct request *rq)
}
EXPORT_SYMBOL(blk_requeue_request);
+static void add_acct_request(struct request_queue *q, struct request *rq,
+ int where)
+{
+ drive_stat_acct(rq, 1);
+ __elv_add_request(q, rq, where);
+}
+
/**
* blk_insert_request - insert a special request into a request queue
* @q: request queue where request should be inserted
@@ -1084,8 +990,7 @@ void blk_insert_request(struct request_queue *q, struct request *rq,
if (blk_rq_tagged(rq))
blk_queue_end_tag(q, rq);
- drive_stat_acct(rq, 1);
- __elv_add_request(q, rq, where, 0);
+ add_acct_request(q, rq, where);
__blk_run_queue(q, false);
spin_unlock_irqrestore(q->queue_lock, flags);
}
@@ -1206,6 +1111,113 @@ void blk_add_request_payload(struct request *rq, struct page *page,
}
EXPORT_SYMBOL_GPL(blk_add_request_payload);
+static bool bio_attempt_back_merge(struct request_queue *q, struct request *req,
+ struct bio *bio)
+{
+ const int ff = bio->bi_rw & REQ_FAILFAST_MASK;
+
+ /*
+ * Debug stuff, kill later
+ */
+ if (!rq_mergeable(req)) {
+ blk_dump_rq_flags(req, "back");
+ return false;
+ }
+
+ if (!ll_back_merge_fn(q, req, bio))
+ return false;
+
+ trace_block_bio_backmerge(q, bio);
+
+ if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff)
+ blk_rq_set_mixed_merge(req);
+
+ req->biotail->bi_next = bio;
+ req->biotail = bio;
+ req->__data_len += bio->bi_size;
+ req->ioprio = ioprio_best(req->ioprio, bio_prio(bio));
+
+ drive_stat_acct(req, 0);
+ return true;
+}
+
+static bool bio_attempt_front_merge(struct request_queue *q,
+ struct request *req, struct bio *bio)
+{
+ const int ff = bio->bi_rw & REQ_FAILFAST_MASK;
+ sector_t sector;
+
+ /*
+ * Debug stuff, kill later
+ */
+ if (!rq_mergeable(req)) {
+ blk_dump_rq_flags(req, "front");
+ return false;
+ }
+
+ if (!ll_front_merge_fn(q, req, bio))
+ return false;
+
+ trace_block_bio_frontmerge(q, bio);
+
+ if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff)
+ blk_rq_set_mixed_merge(req);
+
+ sector = bio->bi_sector;
+
+ bio->bi_next = req->bio;
+ req->bio = bio;
+
+ /*
+ * may not be valid. if the low level driver said
+ * it didn't need a bounce buffer then it better
+ * not touch req->buffer either...
+ */
+ req->buffer = bio_data(bio);
+ req->__sector = bio->bi_sector;
+ req->__data_len += bio->bi_size;
+ req->ioprio = ioprio_best(req->ioprio, bio_prio(bio));
+
+ drive_stat_acct(req, 0);
+ return true;
+}
+
+/*
+ * Attempts to merge with the plugged list in the current process. Returns
+ * true if merge was succesful, otherwise false.
+ */
+static bool attempt_plug_merge(struct task_struct *tsk, struct request_queue *q,
+ struct bio *bio)
+{
+ struct blk_plug *plug;
+ struct request *rq;
+ bool ret = false;
+
+ plug = tsk->plug;
+ if (!plug)
+ goto out;
+
+ list_for_each_entry_reverse(rq, &plug->list, queuelist) {
+ int el_ret;
+
+ if (rq->q != q)
+ continue;
+
+ el_ret = elv_try_merge(rq, bio);
+ if (el_ret == ELEVATOR_BACK_MERGE) {
+ ret = bio_attempt_back_merge(q, rq, bio);
+ if (ret)
+ break;
+ } else if (el_ret == ELEVATOR_FRONT_MERGE) {
+ ret = bio_attempt_front_merge(q, rq, bio);
+ if (ret)
+ break;
+ }
+ }
+out:
+ return ret;
+}
+
void init_request_from_bio(struct request *req, struct bio *bio)
{
req->cpu = bio->bi_comp_cpu;
@@ -1221,26 +1233,12 @@ void init_request_from_bio(struct request *req, struct bio *bio)
blk_rq_bio_prep(req->q, req, bio);
}
-/*
- * Only disabling plugging for non-rotational devices if it does tagging
- * as well, otherwise we do need the proper merging
- */
-static inline bool queue_should_plug(struct request_queue *q)
-{
- return !(blk_queue_nonrot(q) && blk_queue_tagged(q));
-}
-
static int __make_request(struct request_queue *q, struct bio *bio)
{
- struct request *req;
- int el_ret;
- unsigned int bytes = bio->bi_size;
- const unsigned short prio = bio_prio(bio);
const bool sync = !!(bio->bi_rw & REQ_SYNC);
- const bool unplug = !!(bio->bi_rw & REQ_UNPLUG);
- const unsigned long ff = bio->bi_rw & REQ_FAILFAST_MASK;
- int where = ELEVATOR_INSERT_SORT;
- int rw_flags;
+ struct blk_plug *plug;
+ int el_ret, rw_flags, where = ELEVATOR_INSERT_SORT;
+ struct request *req;
/*
* low level driver can indicate that it wants pages above a
@@ -1249,78 +1247,36 @@ static int __make_request(struct request_queue *q, struct bio *bio)
*/
blk_queue_bounce(q, &bio);
- spin_lock_irq(q->queue_lock);
-
if (bio->bi_rw & (REQ_FLUSH | REQ_FUA)) {
+ spin_lock_irq(q->queue_lock);
where = ELEVATOR_INSERT_FLUSH;
goto get_rq;
}
- if (elv_queue_empty(q))
- goto get_rq;
-
- el_ret = elv_merge(q, &req, bio);
- switch (el_ret) {
- case ELEVATOR_BACK_MERGE:
- BUG_ON(!rq_mergeable(req));
-
- if (!ll_back_merge_fn(q, req, bio))
- break;
-
- trace_block_bio_backmerge(q, bio);
-
- if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff)
- blk_rq_set_mixed_merge(req);
-
- req->biotail->bi_next = bio;
- req->biotail = bio;
- req->__data_len += bytes;
- req->ioprio = ioprio_best(req->ioprio, prio);
- if (!blk_rq_cpu_valid(req))
- req->cpu = bio->bi_comp_cpu;
- drive_stat_acct(req, 0);
- elv_bio_merged(q, req, bio);
- if (!attempt_back_merge(q, req))
- elv_merged_request(q, req, el_ret);
+ /*
+ * Check if we can merge with the plugged list before grabbing
+ * any locks.
+ */
+ if (attempt_plug_merge(current, q, bio))
goto out;
- case ELEVATOR_FRONT_MERGE:
- BUG_ON(!rq_mergeable(req));
-
- if (!ll_front_merge_fn(q, req, bio))
- break;
-
- trace_block_bio_frontmerge(q, bio);
+ spin_lock_irq(q->queue_lock);
- if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff) {
- blk_rq_set_mixed_merge(req);
- req->cmd_flags &= ~REQ_FAILFAST_MASK;
- req->cmd_flags |= ff;
+ el_ret = elv_merge(q, &req, bio);
+ if (el_ret == ELEVATOR_BACK_MERGE) {
+ BUG_ON(req->cmd_flags & REQ_ON_PLUG);
+ if (bio_attempt_back_merge(q, req, bio)) {
+ if (!attempt_back_merge(q, req))
+ elv_merged_request(q, req, el_ret);
+ goto out_unlock;
+ }
+ } else if (el_ret == ELEVATOR_FRONT_MERGE) {
+ BUG_ON(req->cmd_flags & REQ_ON_PLUG);
+ if (bio_attempt_front_merge(q, req, bio)) {
+ if (!attempt_front_merge(q, req))
+ elv_merged_request(q, req, el_ret);
+ goto out_unlock;
}
-
- bio->bi_next = req->bio;
- req->bio = bio;
-
- /*
- * may not be valid. if the low level driver said
- * it didn't need a bounce buffer then it better
- * not touch req->buffer either...
- */
- req->buffer = bio_data(bio);
- req->__sector = bio->bi_sector;
- req->__data_len += bytes;
- req->ioprio = ioprio_best(req->ioprio, prio);
- if (!blk_rq_cpu_valid(req))
- req->cpu = bio->bi_comp_cpu;
- drive_stat_acct(req, 0);
- elv_bio_merged(q, req, bio);
- if (!attempt_front_merge(q, req))
- elv_merged_request(q, req, el_ret);
- goto out;
-
- /* ELV_NO_MERGE: elevator says don't/can't merge. */
- default:
- ;
}
get_rq:
@@ -1347,20 +1303,35 @@ get_rq:
*/
init_request_from_bio(req, bio);
- spin_lock_irq(q->queue_lock);
if (test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags) ||
- bio_flagged(bio, BIO_CPU_AFFINE))
- req->cpu = blk_cpu_to_group(smp_processor_id());
- if (queue_should_plug(q) && elv_queue_empty(q))
- blk_plug_device(q);
-
- /* insert the request into the elevator */
- drive_stat_acct(req, 1);
- __elv_add_request(q, req, where, 0);
+ bio_flagged(bio, BIO_CPU_AFFINE)) {
+ req->cpu = blk_cpu_to_group(get_cpu());
+ put_cpu();
+ }
+
+ plug = current->plug;
+ if (plug) {
+ if (!plug->should_sort && !list_empty(&plug->list)) {
+ struct request *__rq;
+
+ __rq = list_entry_rq(plug->list.prev);
+ if (__rq->q != q)
+ plug->should_sort = 1;
+ }
+ /*
+ * Debug flag, kill later
+ */
+ req->cmd_flags |= REQ_ON_PLUG;
+ list_add_tail(&req->queuelist, &plug->list);
+ drive_stat_acct(req, 1);
+ } else {
+ spin_lock_irq(q->queue_lock);
+ add_acct_request(q, req, where);
+ __blk_run_queue(q, false);
+out_unlock:
+ spin_unlock_irq(q->queue_lock);
+ }
out:
- if (unplug || !queue_should_plug(q))
- __generic_unplug_device(q);
- spin_unlock_irq(q->queue_lock);
return 0;
}
@@ -1763,9 +1734,7 @@ int blk_insert_cloned_request(struct request_queue *q, struct request *rq)
*/
BUG_ON(blk_queued_rq(rq));
- drive_stat_acct(rq, 1);
- __elv_add_request(q, rq, ELEVATOR_INSERT_BACK, 0);
-
+ add_acct_request(q, rq, ELEVATOR_INSERT_BACK);
spin_unlock_irqrestore(q->queue_lock, flags);
return 0;
@@ -2643,6 +2612,113 @@ int kblockd_schedule_work(struct request_queue *q, struct work_struct *work)
}
EXPORT_SYMBOL(kblockd_schedule_work);
+int kblockd_schedule_delayed_work(struct request_queue *q,
+ struct delayed_work *dwork, unsigned long delay)
+{
+ return queue_delayed_work(kblockd_workqueue, dwork, delay);
+}
+EXPORT_SYMBOL(kblockd_schedule_delayed_work);
+
+#define PLUG_MAGIC 0x91827364
+
+void blk_start_plug(struct blk_plug *plug)
+{
+ struct task_struct *tsk = current;
+
+ plug->magic = PLUG_MAGIC;
+ INIT_LIST_HEAD(&plug->list);
+ plug->should_sort = 0;
+
+ /*
+ * If this is a nested plug, don't actually assign it. It will be
+ * flushed on its own.
+ */
+ if (!tsk->plug) {
+ /*
+ * Store ordering should not be needed here, since a potential
+ * preempt will imply a full memory barrier
+ */
+ tsk->plug = plug;
+ }
+}
+EXPORT_SYMBOL(blk_start_plug);
+
+static int plug_rq_cmp(void *priv, struct list_head *a, struct list_head *b)
+{
+ struct request *rqa = container_of(a, struct request, queuelist);
+ struct request *rqb = container_of(b, struct request, queuelist);
+
+ return !(rqa->q == rqb->q);
+}
+
+static void flush_plug_list(struct blk_plug *plug)
+{
+ struct request_queue *q;
+ unsigned long flags;
+ struct request *rq;
+
+ BUG_ON(plug->magic != PLUG_MAGIC);
+
+ if (list_empty(&plug->list))
+ return;
+
+ if (plug->should_sort)
+ list_sort(NULL, &plug->list, plug_rq_cmp);
+
+ q = NULL;
+ local_irq_save(flags);
+ while (!list_empty(&plug->list)) {
+ rq = list_entry_rq(plug->list.next);
+ list_del_init(&rq->queuelist);
+ BUG_ON(!(rq->cmd_flags & REQ_ON_PLUG));
+ BUG_ON(!rq->q);
+ if (rq->q != q) {
+ if (q) {
+ __blk_run_queue(q, false);
+ spin_unlock(q->queue_lock);
+ }
+ q = rq->q;
+ spin_lock(q->queue_lock);
+ }
+ rq->cmd_flags &= ~REQ_ON_PLUG;
+
+ /*
+ * rq is already accounted, so use raw insert
+ */
+ __elv_add_request(q, rq, ELEVATOR_INSERT_SORT);
+ }
+
+ if (q) {
+ __blk_run_queue(q, false);
+ spin_unlock(q->queue_lock);
+ }
+
+ BUG_ON(!list_empty(&plug->list));
+ local_irq_restore(flags);
+}
+
+static void __blk_finish_plug(struct task_struct *tsk, struct blk_plug *plug)
+{
+ flush_plug_list(plug);
+
+ if (plug == tsk->plug)
+ tsk->plug = NULL;
+}
+
+void blk_finish_plug(struct blk_plug *plug)
+{
+ if (plug)
+ __blk_finish_plug(current, plug);
+}
+EXPORT_SYMBOL(blk_finish_plug);
+
+void __blk_flush_plug(struct task_struct *tsk, struct blk_plug *plug)
+{
+ __blk_finish_plug(tsk, plug);
+ tsk->plug = plug;
+}
+EXPORT_SYMBOL(__blk_flush_plug);
+
int __init blk_dev_init(void)
{
BUILD_BUG_ON(__REQ_NR_BITS > 8 *
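
On the driver side, the replacement for "plug and wait for the unplug timer" is blk_delay_queue() introduced above. A hedged before/after sketch of a ->request_fn() backing off while the device is busy (the ide-cd.c and dm.c conversions later in this diff follow the same shape; device_busy() is a hypothetical predicate, and both fragments assume q->queue_lock is held as usual in ->request_fn()):

	/* old: plug and rely on the per-queue unplug timer to re-run request_fn */
	if (device_busy(q)) {
		blk_plug_device(q);		/* removed by this merge */
		return;
	}

	/* new: explicitly ask for the queue to be re-run after a short delay */
	if (device_busy(q)) {
		blk_delay_queue(q, 1);		/* __blk_run_queue() fires again in ~1 ms */
		return;
	}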
diff --git a/block/blk-exec.c b/block/blk-exec.c
index cf1456a02acd..7482b7fa863b 100644
--- a/block/blk-exec.c
+++ b/block/blk-exec.c
@@ -54,8 +54,8 @@ void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
rq->end_io = done;
WARN_ON(irqs_disabled());
spin_lock_irq(q->queue_lock);
- __elv_add_request(q, rq, where, 1);
- __generic_unplug_device(q);
+ __elv_add_request(q, rq, where);
+ __blk_run_queue(q, false);
/* the queue is stopped so it won't be plugged+unplugged */
if (rq->cmd_type == REQ_TYPE_PM_RESUME)
q->request_fn(q);
diff --git a/block/blk-flush.c b/block/blk-flush.c
index 0bd8c9c5d6e5..93d5fd8e51eb 100644
--- a/block/blk-flush.c
+++ b/block/blk-flush.c
@@ -194,7 +194,6 @@ static void flush_end_io(struct request *flush_rq, int error)
{
struct request_queue *q = flush_rq->q;
struct list_head *running = &q->flush_queue[q->flush_running_idx];
- bool was_empty = elv_queue_empty(q);
bool queued = false;
struct request *rq, *n;
@@ -218,7 +217,7 @@ static void flush_end_io(struct request *flush_rq, int error)
* from request completion path and calling directly into
* request_fn may confuse the driver. Always use kblockd.
*/
- if (queued && was_empty)
+ if (queued)
__blk_run_queue(q, true);
}
@@ -269,13 +268,12 @@ static bool blk_kick_flush(struct request_queue *q)
static void flush_data_end_io(struct request *rq, int error)
{
struct request_queue *q = rq->q;
- bool was_empty = elv_queue_empty(q);
/*
* After populating an empty queue, kick it to avoid stall. Read
* the comment in flush_end_io().
*/
- if (blk_flush_complete_seq(rq, REQ_FSEQ_DATA, error) && was_empty)
+ if (blk_flush_complete_seq(rq, REQ_FSEQ_DATA, error))
__blk_run_queue(q, true);
}
diff --git a/block/blk-settings.c b/block/blk-settings.c
index df649fa59ded..1fa769293597 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -164,14 +164,6 @@ void blk_queue_make_request(struct request_queue *q, make_request_fn *mfn)
blk_queue_congestion_threshold(q);
q->nr_batching = BLK_BATCH_REQ;
- q->unplug_thresh = 4; /* hmm */
- q->unplug_delay = msecs_to_jiffies(3); /* 3 milliseconds */
- if (q->unplug_delay == 0)
- q->unplug_delay = 1;
-
- q->unplug_timer.function = blk_unplug_timeout;
- q->unplug_timer.data = (unsigned long)q;
-
blk_set_default_limits(&q->limits);
blk_queue_max_hw_sectors(q, BLK_SAFE_MAX_SECTORS);
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index 32dd3e4b041d..37abbfc68590 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -768,6 +768,7 @@ static int throtl_dispatch(struct request_queue *q)
unsigned int nr_disp = 0;
struct bio_list bio_list_on_stack;
struct bio *bio;
+ struct blk_plug plug;
spin_lock_irq(q->queue_lock);
@@ -796,9 +797,10 @@ out:
* immediate dispatch
*/
if (nr_disp) {
+ blk_start_plug(&plug);
while((bio = bio_list_pop(&bio_list_on_stack)))
generic_make_request(bio);
- blk_unplug(q);
+ blk_finish_plug(&plug);
}
return nr_disp;
}
diff --git a/block/blk.h b/block/blk.h
index 284b500852bd..49d21af81d07 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -18,8 +18,6 @@ int blk_rq_append_bio(struct request_queue *q, struct request *rq,
void blk_dequeue_request(struct request *rq);
void __blk_queue_free_tags(struct request_queue *q);
-void blk_unplug_work(struct work_struct *work);
-void blk_unplug_timeout(unsigned long data);
void blk_rq_timed_out_timer(unsigned long data);
void blk_delete_timer(struct request *);
void blk_add_timer(struct request *);
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 9697053f80bc..c826ef81c679 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -500,13 +500,6 @@ static inline void cfq_schedule_dispatch(struct cfq_data *cfqd)
}
}
-static int cfq_queue_empty(struct request_queue *q)
-{
- struct cfq_data *cfqd = q->elevator->elevator_data;
-
- return !cfqd->rq_queued;
-}
-
/*
* Scale schedule slice based on io priority. Use the sync time slice only
* if a queue is marked sync and has sync io queued. A sync queue with async
@@ -4080,7 +4073,6 @@ static struct elevator_type iosched_cfq = {
.elevator_add_req_fn = cfq_insert_request,
.elevator_activate_req_fn = cfq_activate_request,
.elevator_deactivate_req_fn = cfq_deactivate_request,
- .elevator_queue_empty_fn = cfq_queue_empty,
.elevator_completed_req_fn = cfq_completed_request,
.elevator_former_req_fn = elv_rb_former_request,
.elevator_latter_req_fn = elv_rb_latter_request,
diff --git a/block/deadline-iosched.c b/block/deadline-iosched.c
index b547cbca7b23..5139c0ea1864 100644
--- a/block/deadline-iosched.c
+++ b/block/deadline-iosched.c
@@ -326,14 +326,6 @@ dispatch_request:
return 1;
}
-static int deadline_queue_empty(struct request_queue *q)
-{
- struct deadline_data *dd = q->elevator->elevator_data;
-
- return list_empty(&dd->fifo_list[WRITE])
- && list_empty(&dd->fifo_list[READ]);
-}
-
static void deadline_exit_queue(struct elevator_queue *e)
{
struct deadline_data *dd = e->elevator_data;
@@ -445,7 +437,6 @@ static struct elevator_type iosched_deadline = {
.elevator_merge_req_fn = deadline_merged_requests,
.elevator_dispatch_fn = deadline_dispatch_requests,
.elevator_add_req_fn = deadline_add_request,
- .elevator_queue_empty_fn = deadline_queue_empty,
.elevator_former_req_fn = elv_rb_former_request,
.elevator_latter_req_fn = elv_rb_latter_request,
.elevator_init_fn = deadline_init_queue,
diff --git a/block/elevator.c b/block/elevator.c
index fabf3675c913..542ce826b401 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -113,7 +113,7 @@ int elv_rq_merge_ok(struct request *rq, struct bio *bio)
}
EXPORT_SYMBOL(elv_rq_merge_ok);
-static inline int elv_try_merge(struct request *__rq, struct bio *bio)
+int elv_try_merge(struct request *__rq, struct bio *bio)
{
int ret = ELEVATOR_NO_MERGE;
@@ -421,6 +421,8 @@ void elv_dispatch_sort(struct request_queue *q, struct request *rq)
struct list_head *entry;
int stop_flags;
+ BUG_ON(rq->cmd_flags & REQ_ON_PLUG);
+
if (q->last_merge == rq)
q->last_merge = NULL;
@@ -617,21 +619,12 @@ void elv_quiesce_end(struct request_queue *q)
void elv_insert(struct request_queue *q, struct request *rq, int where)
{
- int unplug_it = 1;
-
trace_block_rq_insert(q, rq);
rq->q = q;
switch (where) {
case ELEVATOR_INSERT_REQUEUE:
- /*
- * Most requeues happen because of a busy condition,
- * don't force unplug of the queue for that case.
- * Clear unplug_it and fall through.
- */
- unplug_it = 0;
-
case ELEVATOR_INSERT_FRONT:
rq->cmd_flags |= REQ_SOFTBARRIER;
list_add(&rq->queuelist, &q->queue_head);
@@ -677,25 +670,17 @@ void elv_insert(struct request_queue *q, struct request *rq, int where)
rq->cmd_flags |= REQ_SOFTBARRIER;
blk_insert_flush(rq);
break;
-
default:
printk(KERN_ERR "%s: bad insertion point %d\n",
__func__, where);
BUG();
}
-
- if (unplug_it && blk_queue_plugged(q)) {
- int nrq = q->rq.count[BLK_RW_SYNC] + q->rq.count[BLK_RW_ASYNC]
- - queue_in_flight(q);
-
- if (nrq >= q->unplug_thresh)
- __generic_unplug_device(q);
- }
}
-void __elv_add_request(struct request_queue *q, struct request *rq, int where,
- int plug)
+void __elv_add_request(struct request_queue *q, struct request *rq, int where)
{
+ BUG_ON(rq->cmd_flags & REQ_ON_PLUG);
+
if (rq->cmd_flags & REQ_SOFTBARRIER) {
/* barriers are scheduling boundary, update end_sector */
if (rq->cmd_type == REQ_TYPE_FS ||
@@ -707,38 +692,20 @@ void __elv_add_request(struct request_queue *q, struct request *rq, int where,
where == ELEVATOR_INSERT_SORT)
where = ELEVATOR_INSERT_BACK;
- if (plug)
- blk_plug_device(q);
-
elv_insert(q, rq, where);
}
EXPORT_SYMBOL(__elv_add_request);
-void elv_add_request(struct request_queue *q, struct request *rq, int where,
- int plug)
+void elv_add_request(struct request_queue *q, struct request *rq, int where)
{
unsigned long flags;
spin_lock_irqsave(q->queue_lock, flags);
- __elv_add_request(q, rq, where, plug);
+ __elv_add_request(q, rq, where);
spin_unlock_irqrestore(q->queue_lock, flags);
}
EXPORT_SYMBOL(elv_add_request);
-int elv_queue_empty(struct request_queue *q)
-{
- struct elevator_queue *e = q->elevator;
-
- if (!list_empty(&q->queue_head))
- return 0;
-
- if (e->ops->elevator_queue_empty_fn)
- return e->ops->elevator_queue_empty_fn(q);
-
- return 1;
-}
-EXPORT_SYMBOL(elv_queue_empty);
-
struct request *elv_latter_request(struct request_queue *q, struct request *rq)
{
struct elevator_queue *e = q->elevator;
diff --git a/block/noop-iosched.c b/block/noop-iosched.c
index 232c4b38cd37..06389e9ef96d 100644
--- a/block/noop-iosched.c
+++ b/block/noop-iosched.c
@@ -39,13 +39,6 @@ static void noop_add_request(struct request_queue *q, struct request *rq)
list_add_tail(&rq->queuelist, &nd->queue);
}
-static int noop_queue_empty(struct request_queue *q)
-{
- struct noop_data *nd = q->elevator->elevator_data;
-
- return list_empty(&nd->queue);
-}
-
static struct request *
noop_former_request(struct request_queue *q, struct request *rq)
{
@@ -90,7 +83,6 @@ static struct elevator_type elevator_noop = {
.elevator_merge_req_fn = noop_merged_requests,
.elevator_dispatch_fn = noop_dispatch,
.elevator_add_req_fn = noop_add_request,
- .elevator_queue_empty_fn = noop_queue_empty,
.elevator_former_req_fn = noop_former_request,
.elevator_latter_req_fn = noop_latter_request,
.elevator_init_fn = noop_init_queue,
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index 9279272b3732..35658f445fca 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -3170,12 +3170,6 @@ static void do_cciss_request(struct request_queue *q)
int sg_index = 0;
int chained = 0;
- /* We call start_io here in case there is a command waiting on the
- * queue that has not been sent.
- */
- if (blk_queue_plugged(q))
- goto startio;
-
queue:
creq = blk_peek_request(q);
if (!creq)
diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c
index 946dad4caef3..b2fceb53e809 100644
--- a/drivers/block/cpqarray.c
+++ b/drivers/block/cpqarray.c
@@ -911,9 +911,6 @@ static void do_ida_request(struct request_queue *q)
struct scatterlist tmp_sg[SG_MAX];
int i, dir, seg;
- if (blk_queue_plugged(q))
- goto startio;
-
queue_next:
creq = blk_peek_request(q);
if (!creq)
diff --git a/drivers/block/drbd/drbd_actlog.c b/drivers/block/drbd/drbd_actlog.c
index ba95cba192be..aca302492ff2 100644
--- a/drivers/block/drbd/drbd_actlog.c
+++ b/drivers/block/drbd/drbd_actlog.c
@@ -80,7 +80,7 @@ static int _drbd_md_sync_page_io(struct drbd_conf *mdev,
if ((rw & WRITE) && !test_bit(MD_NO_FUA, &mdev->flags))
rw |= REQ_FUA;
- rw |= REQ_UNPLUG | REQ_SYNC;
+ rw |= REQ_SYNC;
bio = bio_alloc(GFP_NOIO, 1);
bio->bi_bdev = bdev->md_bdev;
@@ -689,8 +689,6 @@ void drbd_al_to_on_disk_bm(struct drbd_conf *mdev)
}
}
- drbd_blk_run_queue(bdev_get_queue(mdev->ldev->md_bdev));
-
/* always (try to) flush bitmap to stable storage */
drbd_md_flush(mdev);
diff --git a/drivers/block/drbd/drbd_bitmap.c b/drivers/block/drbd/drbd_bitmap.c
index fd42832f785b..0645ca829a94 100644
--- a/drivers/block/drbd/drbd_bitmap.c
+++ b/drivers/block/drbd/drbd_bitmap.c
@@ -840,7 +840,6 @@ static int bm_rw(struct drbd_conf *mdev, int rw) __must_hold(local)
for (i = 0; i < num_pages; i++)
bm_page_io_async(mdev, b, i, rw);
- drbd_blk_run_queue(bdev_get_queue(mdev->ldev->md_bdev));
wait_event(b->bm_io_wait, atomic_read(&b->bm_async_io) == 0);
if (test_bit(BM_MD_IO_ERROR, &b->bm_flags)) {
diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
index 3803a0348937..b0bd27dfc1e8 100644
--- a/drivers/block/drbd/drbd_int.h
+++ b/drivers/block/drbd/drbd_int.h
@@ -377,7 +377,7 @@ union p_header {
#define DP_HARDBARRIER 1 /* depricated */
#define DP_RW_SYNC 2 /* equals REQ_SYNC */
#define DP_MAY_SET_IN_SYNC 4
-#define DP_UNPLUG 8 /* equals REQ_UNPLUG */
+#define DP_UNPLUG 8 /* not used anymore */
#define DP_FUA 16 /* equals REQ_FUA */
#define DP_FLUSH 32 /* equals REQ_FLUSH */
#define DP_DISCARD 64 /* equals REQ_DISCARD */
@@ -2382,20 +2382,6 @@ static inline int drbd_queue_order_type(struct drbd_conf *mdev)
return QUEUE_ORDERED_NONE;
}
-static inline void drbd_blk_run_queue(struct request_queue *q)
-{
- if (q && q->unplug_fn)
- q->unplug_fn(q);
-}
-
-static inline void drbd_kick_lo(struct drbd_conf *mdev)
-{
- if (get_ldev(mdev)) {
- drbd_blk_run_queue(bdev_get_queue(mdev->ldev->backing_bdev));
- put_ldev(mdev);
- }
-}
-
static inline void drbd_md_flush(struct drbd_conf *mdev)
{
int r;
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index 29cd0dc9fe4f..8a43ce0edeed 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -2477,12 +2477,11 @@ static u32 bio_flags_to_wire(struct drbd_conf *mdev, unsigned long bi_rw)
{
if (mdev->agreed_pro_version >= 95)
return (bi_rw & REQ_SYNC ? DP_RW_SYNC : 0) |
- (bi_rw & REQ_UNPLUG ? DP_UNPLUG : 0) |
(bi_rw & REQ_FUA ? DP_FUA : 0) |
(bi_rw & REQ_FLUSH ? DP_FLUSH : 0) |
(bi_rw & REQ_DISCARD ? DP_DISCARD : 0);
else
- return bi_rw & (REQ_SYNC | REQ_UNPLUG) ? DP_RW_SYNC : 0;
+ return bi_rw & REQ_SYNC ? DP_RW_SYNC : 0;
}
/* Used to send write requests
@@ -2719,35 +2718,6 @@ static int drbd_release(struct gendisk *gd, fmode_t mode)
return 0;
}
-static void drbd_unplug_fn(struct request_queue *q)
-{
- struct drbd_conf *mdev = q->queuedata;
-
- /* unplug FIRST */
- spin_lock_irq(q->queue_lock);
- blk_remove_plug(q);
- spin_unlock_irq(q->queue_lock);
-
- /* only if connected */
- spin_lock_irq(&mdev->req_lock);
- if (mdev->state.pdsk >= D_INCONSISTENT && mdev->state.conn >= C_CONNECTED) {
- D_ASSERT(mdev->state.role == R_PRIMARY);
- if (test_and_clear_bit(UNPLUG_REMOTE, &mdev->flags)) {
- /* add to the data.work queue,
- * unless already queued.
- * XXX this might be a good addition to drbd_queue_work
- * anyways, to detect "double queuing" ... */
- if (list_empty(&mdev->unplug_work.list))
- drbd_queue_work(&mdev->data.work,
- &mdev->unplug_work);
- }
- }
- spin_unlock_irq(&mdev->req_lock);
-
- if (mdev->state.disk >= D_INCONSISTENT)
- drbd_kick_lo(mdev);
-}
-
static void drbd_set_defaults(struct drbd_conf *mdev)
{
/* This way we get a compile error when sync_conf grows,
@@ -3222,9 +3192,7 @@ struct drbd_conf *drbd_new_device(unsigned int minor)
blk_queue_max_segment_size(q, DRBD_MAX_SEGMENT_SIZE);
blk_queue_bounce_limit(q, BLK_BOUNCE_ANY);
blk_queue_merge_bvec(q, drbd_merge_bvec);
- q->queue_lock = &mdev->req_lock; /* needed since we use */
- /* plugging on a queue, that actually has no requests! */
- q->unplug_fn = drbd_unplug_fn;
+ q->queue_lock = &mdev->req_lock;
mdev->md_io_page = alloc_page(GFP_KERNEL);
if (!mdev->md_io_page)
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index 24487d4fb202..8e68be939deb 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -187,15 +187,6 @@ static struct page *drbd_pp_first_pages_or_try_alloc(struct drbd_conf *mdev, int
return NULL;
}
-/* kick lower level device, if we have more than (arbitrary number)
- * reference counts on it, which typically are locally submitted io
- * requests. don't use unacked_cnt, so we speed up proto A and B, too. */
-static void maybe_kick_lo(struct drbd_conf *mdev)
-{
- if (atomic_read(&mdev->local_cnt) >= mdev->net_conf->unplug_watermark)
- drbd_kick_lo(mdev);
-}
-
static void reclaim_net_ee(struct drbd_conf *mdev, struct list_head *to_be_freed)
{
struct drbd_epoch_entry *e;
@@ -219,7 +210,6 @@ static void drbd_kick_lo_and_reclaim_net(struct drbd_conf *mdev)
LIST_HEAD(reclaimed);
struct drbd_epoch_entry *e, *t;
- maybe_kick_lo(mdev);
spin_lock_irq(&mdev->req_lock);
reclaim_net_ee(mdev, &reclaimed);
spin_unlock_irq(&mdev->req_lock);
@@ -436,8 +426,7 @@ void _drbd_wait_ee_list_empty(struct drbd_conf *mdev, struct list_head *head)
while (!list_empty(head)) {
prepare_to_wait(&mdev->ee_wait, &wait, TASK_UNINTERRUPTIBLE);
spin_unlock_irq(&mdev->req_lock);
- drbd_kick_lo(mdev);
- schedule();
+ io_schedule();
finish_wait(&mdev->ee_wait, &wait);
spin_lock_irq(&mdev->req_lock);
}
@@ -1111,8 +1100,6 @@ next_bio:
/* > e->sector, unless this is the first bio */
bio->bi_sector = sector;
bio->bi_bdev = mdev->ldev->backing_bdev;
- /* we special case some flags in the multi-bio case, see below
- * (REQ_UNPLUG) */
bio->bi_rw = rw;
bio->bi_private = e;
bio->bi_end_io = drbd_endio_sec;
@@ -1141,13 +1128,8 @@ next_bio:
bios = bios->bi_next;
bio->bi_next = NULL;
- /* strip off REQ_UNPLUG unless it is the last bio */
- if (bios)
- bio->bi_rw &= ~REQ_UNPLUG;
-
drbd_generic_make_request(mdev, fault_type, bio);
} while (bios);
- maybe_kick_lo(mdev);
return 0;
fail:
@@ -1167,9 +1149,6 @@ static int receive_Barrier(struct drbd_conf *mdev, enum drbd_packets cmd, unsign
inc_unacked(mdev);
- if (mdev->net_conf->wire_protocol != DRBD_PROT_C)
- drbd_kick_lo(mdev);
-
mdev->current_epoch->barrier_nr = p->barrier;
rv = drbd_may_finish_epoch(mdev, mdev->current_epoch, EV_GOT_BARRIER_NR);
@@ -1636,12 +1615,11 @@ static unsigned long write_flags_to_bio(struct drbd_conf *mdev, u32 dpf)
{
if (mdev->agreed_pro_version >= 95)
return (dpf & DP_RW_SYNC ? REQ_SYNC : 0) |
- (dpf & DP_UNPLUG ? REQ_UNPLUG : 0) |
(dpf & DP_FUA ? REQ_FUA : 0) |
(dpf & DP_FLUSH ? REQ_FUA : 0) |
(dpf & DP_DISCARD ? REQ_DISCARD : 0);
else
- return dpf & DP_RW_SYNC ? (REQ_SYNC | REQ_UNPLUG) : 0;
+ return dpf & DP_RW_SYNC ? REQ_SYNC : 0;
}
/* mirrored write */
@@ -3556,9 +3534,6 @@ static int receive_skip(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
static int receive_UnplugRemote(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
{
- if (mdev->state.disk >= D_INCONSISTENT)
- drbd_kick_lo(mdev);
-
/* Make sure we've acked all the TCP data associated
* with the data requests being unplugged */
drbd_tcp_quickack(mdev->data.socket);
diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c
index 11a75d32a2e2..ad3fc6228f27 100644
--- a/drivers/block/drbd/drbd_req.c
+++ b/drivers/block/drbd/drbd_req.c
@@ -960,10 +960,6 @@ allocate_barrier:
bio_endio(req->private_bio, -EIO);
}
- /* we need to plug ALWAYS since we possibly need to kick lo_dev.
- * we plug after submit, so we won't miss an unplug event */
- drbd_plug_device(mdev);
-
return 0;
fail_conflicting:
diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c
index 34f224b018b3..e027446590d3 100644
--- a/drivers/block/drbd/drbd_worker.c
+++ b/drivers/block/drbd/drbd_worker.c
@@ -792,7 +792,6 @@ int drbd_resync_finished(struct drbd_conf *mdev)
* queue (or even the read operations for those packets
* is not finished by now). Retry in 100ms. */
- drbd_kick_lo(mdev);
__set_current_state(TASK_INTERRUPTIBLE);
schedule_timeout(HZ / 10);
w = kmalloc(sizeof(struct drbd_work), GFP_ATOMIC);
diff --git a/drivers/block/drbd/drbd_wrappers.h b/drivers/block/drbd/drbd_wrappers.h
index defdb5013ea3..53586fa5ae1b 100644
--- a/drivers/block/drbd/drbd_wrappers.h
+++ b/drivers/block/drbd/drbd_wrappers.h
@@ -45,24 +45,6 @@ static inline void drbd_generic_make_request(struct drbd_conf *mdev,
generic_make_request(bio);
}
-static inline void drbd_plug_device(struct drbd_conf *mdev)
-{
- struct request_queue *q;
- q = bdev_get_queue(mdev->this_bdev);
-
- spin_lock_irq(q->queue_lock);
-
-/* XXX the check on !blk_queue_plugged is redundant,
- * implicitly checked in blk_plug_device */
-
- if (!blk_queue_plugged(q)) {
- blk_plug_device(q);
- del_timer(&q->unplug_timer);
- /* unplugging should not happen automatically... */
- }
- spin_unlock_irq(q->queue_lock);
-}
-
static inline int drbd_crypto_is_hash(struct crypto_tfm *tfm)
{
return (crypto_tfm_alg_type(tfm) & CRYPTO_ALG_TYPE_HASH_MASK)
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index 3851dbc03e05..301d7a9a41a6 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -3838,7 +3838,6 @@ static int __floppy_read_block_0(struct block_device *bdev)
bio.bi_end_io = floppy_rb0_complete;
submit_bio(READ, &bio);
- generic_unplug_device(bdev_get_queue(bdev));
process_fd_request();
wait_for_completion(&complete);
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 79c3079a6203..a076a14ca72d 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -540,17 +540,6 @@ out:
return 0;
}
-/*
- * kick off io on the underlying address space
- */
-static void loop_unplug(struct request_queue *q)
-{
- struct loop_device *lo = q->queuedata;
-
- queue_flag_clear_unlocked(QUEUE_FLAG_PLUGGED, q);
- blk_run_address_space(lo->lo_backing_file->f_mapping);
-}
-
struct switch_request {
struct file *file;
struct completion wait;
@@ -917,7 +906,6 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode,
*/
blk_queue_make_request(lo->lo_queue, loop_make_request);
lo->lo_queue->queuedata = lo;
- lo->lo_queue->unplug_fn = loop_unplug;
if (!(lo_flags & LO_FLAGS_READ_ONLY) && file->f_op->fsync)
blk_queue_flush(lo->lo_queue, REQ_FLUSH);
@@ -1019,7 +1007,6 @@ static int loop_clr_fd(struct loop_device *lo, struct block_device *bdev)
kthread_stop(lo->lo_thread);
- lo->lo_queue->unplug_fn = NULL;
lo->lo_backing_file = NULL;
loop_release_xfer(lo);
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index a077db27b7c9..07a382eaf0a8 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -1606,8 +1606,6 @@ static int kcdrwd(void *foobar)
min_sleep_time = pkt->sleep_time;
}
- generic_unplug_device(bdev_get_queue(pd->bdev));
-
VPRINTK("kcdrwd: sleeping\n");
residue = schedule_timeout(min_sleep_time);
VPRINTK("kcdrwd: wake up\n");
diff --git a/drivers/block/umem.c b/drivers/block/umem.c
index 9b102abe209c..031ca720d926 100644
--- a/drivers/block/umem.c
+++ b/drivers/block/umem.c
@@ -241,8 +241,7 @@ static void dump_dmastat(struct cardinfo *card, unsigned int dmastat)
*
* Whenever IO on the active page completes, the Ready page is activated
* and the ex-Active page is clean out and made Ready.
- * Otherwise the Ready page is only activated when it becomes full, or
- * when mm_unplug_device is called via the unplug_io_fn.
+ * Otherwise the Ready page is only activated when it becomes full.
*
* If a request arrives while both pages a full, it is queued, and b_rdev is
* overloaded to record whether it was a read or a write.
@@ -333,17 +332,6 @@ static inline void reset_page(struct mm_page *page)
page->biotail = &page->bio;
}
-static void mm_unplug_device(struct request_queue *q)
-{
- struct cardinfo *card = q->queuedata;
- unsigned long flags;
-
- spin_lock_irqsave(&card->lock, flags);
- if (blk_remove_plug(q))
- activate(card);
- spin_unlock_irqrestore(&card->lock, flags);
-}
-
/*
* If there is room on Ready page, take
* one bh off list and add it.
@@ -535,7 +523,6 @@ static int mm_make_request(struct request_queue *q, struct bio *bio)
*card->biotail = bio;
bio->bi_next = NULL;
card->biotail = &bio->bi_next;
- blk_plug_device(q);
spin_unlock_irq(&card->lock);
return 0;
@@ -897,7 +884,6 @@ static int __devinit mm_pci_probe(struct pci_dev *dev,
blk_queue_make_request(card->queue, mm_make_request);
card->queue->queue_lock = &card->lock;
card->queue->queuedata = card;
- card->queue->unplug_fn = mm_unplug_device;
tasklet_init(&card->tasklet, process_page, (unsigned long)card);
diff --git a/drivers/ide/ide-atapi.c b/drivers/ide/ide-atapi.c
index e88a2cf17711..6f218e014e99 100644
--- a/drivers/ide/ide-atapi.c
+++ b/drivers/ide/ide-atapi.c
@@ -233,8 +233,7 @@ int ide_queue_sense_rq(ide_drive_t *drive, void *special)
drive->hwif->rq = NULL;
- elv_add_request(drive->queue, &drive->sense_rq,
- ELEVATOR_INSERT_FRONT, 0);
+ elv_add_request(drive->queue, &drive->sense_rq, ELEVATOR_INSERT_FRONT);
return 0;
}
EXPORT_SYMBOL_GPL(ide_queue_sense_rq);
diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
index a2e29099ee0c..fd1e11799137 100644
--- a/drivers/ide/ide-cd.c
+++ b/drivers/ide/ide-cd.c
@@ -258,17 +258,10 @@ static int ide_cd_breathe(ide_drive_t *drive, struct request *rq)
if (time_after(jiffies, info->write_timeout))
return 0;
else {
- struct request_queue *q = drive->queue;
- unsigned long flags;
-
/*
- * take a breather relying on the unplug timer to kick us again
+ * take a breather
*/
-
- spin_lock_irqsave(q->queue_lock, flags);
- blk_plug_device(q);
- spin_unlock_irqrestore(q->queue_lock, flags);
-
+ blk_delay_queue(drive->queue, 1);
return 1;
}
}
@@ -1514,8 +1507,6 @@ static int ide_cdrom_setup(ide_drive_t *drive)
blk_queue_dma_alignment(q, 31);
blk_queue_update_dma_pad(q, 15);
- q->unplug_delay = max((1 * HZ) / 1000, 1);
-
drive->dev_flags |= IDE_DFLAG_MEDIA_CHANGED;
drive->atapi_flags = IDE_AFLAG_NO_EJECT | ide_cd_flags(id);
diff --git a/drivers/ide/ide-io.c b/drivers/ide/ide-io.c
index 999dac054bcc..f4077840d3ab 100644
--- a/drivers/ide/ide-io.c
+++ b/drivers/ide/ide-io.c
@@ -549,8 +549,6 @@ plug_device_2:
if (rq)
blk_requeue_request(q, rq);
- if (!elv_queue_empty(q))
- blk_plug_device(q);
}
void ide_requeue_and_plug(ide_drive_t *drive, struct request *rq)
@@ -562,8 +560,6 @@ void ide_requeue_and_plug(ide_drive_t *drive, struct request *rq)
if (rq)
blk_requeue_request(q, rq);
- if (!elv_queue_empty(q))
- blk_plug_device(q);
spin_unlock_irqrestore(q->queue_lock, flags);
}
diff --git a/drivers/ide/ide-park.c b/drivers/ide/ide-park.c
index 88a380c5a470..6ab9ab2a5081 100644
--- a/drivers/ide/ide-park.c
+++ b/drivers/ide/ide-park.c
@@ -52,7 +52,7 @@ static void issue_park_cmd(ide_drive_t *drive, unsigned long timeout)
rq->cmd[0] = REQ_UNPARK_HEADS;
rq->cmd_len = 1;
rq->cmd_type = REQ_TYPE_SPECIAL;
- elv_add_request(q, rq, ELEVATOR_INSERT_FRONT, 1);
+ elv_add_request(q, rq, ELEVATOR_INSERT_FRONT);
out:
return;
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index 9a35320fb59f..ca203cb23f3c 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -347,7 +347,7 @@ static void write_page(struct bitmap *bitmap, struct page *page, int wait)
atomic_inc(&bitmap->pending_writes);
set_buffer_locked(bh);
set_buffer_mapped(bh);
- submit_bh(WRITE | REQ_UNPLUG | REQ_SYNC, bh);
+ submit_bh(WRITE | REQ_SYNC, bh);
bh = bh->b_this_page;
}
@@ -1339,8 +1339,7 @@ int bitmap_startwrite(struct bitmap *bitmap, sector_t offset, unsigned long sect
prepare_to_wait(&bitmap->overflow_wait, &__wait,
TASK_UNINTERRUPTIBLE);
spin_unlock_irq(&bitmap->lock);
- md_unplug(bitmap->mddev);
- schedule();
+ io_schedule();
finish_wait(&bitmap->overflow_wait, &__wait);
continue;
}
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 4e054bd91664..2c62c1169f78 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -991,11 +991,6 @@ static void clone_init(struct dm_crypt_io *io, struct bio *clone)
clone->bi_destructor = dm_crypt_bio_destructor;
}
-static void kcryptd_unplug(struct crypt_config *cc)
-{
- blk_unplug(bdev_get_queue(cc->dev->bdev));
-}
-
static int kcryptd_io_read(struct dm_crypt_io *io, gfp_t gfp)
{
struct crypt_config *cc = io->target->private;
@@ -1008,10 +1003,8 @@ static int kcryptd_io_read(struct dm_crypt_io *io, gfp_t gfp)
* one in order to decrypt the whole bio data *afterwards*.
*/
clone = bio_alloc_bioset(gfp, bio_segments(base_bio), cc->bs);
- if (!clone) {
- kcryptd_unplug(cc);
+ if (!clone)
return 1;
- }
crypt_inc_pending(io);
diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c
index 136d4f71a116..76a5af00a26b 100644
--- a/drivers/md/dm-io.c
+++ b/drivers/md/dm-io.c
@@ -352,7 +352,7 @@ static void dispatch_io(int rw, unsigned int num_regions,
BUG_ON(num_regions > DM_IO_MAX_REGIONS);
if (sync)
- rw |= REQ_SYNC | REQ_UNPLUG;
+ rw |= REQ_SYNC;
/*
* For multiple regions we need to be careful to rewind
diff --git a/drivers/md/dm-kcopyd.c b/drivers/md/dm-kcopyd.c
index 924f5f0084c2..1bb73a13ca40 100644
--- a/drivers/md/dm-kcopyd.c
+++ b/drivers/md/dm-kcopyd.c
@@ -37,13 +37,6 @@ struct dm_kcopyd_client {
unsigned int nr_pages;
unsigned int nr_free_pages;
- /*
- * Block devices to unplug.
- * Non-NULL pointer means that a block device has some pending requests
- * and needs to be unplugged.
- */
- struct block_device *unplug[2];
-
struct dm_io_client *io_client;
wait_queue_head_t destroyq;
@@ -315,31 +308,6 @@ static int run_complete_job(struct kcopyd_job *job)
return 0;
}
-/*
- * Unplug the block device at the specified index.
- */
-static void unplug(struct dm_kcopyd_client *kc, int rw)
-{
- if (kc->unplug[rw] != NULL) {
- blk_unplug(bdev_get_queue(kc->unplug[rw]));
- kc->unplug[rw] = NULL;
- }
-}
-
-/*
- * Prepare block device unplug. If there's another device
- * to be unplugged at the same array index, we unplug that
- * device first.
- */
-static void prepare_unplug(struct dm_kcopyd_client *kc, int rw,
- struct block_device *bdev)
-{
- if (likely(kc->unplug[rw] == bdev))
- return;
- unplug(kc, rw);
- kc->unplug[rw] = bdev;
-}
-
static void complete_io(unsigned long error, void *context)
{
struct kcopyd_job *job = (struct kcopyd_job *) context;
@@ -386,16 +354,10 @@ static int run_io_job(struct kcopyd_job *job)
.client = job->kc->io_client,
};
- if (job->rw == READ) {
+ if (job->rw == READ)
r = dm_io(&io_req, 1, &job->source, NULL);
- prepare_unplug(job->kc, READ, job->source.bdev);
- } else {
- if (job->num_dests > 1)
- io_req.bi_rw |= REQ_UNPLUG;
+ else
r = dm_io(&io_req, job->num_dests, job->dests, NULL);
- if (!(io_req.bi_rw & REQ_UNPLUG))
- prepare_unplug(job->kc, WRITE, job->dests[0].bdev);
- }
return r;
}
@@ -466,6 +428,7 @@ static void do_work(struct work_struct *work)
{
struct dm_kcopyd_client *kc = container_of(work,
struct dm_kcopyd_client, kcopyd_work);
+ struct blk_plug plug;
/*
* The order that these are called is *very* important.
@@ -473,18 +436,12 @@ static void do_work(struct work_struct *work)
* Pages jobs when successful will jump onto the io jobs
* list. io jobs call wake when they complete and it all
* starts again.
- *
- * Note that io_jobs add block devices to the unplug array,
- * this array is cleared with "unplug" calls. It is thus
- * forbidden to run complete_jobs after io_jobs and before
- * unplug because the block device could be destroyed in
- * job completion callback.
*/
+ blk_start_plug(&plug);
process_jobs(&kc->complete_jobs, kc, run_complete_job);
process_jobs(&kc->pages_jobs, kc, run_pages_job);
process_jobs(&kc->io_jobs, kc, run_io_job);
- unplug(kc, READ);
- unplug(kc, WRITE);
+ blk_finish_plug(&plug);
}
/*
@@ -665,8 +622,6 @@ int dm_kcopyd_client_create(unsigned int nr_pages,
INIT_LIST_HEAD(&kc->io_jobs);
INIT_LIST_HEAD(&kc->pages_jobs);
- memset(kc->unplug, 0, sizeof(kc->unplug));
-
kc->job_pool = mempool_create_slab_pool(MIN_JOBS, _job_cache);
if (!kc->job_pool)
goto bad_slab;
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index b9e1e15ef11c..5ef136cdba91 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -394,7 +394,7 @@ static void raid_unplug(struct dm_target_callbacks *cb)
{
struct raid_set *rs = container_of(cb, struct raid_set, callbacks);
- md_raid5_unplug_device(rs->md.private);
+ md_raid5_kick_device(rs->md.private);
}
/*
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
index dee326775c60..976ad4688afc 100644
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -842,8 +842,6 @@ static void do_mirror(struct work_struct *work)
do_reads(ms, &reads);
do_writes(ms, &writes);
do_failures(ms, &failures);
-
- dm_table_unplug_all(ms->ti->table);
}
/*-----------------------------------------------------------------
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 38e4eb1bb965..f50a7b952257 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -1275,29 +1275,6 @@ int dm_table_any_busy_target(struct dm_table *t)
return 0;
}
-void dm_table_unplug_all(struct dm_table *t)
-{
- struct dm_dev_internal *dd;
- struct list_head *devices = dm_table_get_devices(t);
- struct dm_target_callbacks *cb;
-
- list_for_each_entry(dd, devices, list) {
- struct request_queue *q = bdev_get_queue(dd->dm_dev.bdev);
- char b[BDEVNAME_SIZE];
-
- if (likely(q))
- blk_unplug(q);
- else
- DMWARN_LIMIT("%s: Cannot unplug nonexistent device %s",
- dm_device_name(t->md),
- bdevname(dd->dm_dev.bdev, b));
- }
-
- list_for_each_entry(cb, &t->target_callbacks, list)
- if (cb->unplug_fn)
- cb->unplug_fn(cb);
-}
-
struct mapped_device *dm_table_get_md(struct dm_table *t)
{
return t->md;
@@ -1345,4 +1322,3 @@ EXPORT_SYMBOL(dm_table_get_mode);
EXPORT_SYMBOL(dm_table_get_md);
EXPORT_SYMBOL(dm_table_put);
EXPORT_SYMBOL(dm_table_get);
-EXPORT_SYMBOL(dm_table_unplug_all);
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index eaa3af0e0632..d22b9905c168 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -807,8 +807,6 @@ void dm_requeue_unmapped_request(struct request *clone)
dm_unprep_request(rq);
spin_lock_irqsave(q->queue_lock, flags);
- if (elv_queue_empty(q))
- blk_plug_device(q);
blk_requeue_request(q, rq);
spin_unlock_irqrestore(q->queue_lock, flags);
@@ -1613,10 +1611,10 @@ static void dm_request_fn(struct request_queue *q)
* number of in-flight I/Os after the queue is stopped in
* dm_suspend().
*/
- while (!blk_queue_plugged(q) && !blk_queue_stopped(q)) {
+ while (!blk_queue_stopped(q)) {
rq = blk_peek_request(q);
if (!rq)
- goto plug_and_out;
+ goto delay_and_out;
/* always use block 0 to find the target for flushes for now */
pos = 0;
@@ -1627,7 +1625,7 @@ static void dm_request_fn(struct request_queue *q)
BUG_ON(!dm_target_is_valid(ti));
if (ti->type->busy && ti->type->busy(ti))
- goto plug_and_out;
+ goto delay_and_out;
blk_start_request(rq);
clone = rq->special;
@@ -1647,11 +1645,8 @@ requeued:
BUG_ON(!irqs_disabled());
spin_lock(q->queue_lock);
-plug_and_out:
- if (!elv_queue_empty(q))
- /* Some requests still remain, retry later */
- blk_plug_device(q);
-
+delay_and_out:
+ blk_delay_queue(q, HZ / 10);
out:
dm_table_put(map);
@@ -1680,20 +1675,6 @@ static int dm_lld_busy(struct request_queue *q)
return r;
}
-static void dm_unplug_all(struct request_queue *q)
-{
- struct mapped_device *md = q->queuedata;
- struct dm_table *map = dm_get_live_table(md);
-
- if (map) {
- if (dm_request_based(md))
- generic_unplug_device(q);
-
- dm_table_unplug_all(map);
- dm_table_put(map);
- }
-}
-
static int dm_any_congested(void *congested_data, int bdi_bits)
{
int r = bdi_bits;
@@ -1817,7 +1798,6 @@ static void dm_init_md_queue(struct mapped_device *md)
md->queue->backing_dev_info.congested_data = md;
blk_queue_make_request(md->queue, dm_request);
blk_queue_bounce_limit(md->queue, BLK_BOUNCE_ANY);
- md->queue->unplug_fn = dm_unplug_all;
blk_queue_merge_bvec(md->queue, dm_merge_bvec);
blk_queue_flush(md->queue, REQ_FLUSH | REQ_FUA);
}
@@ -2263,8 +2243,6 @@ static int dm_wait_for_completion(struct mapped_device *md, int interruptible)
int r = 0;
DECLARE_WAITQUEUE(wait, current);
- dm_unplug_all(md->queue);
-
add_wait_queue(&md->wait, &wait);
while (1) {
@@ -2539,7 +2517,6 @@ int dm_resume(struct mapped_device *md)
clear_bit(DMF_SUSPENDED, &md->flags);
- dm_table_unplug_all(map);
r = 0;
out:
dm_table_put(map);
diff --git a/drivers/md/linear.c b/drivers/md/linear.c
index 0ed7f6bc2a7f..338804f8fb3b 100644
--- a/drivers/md/linear.c
+++ b/drivers/md/linear.c
@@ -87,22 +87,6 @@ static int linear_mergeable_bvec(struct request_queue *q,
return maxsectors << 9;
}
-static void linear_unplug(struct request_queue *q)
-{
- mddev_t *mddev = q->queuedata;
- linear_conf_t *conf;
- int i;
-
- rcu_read_lock();
- conf = rcu_dereference(mddev->private);
-
- for (i=0; i < mddev->raid_disks; i++) {
- struct request_queue *r_queue = bdev_get_queue(conf->disks[i].rdev->bdev);
- blk_unplug(r_queue);
- }
- rcu_read_unlock();
-}
-
static int linear_congested(void *data, int bits)
{
mddev_t *mddev = data;
@@ -224,7 +208,6 @@ static int linear_run (mddev_t *mddev)
md_set_array_sectors(mddev, linear_size(mddev, 0, 0));
blk_queue_merge_bvec(mddev->queue, linear_mergeable_bvec);
- mddev->queue->unplug_fn = linear_unplug;
mddev->queue->backing_dev_info.congested_fn = linear_congested;
mddev->queue->backing_dev_info.congested_data = mddev;
md_integrity_register(mddev);
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 818313e277e7..86ba66c0b28a 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -780,8 +780,7 @@ void md_super_write(mddev_t *mddev, mdk_rdev_t *rdev,
bio->bi_end_io = super_written;
atomic_inc(&mddev->pending_writes);
- submit_bio(REQ_WRITE | REQ_SYNC | REQ_UNPLUG | REQ_FLUSH | REQ_FUA,
- bio);
+ submit_bio(REQ_WRITE | REQ_SYNC | REQ_FLUSH | REQ_FUA, bio);
}
void md_super_wait(mddev_t *mddev)
@@ -809,7 +808,7 @@ int sync_page_io(mdk_rdev_t *rdev, sector_t sector, int size,
struct completion event;
int ret;
- rw |= REQ_SYNC | REQ_UNPLUG;
+ rw |= REQ_SYNC;
bio->bi_bdev = (metadata_op && rdev->meta_bdev) ?
rdev->meta_bdev : rdev->bdev;
@@ -4817,7 +4816,6 @@ static int do_md_stop(mddev_t * mddev, int mode, int is_open)
__md_stop_writes(mddev);
md_stop(mddev);
mddev->queue->merge_bvec_fn = NULL;
- mddev->queue->unplug_fn = NULL;
mddev->queue->backing_dev_info.congested_fn = NULL;
/* tell userspace to handle 'inactive' */
@@ -6692,8 +6690,6 @@ EXPORT_SYMBOL_GPL(md_allow_write);
void md_unplug(mddev_t *mddev)
{
- if (mddev->queue)
- blk_unplug(mddev->queue);
if (mddev->plug)
mddev->plug->unplug_fn(mddev->plug);
}
@@ -6876,7 +6872,6 @@ void md_do_sync(mddev_t *mddev)
>= mddev->resync_max - mddev->curr_resync_completed
)) {
/* time to update curr_resync_completed */
- md_unplug(mddev);
wait_event(mddev->recovery_wait,
atomic_read(&mddev->recovery_active) == 0);
mddev->curr_resync_completed = j;
@@ -6952,7 +6947,6 @@ void md_do_sync(mddev_t *mddev)
* about not overloading the IO subsystem. (things like an
* e2fsck being done on the RAID array should execute fast)
*/
- md_unplug(mddev);
cond_resched();
currspeed = ((unsigned long)(io_sectors-mddev->resync_mark_cnt))/2
@@ -6971,8 +6965,6 @@ void md_do_sync(mddev_t *mddev)
* this also signals 'finished resyncing' to md_stop
*/
out:
- md_unplug(mddev);
-
wait_event(mddev->recovery_wait, !atomic_read(&mddev->recovery_active));
/* tell personality that we are finished */
diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c
index 3a62d440e27b..5e694b151c30 100644
--- a/drivers/md/multipath.c
+++ b/drivers/md/multipath.c
@@ -106,36 +106,6 @@ static void multipath_end_request(struct bio *bio, int error)
rdev_dec_pending(rdev, conf->mddev);
}
-static void unplug_slaves(mddev_t *mddev)
-{
- multipath_conf_t *conf = mddev->private;
- int i;
-
- rcu_read_lock();
- for (i=0; i<mddev->raid_disks; i++) {
- mdk_rdev_t *rdev = rcu_dereference(conf->multipaths[i].rdev);
- if (rdev && !test_bit(Faulty, &rdev->flags)
- && atomic_read(&rdev->nr_pending)) {
- struct request_queue *r_queue = bdev_get_queue(rdev->bdev);
-
- atomic_inc(&rdev->nr_pending);
- rcu_read_unlock();
-
- blk_unplug(r_queue);
-
- rdev_dec_pending(rdev, mddev);
- rcu_read_lock();
- }
- }
- rcu_read_unlock();
-}
-
-static void multipath_unplug(struct request_queue *q)
-{
- unplug_slaves(q->queuedata);
-}
-
-
static int multipath_make_request(mddev_t *mddev, struct bio * bio)
{
multipath_conf_t *conf = mddev->private;
@@ -517,7 +487,6 @@ static int multipath_run (mddev_t *mddev)
*/
md_set_array_sectors(mddev, multipath_size(mddev, 0, 0));
- mddev->queue->unplug_fn = multipath_unplug;
mddev->queue->backing_dev_info.congested_fn = multipath_congested;
mddev->queue->backing_dev_info.congested_data = mddev;
md_integrity_register(mddev);
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index c0ac457f1218..95916fd6394a 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -25,21 +25,6 @@
#include "raid0.h"
#include "raid5.h"
-static void raid0_unplug(struct request_queue *q)
-{
- mddev_t *mddev = q->queuedata;
- raid0_conf_t *conf = mddev->private;
- mdk_rdev_t **devlist = conf->devlist;
- int raid_disks = conf->strip_zone[0].nb_dev;
- int i;
-
- for (i=0; i < raid_disks; i++) {
- struct request_queue *r_queue = bdev_get_queue(devlist[i]->bdev);
-
- blk_unplug(r_queue);
- }
-}
-
static int raid0_congested(void *data, int bits)
{
mddev_t *mddev = data;
@@ -272,7 +257,6 @@ static int create_strip_zones(mddev_t *mddev, raid0_conf_t **private_conf)
mdname(mddev),
(unsigned long long)smallest->sectors);
}
- mddev->queue->unplug_fn = raid0_unplug;
mddev->queue->backing_dev_info.congested_fn = raid0_congested;
mddev->queue->backing_dev_info.congested_data = mddev;
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 06cd712807d0..8f34ad5c478b 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -52,23 +52,16 @@
#define NR_RAID1_BIOS 256
-static void unplug_slaves(mddev_t *mddev);
-
static void allow_barrier(conf_t *conf);
static void lower_barrier(conf_t *conf);
static void * r1bio_pool_alloc(gfp_t gfp_flags, void *data)
{
struct pool_info *pi = data;
- r1bio_t *r1_bio;
int size = offsetof(r1bio_t, bios[pi->raid_disks]);
/* allocate a r1bio with room for raid_disks entries in the bios array */
- r1_bio = kzalloc(size, gfp_flags);
- if (!r1_bio && pi->mddev)
- unplug_slaves(pi->mddev);
-
- return r1_bio;
+ return kzalloc(size, gfp_flags);
}
static void r1bio_pool_free(void *r1_bio, void *data)
@@ -91,10 +84,8 @@ static void * r1buf_pool_alloc(gfp_t gfp_flags, void *data)
int i, j;
r1_bio = r1bio_pool_alloc(gfp_flags, pi);
- if (!r1_bio) {
- unplug_slaves(pi->mddev);
+ if (!r1_bio)
return NULL;
- }
/*
* Allocate bios : 1 for reading, n-1 for writing
@@ -520,37 +511,6 @@ static int read_balance(conf_t *conf, r1bio_t *r1_bio)
return new_disk;
}
-static void unplug_slaves(mddev_t *mddev)
-{
- conf_t *conf = mddev->private;
- int i;
-
- rcu_read_lock();
- for (i=0; i<mddev->raid_disks; i++) {
- mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
- if (rdev && !test_bit(Faulty, &rdev->flags) && atomic_read(&rdev->nr_pending)) {
- struct request_queue *r_queue = bdev_get_queue(rdev->bdev);
-
- atomic_inc(&rdev->nr_pending);
- rcu_read_unlock();
-
- blk_unplug(r_queue);
-
- rdev_dec_pending(rdev, mddev);
- rcu_read_lock();
- }
- }
- rcu_read_unlock();
-}
-
-static void raid1_unplug(struct request_queue *q)
-{
- mddev_t *mddev = q->queuedata;
-
- unplug_slaves(mddev);
- md_wakeup_thread(mddev->thread);
-}
-
static int raid1_congested(void *data, int bits)
{
mddev_t *mddev = data;
@@ -580,23 +540,16 @@ static int raid1_congested(void *data, int bits)
}
-static int flush_pending_writes(conf_t *conf)
+static void flush_pending_writes(conf_t *conf)
{
/* Any writes that have been queued but are awaiting
* bitmap updates get flushed here.
- * We return 1 if any requests were actually submitted.
*/
- int rv = 0;
-
spin_lock_irq(&conf->device_lock);
if (conf->pending_bio_list.head) {
struct bio *bio;
bio = bio_list_get(&conf->pending_bio_list);
- /* Only take the spinlock to quiet a warning */
- spin_lock(conf->mddev->queue->queue_lock);
- blk_remove_plug(conf->mddev->queue);
- spin_unlock(conf->mddev->queue->queue_lock);
spin_unlock_irq(&conf->device_lock);
/* flush any pending bitmap writes to
* disk before proceeding w/ I/O */
@@ -608,10 +561,14 @@ static int flush_pending_writes(conf_t *conf)
generic_make_request(bio);
bio = next;
}
- rv = 1;
} else
spin_unlock_irq(&conf->device_lock);
- return rv;
+}
+
+static void md_kick_device(mddev_t *mddev)
+{
+ blk_flush_plug(current);
+ md_wakeup_thread(mddev->thread);
}
/* Barriers....
@@ -643,8 +600,7 @@ static void raise_barrier(conf_t *conf)
/* Wait until no block IO is waiting */
wait_event_lock_irq(conf->wait_barrier, !conf->nr_waiting,
- conf->resync_lock,
- raid1_unplug(conf->mddev->queue));
+ conf->resync_lock, md_kick_device(conf->mddev));
/* block any new IO from starting */
conf->barrier++;
@@ -652,8 +608,7 @@ static void raise_barrier(conf_t *conf)
/* Now wait for all pending IO to complete */
wait_event_lock_irq(conf->wait_barrier,
!conf->nr_pending && conf->barrier < RESYNC_DEPTH,
- conf->resync_lock,
- raid1_unplug(conf->mddev->queue));
+ conf->resync_lock, md_kick_device(conf->mddev));
spin_unlock_irq(&conf->resync_lock);
}
@@ -675,7 +630,7 @@ static void wait_barrier(conf_t *conf)
conf->nr_waiting++;
wait_event_lock_irq(conf->wait_barrier, !conf->barrier,
conf->resync_lock,
- raid1_unplug(conf->mddev->queue));
+ md_kick_device(conf->mddev));
conf->nr_waiting--;
}
conf->nr_pending++;
@@ -712,7 +667,7 @@ static void freeze_array(conf_t *conf)
conf->nr_pending == conf->nr_queued+1,
conf->resync_lock,
({ flush_pending_writes(conf);
- raid1_unplug(conf->mddev->queue); }));
+ md_kick_device(conf->mddev); }));
spin_unlock_irq(&conf->resync_lock);
}
static void unfreeze_array(conf_t *conf)
@@ -962,7 +917,6 @@ static int make_request(mddev_t *mddev, struct bio * bio)
atomic_inc(&r1_bio->remaining);
spin_lock_irqsave(&conf->device_lock, flags);
bio_list_add(&conf->pending_bio_list, mbio);
- blk_plug_device_unlocked(mddev->queue);
spin_unlock_irqrestore(&conf->device_lock, flags);
}
r1_bio_write_done(r1_bio, bio->bi_vcnt, behind_pages, behind_pages != NULL);
@@ -971,7 +925,7 @@ static int make_request(mddev_t *mddev, struct bio * bio)
/* In case raid1d snuck in to freeze_array */
wake_up(&conf->wait_barrier);
- if (do_sync)
+ if (do_sync || !bitmap)
md_wakeup_thread(mddev->thread);
return 0;
@@ -1561,7 +1515,6 @@ static void raid1d(mddev_t *mddev)
unsigned long flags;
conf_t *conf = mddev->private;
struct list_head *head = &conf->retry_list;
- int unplug=0;
mdk_rdev_t *rdev;
md_check_recovery(mddev);
@@ -1569,7 +1522,7 @@ static void raid1d(mddev_t *mddev)
for (;;) {
char b[BDEVNAME_SIZE];
- unplug += flush_pending_writes(conf);
+ flush_pending_writes(conf);
spin_lock_irqsave(&conf->device_lock, flags);
if (list_empty(head)) {
@@ -1583,10 +1536,9 @@ static void raid1d(mddev_t *mddev)
mddev = r1_bio->mddev;
conf = mddev->private;
- if (test_bit(R1BIO_IsSync, &r1_bio->state)) {
+ if (test_bit(R1BIO_IsSync, &r1_bio->state))
sync_request_write(mddev, r1_bio);
- unplug = 1;
- } else {
+ else {
int disk;
/* we got a read error. Maybe the drive is bad. Maybe just
@@ -1636,14 +1588,11 @@ static void raid1d(mddev_t *mddev)
bio->bi_end_io = raid1_end_read_request;
bio->bi_rw = READ | do_sync;
bio->bi_private = r1_bio;
- unplug = 1;
generic_make_request(bio);
}
}
cond_resched();
}
- if (unplug)
- unplug_slaves(mddev);
}
@@ -2066,7 +2015,6 @@ static int run(mddev_t *mddev)
md_set_array_sectors(mddev, raid1_size(mddev, 0, 0));
- mddev->queue->unplug_fn = raid1_unplug;
mddev->queue->backing_dev_info.congested_fn = raid1_congested;
mddev->queue->backing_dev_info.congested_data = mddev;
md_integrity_register(mddev);
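
A hedged restatement of the helper these raid1 hunks add: a task about to sleep on the array barrier first flushes its own on-stack plug so the bios it queued reach the member disks, then wakes the raid1d thread. mddev_t, md_wakeup_thread() and wait_event_lock_irq() are the md-internal names already used above.

#include <linux/blkdev.h>

static void md_kick_device(mddev_t *mddev)
{
	blk_flush_plug(current);		/* push this task's plugged bios */
	md_wakeup_thread(mddev->thread);	/* let raid1d drain pending work */
}

/*
 * Typical use, as in raise_barrier()/wait_barrier(): the last argument of
 * wait_event_lock_irq() runs each time the waiter drops resync_lock, e.g.
 *	wait_event_lock_irq(conf->wait_barrier, !conf->barrier,
 *			    conf->resync_lock, md_kick_device(conf->mddev));
 */
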
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 747d061d8e05..c0d0f5f7e407 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -57,23 +57,16 @@
*/
#define NR_RAID10_BIOS 256
-static void unplug_slaves(mddev_t *mddev);
-
static void allow_barrier(conf_t *conf);
static void lower_barrier(conf_t *conf);
static void * r10bio_pool_alloc(gfp_t gfp_flags, void *data)
{
conf_t *conf = data;
- r10bio_t *r10_bio;
int size = offsetof(struct r10bio_s, devs[conf->copies]);
/* allocate a r10bio with room for raid_disks entries in the bios array */
- r10_bio = kzalloc(size, gfp_flags);
- if (!r10_bio && conf->mddev)
- unplug_slaves(conf->mddev);
-
- return r10_bio;
+ return kzalloc(size, gfp_flags);
}
static void r10bio_pool_free(void *r10_bio, void *data)
@@ -106,10 +99,8 @@ static void * r10buf_pool_alloc(gfp_t gfp_flags, void *data)
int nalloc;
r10_bio = r10bio_pool_alloc(gfp_flags, conf);
- if (!r10_bio) {
- unplug_slaves(conf->mddev);
+ if (!r10_bio)
return NULL;
- }
if (test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery))
nalloc = conf->copies; /* resync */
@@ -597,37 +588,6 @@ rb_out:
return disk;
}
-static void unplug_slaves(mddev_t *mddev)
-{
- conf_t *conf = mddev->private;
- int i;
-
- rcu_read_lock();
- for (i=0; i < conf->raid_disks; i++) {
- mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
- if (rdev && !test_bit(Faulty, &rdev->flags) && atomic_read(&rdev->nr_pending)) {
- struct request_queue *r_queue = bdev_get_queue(rdev->bdev);
-
- atomic_inc(&rdev->nr_pending);
- rcu_read_unlock();
-
- blk_unplug(r_queue);
-
- rdev_dec_pending(rdev, mddev);
- rcu_read_lock();
- }
- }
- rcu_read_unlock();
-}
-
-static void raid10_unplug(struct request_queue *q)
-{
- mddev_t *mddev = q->queuedata;
-
- unplug_slaves(q->queuedata);
- md_wakeup_thread(mddev->thread);
-}
-
static int raid10_congested(void *data, int bits)
{
mddev_t *mddev = data;
@@ -649,23 +609,16 @@ static int raid10_congested(void *data, int bits)
return ret;
}
-static int flush_pending_writes(conf_t *conf)
+static void flush_pending_writes(conf_t *conf)
{
/* Any writes that have been queued but are awaiting
* bitmap updates get flushed here.
- * We return 1 if any requests were actually submitted.
*/
- int rv = 0;
-
spin_lock_irq(&conf->device_lock);
if (conf->pending_bio_list.head) {
struct bio *bio;
bio = bio_list_get(&conf->pending_bio_list);
- /* Spinlock only taken to quiet a warning */
- spin_lock(conf->mddev->queue->queue_lock);
- blk_remove_plug(conf->mddev->queue);
- spin_unlock(conf->mddev->queue->queue_lock);
spin_unlock_irq(&conf->device_lock);
/* flush any pending bitmap writes to disk
* before proceeding w/ I/O */
@@ -677,11 +630,16 @@ static int flush_pending_writes(conf_t *conf)
generic_make_request(bio);
bio = next;
}
- rv = 1;
} else
spin_unlock_irq(&conf->device_lock);
- return rv;
}
+
+static void md_kick_device(mddev_t *mddev)
+{
+ blk_flush_plug(current);
+ md_wakeup_thread(mddev->thread);
+}
+
/* Barriers....
* Sometimes we need to suspend IO while we do something else,
* either some resync/recovery, or reconfigure the array.
@@ -711,8 +669,7 @@ static void raise_barrier(conf_t *conf, int force)
/* Wait until no block IO is waiting (unless 'force') */
wait_event_lock_irq(conf->wait_barrier, force || !conf->nr_waiting,
- conf->resync_lock,
- raid10_unplug(conf->mddev->queue));
+ conf->resync_lock, md_kick_device(conf->mddev));
/* block any new IO from starting */
conf->barrier++;
@@ -720,8 +677,7 @@ static void raise_barrier(conf_t *conf, int force)
	/* Now wait for all pending IO to complete */
wait_event_lock_irq(conf->wait_barrier,
!conf->nr_pending && conf->barrier < RESYNC_DEPTH,
- conf->resync_lock,
- raid10_unplug(conf->mddev->queue));
+ conf->resync_lock, md_kick_device(conf->mddev));
spin_unlock_irq(&conf->resync_lock);
}
@@ -742,7 +698,7 @@ static void wait_barrier(conf_t *conf)
conf->nr_waiting++;
wait_event_lock_irq(conf->wait_barrier, !conf->barrier,
conf->resync_lock,
- raid10_unplug(conf->mddev->queue));
+ md_kick_device(conf->mddev));
conf->nr_waiting--;
}
conf->nr_pending++;
@@ -779,7 +735,7 @@ static void freeze_array(conf_t *conf)
conf->nr_pending == conf->nr_queued+1,
conf->resync_lock,
({ flush_pending_writes(conf);
- raid10_unplug(conf->mddev->queue); }));
+ md_kick_device(conf->mddev); }));
spin_unlock_irq(&conf->resync_lock);
}
@@ -974,7 +930,6 @@ static int make_request(mddev_t *mddev, struct bio * bio)
atomic_inc(&r10_bio->remaining);
spin_lock_irqsave(&conf->device_lock, flags);
bio_list_add(&conf->pending_bio_list, mbio);
- blk_plug_device_unlocked(mddev->queue);
spin_unlock_irqrestore(&conf->device_lock, flags);
}
@@ -991,7 +946,7 @@ static int make_request(mddev_t *mddev, struct bio * bio)
/* In case raid10d snuck in to freeze_array */
wake_up(&conf->wait_barrier);
- if (do_sync)
+ if (do_sync || !mddev->bitmap)
md_wakeup_thread(mddev->thread);
return 0;
@@ -1684,7 +1639,6 @@ static void raid10d(mddev_t *mddev)
unsigned long flags;
conf_t *conf = mddev->private;
struct list_head *head = &conf->retry_list;
- int unplug=0;
mdk_rdev_t *rdev;
md_check_recovery(mddev);
@@ -1692,7 +1646,7 @@ static void raid10d(mddev_t *mddev)
for (;;) {
char b[BDEVNAME_SIZE];
- unplug += flush_pending_writes(conf);
+ flush_pending_writes(conf);
spin_lock_irqsave(&conf->device_lock, flags);
if (list_empty(head)) {
@@ -1706,13 +1660,11 @@ static void raid10d(mddev_t *mddev)
mddev = r10_bio->mddev;
conf = mddev->private;
- if (test_bit(R10BIO_IsSync, &r10_bio->state)) {
+ if (test_bit(R10BIO_IsSync, &r10_bio->state))
sync_request_write(mddev, r10_bio);
- unplug = 1;
- } else if (test_bit(R10BIO_IsRecover, &r10_bio->state)) {
+ else if (test_bit(R10BIO_IsRecover, &r10_bio->state))
recovery_request_write(mddev, r10_bio);
- unplug = 1;
- } else {
+ else {
int mirror;
/* we got a read error. Maybe the drive is bad. Maybe just
* the block and we can fix it.
@@ -1759,14 +1711,11 @@ static void raid10d(mddev_t *mddev)
bio->bi_rw = READ | do_sync;
bio->bi_private = r10_bio;
bio->bi_end_io = raid10_end_read_request;
- unplug = 1;
generic_make_request(bio);
}
}
cond_resched();
}
- if (unplug)
- unplug_slaves(mddev);
}
@@ -2377,7 +2326,6 @@ static int run(mddev_t *mddev)
md_set_array_sectors(mddev, size);
mddev->resync_max_sectors = size;
- mddev->queue->unplug_fn = raid10_unplug;
mddev->queue->backing_dev_info.congested_fn = raid10_congested;
mddev->queue->backing_dev_info.congested_data = mddev;
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 78536fdbd87f..e867ee42b152 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -433,8 +433,6 @@ static int has_failed(raid5_conf_t *conf)
return 0;
}
-static void unplug_slaves(mddev_t *mddev);
-
static struct stripe_head *
get_active_stripe(raid5_conf_t *conf, sector_t sector,
int previous, int noblock, int noquiesce)
@@ -463,8 +461,7 @@ get_active_stripe(raid5_conf_t *conf, sector_t sector,
< (conf->max_nr_stripes *3/4)
|| !conf->inactive_blocked),
conf->device_lock,
- md_raid5_unplug_device(conf)
- );
+ md_raid5_kick_device(conf));
conf->inactive_blocked = 0;
} else
init_stripe(sh, sector, previous);
@@ -1473,8 +1470,7 @@ static int resize_stripes(raid5_conf_t *conf, int newsize)
wait_event_lock_irq(conf->wait_for_stripe,
!list_empty(&conf->inactive_list),
conf->device_lock,
- unplug_slaves(conf->mddev)
- );
+ blk_flush_plug(current));
osh = get_free_stripe(conf);
spin_unlock_irq(&conf->device_lock);
atomic_set(&nsh->count, 1);
@@ -3645,58 +3641,19 @@ static void activate_bit_delay(raid5_conf_t *conf)
}
}
-static void unplug_slaves(mddev_t *mddev)
+void md_raid5_kick_device(raid5_conf_t *conf)
{
- raid5_conf_t *conf = mddev->private;
- int i;
- int devs = max(conf->raid_disks, conf->previous_raid_disks);
-
- rcu_read_lock();
- for (i = 0; i < devs; i++) {
- mdk_rdev_t *rdev = rcu_dereference(conf->disks[i].rdev);
- if (rdev && !test_bit(Faulty, &rdev->flags) && atomic_read(&rdev->nr_pending)) {
- struct request_queue *r_queue = bdev_get_queue(rdev->bdev);
-
- atomic_inc(&rdev->nr_pending);
- rcu_read_unlock();
-
- blk_unplug(r_queue);
-
- rdev_dec_pending(rdev, mddev);
- rcu_read_lock();
- }
- }
- rcu_read_unlock();
-}
-
-void md_raid5_unplug_device(raid5_conf_t *conf)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&conf->device_lock, flags);
-
- if (plugger_remove_plug(&conf->plug)) {
- conf->seq_flush++;
- raid5_activate_delayed(conf);
- }
+ blk_flush_plug(current);
+ raid5_activate_delayed(conf);
md_wakeup_thread(conf->mddev->thread);
-
- spin_unlock_irqrestore(&conf->device_lock, flags);
-
- unplug_slaves(conf->mddev);
}
-EXPORT_SYMBOL_GPL(md_raid5_unplug_device);
+EXPORT_SYMBOL_GPL(md_raid5_kick_device);
static void raid5_unplug(struct plug_handle *plug)
{
raid5_conf_t *conf = container_of(plug, raid5_conf_t, plug);
- md_raid5_unplug_device(conf);
-}
-static void raid5_unplug_queue(struct request_queue *q)
-{
- mddev_t *mddev = q->queuedata;
- md_raid5_unplug_device(mddev->private);
+ md_raid5_kick_device(conf);
}
int md_raid5_congested(mddev_t *mddev, int bits)
@@ -4100,7 +4057,7 @@ static int make_request(mddev_t *mddev, struct bio * bi)
* add failed due to overlap. Flush everything
* and wait a while
*/
- md_raid5_unplug_device(conf);
+ md_raid5_kick_device(conf);
release_stripe(sh);
schedule();
goto retry;
@@ -4365,7 +4322,6 @@ static inline sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *ski
if (sector_nr >= max_sector) {
/* just being told to finish up .. nothing much to do */
- unplug_slaves(mddev);
if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) {
end_reshape(conf);
@@ -4569,7 +4525,6 @@ static void raid5d(mddev_t *mddev)
spin_unlock_irq(&conf->device_lock);
async_tx_issue_pending_all();
- unplug_slaves(mddev);
pr_debug("--- raid5d inactive\n");
}
@@ -5204,7 +5159,7 @@ static int run(mddev_t *mddev)
mddev->queue->backing_dev_info.congested_data = mddev;
mddev->queue->backing_dev_info.congested_fn = raid5_congested;
- mddev->queue->unplug_fn = raid5_unplug_queue;
+ mddev->queue->queue_lock = &conf->device_lock;
chunk_size = mddev->chunk_sectors << 9;
blk_queue_io_min(mddev->queue, chunk_size);
diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h
index 2ace0582b409..8d563a4f022a 100644
--- a/drivers/md/raid5.h
+++ b/drivers/md/raid5.h
@@ -503,6 +503,6 @@ static inline int algorithm_is_DDF(int layout)
}
extern int md_raid5_congested(mddev_t *mddev, int bits);
-extern void md_raid5_unplug_device(raid5_conf_t *conf);
+extern void md_raid5_kick_device(raid5_conf_t *conf);
extern int raid5_set_cache_size(mddev_t *mddev, int size);
#endif
diff --git a/drivers/message/i2o/i2o_block.c b/drivers/message/i2o/i2o_block.c
index f81c25d4a125..47ec5bc0ed21 100644
--- a/drivers/message/i2o/i2o_block.c
+++ b/drivers/message/i2o/i2o_block.c
@@ -897,11 +897,7 @@ static void i2o_block_request_fn(struct request_queue *q)
{
struct request *req;
- while (!blk_queue_plugged(q)) {
- req = blk_peek_request(q);
- if (!req)
- break;
-
+ while ((req = blk_peek_request(q)) != NULL) {
if (req->cmd_type == REQ_TYPE_FS) {
struct i2o_block_delayed_request *dreq;
struct i2o_block_request *ireq = req->special;
diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c
index 4e42d030e097..2ae727568df9 100644
--- a/drivers/mmc/card/queue.c
+++ b/drivers/mmc/card/queue.c
@@ -55,8 +55,7 @@ static int mmc_queue_thread(void *d)
spin_lock_irq(q->queue_lock);
set_current_state(TASK_INTERRUPTIBLE);
- if (!blk_queue_plugged(q))
- req = blk_fetch_request(q);
+ req = blk_fetch_request(q);
mq->req = req;
spin_unlock_irq(q->queue_lock);
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index 794bfd962266..4d2df2f76ea0 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -1917,7 +1917,7 @@ static void __dasd_process_request_queue(struct dasd_block *block)
return;
}
/* Now we try to fetch requests from the request queue */
- while (!blk_queue_plugged(queue) && (req = blk_peek_request(queue))) {
+ while ((req = blk_peek_request(queue))) {
if (basedev->features & DASD_FEATURE_READONLY &&
rq_data_dir(req) == WRITE) {
DBF_DEV_EVENT(DBF_ERR, basedev,
diff --git a/drivers/s390/char/tape_block.c b/drivers/s390/char/tape_block.c
index ad8431636012..83cea9a55e2f 100644
--- a/drivers/s390/char/tape_block.c
+++ b/drivers/s390/char/tape_block.c
@@ -161,7 +161,6 @@ tapeblock_requeue(struct work_struct *work) {
spin_lock_irq(&device->blk_data.request_queue_lock);
while (
- !blk_queue_plugged(queue) &&
blk_peek_request(queue) &&
nr_queued < TAPEBLOCK_MIN_REQUEUE
) {
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index fb2bb35c62cb..bf80a4c5a481 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -67,6 +67,13 @@ static struct scsi_host_sg_pool scsi_sg_pools[] = {
struct kmem_cache *scsi_sdb_cache;
+/*
+ * When to reinvoke queueing after a resource shortage. It's 3 msecs to
+ * not change behaviour from the previous unplug mechanism, experimentation
+ * may prove this needs changing.
+ */
+#define SCSI_QUEUE_DELAY 3
+
static void scsi_run_queue(struct request_queue *q);
/*
@@ -149,14 +156,7 @@ static int __scsi_queue_insert(struct scsi_cmnd *cmd, int reason, int unbusy)
/*
* Requeue this command. It will go before all other commands
* that are already in the queue.
- *
- * NOTE: there is magic here about the way the queue is plugged if
- * we have no outstanding commands.
- *
- * Although we *don't* plug the queue, we call the request
- * function. The SCSI request function detects the blocked condition
- * and plugs the queue appropriately.
- */
+ */
spin_lock_irqsave(q->queue_lock, flags);
blk_requeue_request(q, cmd->request);
spin_unlock_irqrestore(q->queue_lock, flags);
@@ -1194,11 +1194,11 @@ int scsi_prep_return(struct request_queue *q, struct request *req, int ret)
case BLKPREP_DEFER:
/*
* If we defer, the blk_peek_request() returns NULL, but the
- * queue must be restarted, so we plug here if no returning
- * command will automatically do that.
+ * queue must be restarted, so we schedule a callback to happen
+ * shortly.
*/
if (sdev->device_busy == 0)
- blk_plug_device(q);
+ blk_delay_queue(q, SCSI_QUEUE_DELAY);
break;
default:
req->cmd_flags |= REQ_DONTPREP;
@@ -1237,7 +1237,7 @@ static inline int scsi_dev_queue_ready(struct request_queue *q,
sdev_printk(KERN_INFO, sdev,
"unblocking device at zero depth\n"));
} else {
- blk_plug_device(q);
+ blk_delay_queue(q, SCSI_QUEUE_DELAY);
return 0;
}
}
@@ -1467,7 +1467,7 @@ static void scsi_request_fn(struct request_queue *q)
* the host is no longer able to accept any more requests.
*/
shost = sdev->host;
- while (!blk_queue_plugged(q)) {
+ for (;;) {
int rtn;
/*
* get next queueable request. We do this early to make sure
@@ -1546,15 +1546,8 @@ static void scsi_request_fn(struct request_queue *q)
*/
rtn = scsi_dispatch_cmd(cmd);
spin_lock_irq(q->queue_lock);
- if(rtn) {
- /* we're refusing the command; because of
- * the way locks get dropped, we need to
- * check here if plugging is required */
- if(sdev->device_busy == 0)
- blk_plug_device(q);
-
- break;
- }
+ if (rtn)
+ goto out_delay;
}
goto out;
@@ -1573,9 +1566,10 @@ static void scsi_request_fn(struct request_queue *q)
spin_lock_irq(q->queue_lock);
blk_requeue_request(q, req);
sdev->device_busy--;
- if(sdev->device_busy == 0)
- blk_plug_device(q);
- out:
+out_delay:
+ if (sdev->device_busy == 0)
+ blk_delay_queue(q, SCSI_QUEUE_DELAY);
+out:
/* must be careful here...if we trigger the ->remove() function
* we cannot be holding the q lock */
spin_unlock_irq(q->queue_lock);
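
A minimal sketch of the replacement for queue plugging that the scsi_lib.c change above relies on: when the device cannot take a request, the request_fn schedules a delayed re-run of the queue with blk_delay_queue() and returns. Both example_request_fn() and can_queue_request() are illustrative stand-ins, not SCSI code.

#include <linux/blkdev.h>

#define EXAMPLE_QUEUE_DELAY	3	/* msecs, mirroring SCSI_QUEUE_DELAY */

static bool can_queue_request(struct request_queue *q)
{
	return true;			/* hypothetical readiness test */
}

static void example_request_fn(struct request_queue *q)
{
	struct request *rq;

	while ((rq = blk_peek_request(q)) != NULL) {
		if (!can_queue_request(q)) {
			/* resource shortage: retry shortly instead of plugging */
			blk_delay_queue(q, EXAMPLE_QUEUE_DELAY);
			return;
		}
		blk_start_request(rq);
		/* ... hand the request to the hardware ... */
	}
}
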
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
index 5c3ccfc6b622..2941d2d92c94 100644
--- a/drivers/scsi/scsi_transport_fc.c
+++ b/drivers/scsi/scsi_transport_fc.c
@@ -3913,7 +3913,7 @@ fc_bsg_request_handler(struct request_queue *q, struct Scsi_Host *shost,
if (!get_device(dev))
return;
- while (!blk_queue_plugged(q)) {
+ while (1) {
if (rport && (rport->port_state == FC_PORTSTATE_BLOCKED) &&
!(rport->flags & FC_RPORT_FAST_FAIL_TIMEDOUT))
break;
diff --git a/drivers/scsi/scsi_transport_sas.c b/drivers/scsi/scsi_transport_sas.c
index 927e99cb7225..c6fcf76cade5 100644
--- a/drivers/scsi/scsi_transport_sas.c
+++ b/drivers/scsi/scsi_transport_sas.c
@@ -173,11 +173,7 @@ static void sas_smp_request(struct request_queue *q, struct Scsi_Host *shost,
int ret;
int (*handler)(struct Scsi_Host *, struct sas_rphy *, struct request *);
- while (!blk_queue_plugged(q)) {
- req = blk_fetch_request(q);
- if (!req)
- break;
-
+ while ((req = blk_fetch_request(q)) != NULL) {
spin_unlock_irq(q->queue_lock);
handler = to_sas_internal(shost->transportt)->f->smp_handler;
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
index 67f0c09983c8..c1b539d7b0d3 100644
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -392,9 +392,8 @@ static int iblock_do_task(struct se_task *task)
{
struct se_device *dev = task->task_se_cmd->se_dev;
struct iblock_req *req = IBLOCK_REQ(task);
- struct iblock_dev *ibd = (struct iblock_dev *)req->ib_dev;
- struct request_queue *q = bdev_get_queue(ibd->ibd_bd);
struct bio *bio = req->ib_bio, *nbio = NULL;
+ struct blk_plug plug;
int rw;
if (task->task_data_direction == DMA_TO_DEVICE) {
@@ -412,6 +411,7 @@ static int iblock_do_task(struct se_task *task)
rw = READ;
}
+ blk_start_plug(&plug);
while (bio) {
nbio = bio->bi_next;
bio->bi_next = NULL;
@@ -421,9 +421,8 @@ static int iblock_do_task(struct se_task *task)
submit_bio(rw, bio);
bio = nbio;
}
+ blk_finish_plug(&plug);
- if (q->unplug_fn)
- q->unplug_fn(q);
return PYX_TRANSPORT_SENT_TO_TRANSPORT;
}
diff --git a/fs/adfs/inode.c b/fs/adfs/inode.c
index 65794b8fe79e..1cc84b276131 100644
--- a/fs/adfs/inode.c
+++ b/fs/adfs/inode.c
@@ -73,7 +73,6 @@ static sector_t _adfs_bmap(struct address_space *mapping, sector_t block)
static const struct address_space_operations adfs_aops = {
.readpage = adfs_readpage,
.writepage = adfs_writepage,
- .sync_page = block_sync_page,
.write_begin = adfs_write_begin,
.write_end = generic_write_end,
.bmap = _adfs_bmap
diff --git a/fs/affs/file.c b/fs/affs/file.c
index 0a90dcd46de2..acf321b70fcd 100644
--- a/fs/affs/file.c
+++ b/fs/affs/file.c
@@ -429,7 +429,6 @@ static sector_t _affs_bmap(struct address_space *mapping, sector_t block)
const struct address_space_operations affs_aops = {
.readpage = affs_readpage,
.writepage = affs_writepage,
- .sync_page = block_sync_page,
.write_begin = affs_write_begin,
.write_end = generic_write_end,
.bmap = _affs_bmap
@@ -786,7 +785,6 @@ out:
const struct address_space_operations affs_aops_ofs = {
.readpage = affs_readpage_ofs,
//.writepage = affs_writepage_ofs,
- //.sync_page = affs_sync_page_ofs,
.write_begin = affs_write_begin_ofs,
.write_end = affs_write_end_ofs
};
diff --git a/fs/aio.c b/fs/aio.c
index 26869cde3953..a936b7fe4f69 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -34,8 +34,6 @@
#include <linux/security.h>
#include <linux/eventfd.h>
#include <linux/blkdev.h>
-#include <linux/mempool.h>
-#include <linux/hash.h>
#include <linux/compat.h>
#include <asm/kmap_types.h>
@@ -65,14 +63,6 @@ static DECLARE_WORK(fput_work, aio_fput_routine);
static DEFINE_SPINLOCK(fput_lock);
static LIST_HEAD(fput_head);
-#define AIO_BATCH_HASH_BITS 3 /* allocated on-stack, so don't go crazy */
-#define AIO_BATCH_HASH_SIZE (1 << AIO_BATCH_HASH_BITS)
-struct aio_batch_entry {
- struct hlist_node list;
- struct address_space *mapping;
-};
-mempool_t *abe_pool;
-
static void aio_kick_handler(struct work_struct *);
static void aio_queue_work(struct kioctx *);
@@ -86,8 +76,7 @@ static int __init aio_setup(void)
kioctx_cachep = KMEM_CACHE(kioctx,SLAB_HWCACHE_ALIGN|SLAB_PANIC);
aio_wq = create_workqueue("aio");
- abe_pool = mempool_create_kmalloc_pool(1, sizeof(struct aio_batch_entry));
- BUG_ON(!aio_wq || !abe_pool);
+ BUG_ON(!aio_wq);
pr_debug("aio_setup: sizeof(struct page) = %d\n", (int)sizeof(struct page));
@@ -1525,57 +1514,8 @@ static ssize_t aio_setup_iocb(struct kiocb *kiocb, bool compat)
return 0;
}
-static void aio_batch_add(struct address_space *mapping,
- struct hlist_head *batch_hash)
-{
- struct aio_batch_entry *abe;
- struct hlist_node *pos;
- unsigned bucket;
-
- bucket = hash_ptr(mapping, AIO_BATCH_HASH_BITS);
- hlist_for_each_entry(abe, pos, &batch_hash[bucket], list) {
- if (abe->mapping == mapping)
- return;
- }
-
- abe = mempool_alloc(abe_pool, GFP_KERNEL);
-
- /*
- * we should be using igrab here, but
- * we don't want to hammer on the global
- * inode spinlock just to take an extra
- * reference on a file that we must already
- * have a reference to.
- *
- * When we're called, we always have a reference
- * on the file, so we must always have a reference
- * on the inode, so ihold() is safe here.
- */
- ihold(mapping->host);
- abe->mapping = mapping;
- hlist_add_head(&abe->list, &batch_hash[bucket]);
- return;
-}
-
-static void aio_batch_free(struct hlist_head *batch_hash)
-{
- struct aio_batch_entry *abe;
- struct hlist_node *pos, *n;
- int i;
-
- for (i = 0; i < AIO_BATCH_HASH_SIZE; i++) {
- hlist_for_each_entry_safe(abe, pos, n, &batch_hash[i], list) {
- blk_run_address_space(abe->mapping);
- iput(abe->mapping->host);
- hlist_del(&abe->list);
- mempool_free(abe, abe_pool);
- }
- }
-}
-
static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
- struct iocb *iocb, struct hlist_head *batch_hash,
- bool compat)
+ struct iocb *iocb, bool compat)
{
struct kiocb *req;
struct file *file;
@@ -1666,11 +1606,6 @@ static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
;
}
spin_unlock_irq(&ctx->ctx_lock);
- if (req->ki_opcode == IOCB_CMD_PREAD ||
- req->ki_opcode == IOCB_CMD_PREADV ||
- req->ki_opcode == IOCB_CMD_PWRITE ||
- req->ki_opcode == IOCB_CMD_PWRITEV)
- aio_batch_add(file->f_mapping, batch_hash);
aio_put_req(req); /* drop extra ref to req */
return 0;
@@ -1687,7 +1622,7 @@ long do_io_submit(aio_context_t ctx_id, long nr,
struct kioctx *ctx;
long ret = 0;
int i;
- struct hlist_head batch_hash[AIO_BATCH_HASH_SIZE] = { { 0, }, };
+ struct blk_plug plug;
if (unlikely(nr < 0))
return -EINVAL;
@@ -1704,6 +1639,8 @@ long do_io_submit(aio_context_t ctx_id, long nr,
return -EINVAL;
}
+ blk_start_plug(&plug);
+
/*
* AKPM: should this return a partial result if some of the IOs were
* successfully submitted?
@@ -1722,11 +1659,11 @@ long do_io_submit(aio_context_t ctx_id, long nr,
break;
}
- ret = io_submit_one(ctx, user_iocb, &tmp, batch_hash, compat);
+ ret = io_submit_one(ctx, user_iocb, &tmp, compat);
if (ret)
break;
}
- aio_batch_free(batch_hash);
+ blk_finish_plug(&plug);
put_ioctx(ctx);
return i ? i : ret;
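
A hedged outline of the reworked submission path above: a single on-stack plug spans the whole io_submit() loop, so bios generated by many iocbs are queued on the task and dispatched together, which is what the removed per-mapping batch hash used to approximate. submit_many() is an illustrative name; io_submit_one() is the function from the hunk, and error handling beyond the basics is elided.

#include <linux/blkdev.h>
#include <linux/uaccess.h>

static long submit_many(struct kioctx *ctx, long nr,
			struct iocb __user * __user *iocbpp, bool compat)
{
	struct blk_plug plug;
	long i, ret = 0;

	blk_start_plug(&plug);
	for (i = 0; i < nr; i++) {
		struct iocb __user *user_iocb;
		struct iocb tmp;

		if (get_user(user_iocb, iocbpp + i) ||
		    copy_from_user(&tmp, user_iocb, sizeof(tmp))) {
			ret = -EFAULT;
			break;
		}
		ret = io_submit_one(ctx, user_iocb, &tmp, compat);
		if (ret)
			break;
	}
	blk_finish_plug(&plug);		/* flush everything queued above */

	return i ? i : ret;
}
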
diff --git a/fs/befs/linuxvfs.c b/fs/befs/linuxvfs.c
index b1d0c794747b..06457ed8f3e7 100644
--- a/fs/befs/linuxvfs.c
+++ b/fs/befs/linuxvfs.c
@@ -75,7 +75,6 @@ static const struct inode_operations befs_dir_inode_operations = {
static const struct address_space_operations befs_aops = {
.readpage = befs_readpage,
- .sync_page = block_sync_page,
.bmap = befs_bmap,
};
diff --git a/fs/bfs/file.c b/fs/bfs/file.c
index eb67edd0f8ea..f20e8a71062f 100644
--- a/fs/bfs/file.c
+++ b/fs/bfs/file.c
@@ -186,7 +186,6 @@ static sector_t bfs_bmap(struct address_space *mapping, sector_t block)
const struct address_space_operations bfs_aops = {
.readpage = bfs_readpage,
.writepage = bfs_writepage,
- .sync_page = block_sync_page,
.write_begin = bfs_write_begin,
.write_end = generic_write_end,
.bmap = bfs_bmap,
diff --git a/fs/block_dev.c b/fs/block_dev.c
index d42cad2757ae..fbe05cbdd692 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -1529,7 +1529,6 @@ static int blkdev_releasepage(struct page *page, gfp_t wait)
static const struct address_space_operations def_blk_aops = {
.readpage = blkdev_readpage,
.writepage = blkdev_writepage,
- .sync_page = block_sync_page,
.write_begin = blkdev_write_begin,
.write_end = blkdev_write_end,
.writepages = generic_writepages,
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index e1aa8d607bc7..ada1f6bd0a57 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -847,7 +847,6 @@ static const struct address_space_operations btree_aops = {
.writepages = btree_writepages,
.releasepage = btree_releasepage,
.invalidatepage = btree_invalidatepage,
- .sync_page = block_sync_page,
#ifdef CONFIG_MIGRATION
.migratepage = btree_migratepage,
#endif
@@ -1331,82 +1330,6 @@ static int btrfs_congested_fn(void *congested_data, int bdi_bits)
}
/*
- * this unplugs every device on the box, and it is only used when page
- * is null
- */
-static void __unplug_io_fn(struct backing_dev_info *bdi, struct page *page)
-{
- struct btrfs_device *device;
- struct btrfs_fs_info *info;
-
- info = (struct btrfs_fs_info *)bdi->unplug_io_data;
- list_for_each_entry(device, &info->fs_devices->devices, dev_list) {
- if (!device->bdev)
- continue;
-
- bdi = blk_get_backing_dev_info(device->bdev);
- if (bdi->unplug_io_fn)
- bdi->unplug_io_fn(bdi, page);
- }
-}
-
-static void btrfs_unplug_io_fn(struct backing_dev_info *bdi, struct page *page)
-{
- struct inode *inode;
- struct extent_map_tree *em_tree;
- struct extent_map *em;
- struct address_space *mapping;
- u64 offset;
-
- /* the generic O_DIRECT read code does this */
- if (1 || !page) {
- __unplug_io_fn(bdi, page);
- return;
- }
-
- /*
- * page->mapping may change at any time. Get a consistent copy
- * and use that for everything below
- */
- smp_mb();
- mapping = page->mapping;
- if (!mapping)
- return;
-
- inode = mapping->host;
-
- /*
- * don't do the expensive searching for a small number of
- * devices
- */
- if (BTRFS_I(inode)->root->fs_info->fs_devices->open_devices <= 2) {
- __unplug_io_fn(bdi, page);
- return;
- }
-
- offset = page_offset(page);
-
- em_tree = &BTRFS_I(inode)->extent_tree;
- read_lock(&em_tree->lock);
- em = lookup_extent_mapping(em_tree, offset, PAGE_CACHE_SIZE);
- read_unlock(&em_tree->lock);
- if (!em) {
- __unplug_io_fn(bdi, page);
- return;
- }
-
- if (em->block_start >= EXTENT_MAP_LAST_BYTE) {
- free_extent_map(em);
- __unplug_io_fn(bdi, page);
- return;
- }
- offset = offset - em->start;
- btrfs_unplug_page(&BTRFS_I(inode)->root->fs_info->mapping_tree,
- em->block_start + offset, page);
- free_extent_map(em);
-}
-
-/*
* If this fails, caller must call bdi_destroy() to get rid of the
* bdi again.
*/
@@ -1420,8 +1343,6 @@ static int setup_bdi(struct btrfs_fs_info *info, struct backing_dev_info *bdi)
return err;
bdi->ra_pages = default_backing_dev_info.ra_pages;
- bdi->unplug_io_fn = btrfs_unplug_io_fn;
- bdi->unplug_io_data = info;
bdi->congested_fn = btrfs_congested_fn;
bdi->congested_data = info;
return 0;
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index fd3f172e94e6..00497d551e51 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -2188,7 +2188,7 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc,
unsigned long nr_written = 0;
if (wbc->sync_mode == WB_SYNC_ALL)
- write_flags = WRITE_SYNC_PLUG;
+ write_flags = WRITE_SYNC;
else
write_flags = WRITE;
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 0efdb65953c5..02438c917923 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -7340,7 +7340,6 @@ static const struct address_space_operations btrfs_aops = {
.writepage = btrfs_writepage,
.writepages = btrfs_writepages,
.readpages = btrfs_readpages,
- .sync_page = block_sync_page,
.direct_IO = btrfs_direct_IO,
.invalidatepage = btrfs_invalidatepage,
.releasepage = btrfs_releasepage,
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index dd13eb81ee40..9d554e8e6583 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -162,7 +162,6 @@ static noinline int run_scheduled_bios(struct btrfs_device *device)
struct bio *cur;
int again = 0;
unsigned long num_run;
- unsigned long num_sync_run;
unsigned long batch_run = 0;
unsigned long limit;
unsigned long last_waited = 0;
@@ -173,11 +172,6 @@ static noinline int run_scheduled_bios(struct btrfs_device *device)
limit = btrfs_async_submit_limit(fs_info);
limit = limit * 2 / 3;
- /* we want to make sure that every time we switch from the sync
- * list to the normal list, we unplug
- */
- num_sync_run = 0;
-
loop:
spin_lock(&device->io_lock);
@@ -223,15 +217,6 @@ loop_lock:
spin_unlock(&device->io_lock);
- /*
- * if we're doing the regular priority list, make sure we unplug
- * for any high prio bios we've sent down
- */
- if (pending_bios == &device->pending_bios && num_sync_run > 0) {
- num_sync_run = 0;
- blk_run_backing_dev(bdi, NULL);
- }
-
while (pending) {
rmb();
@@ -259,19 +244,11 @@ loop_lock:
BUG_ON(atomic_read(&cur->bi_cnt) == 0);
- if (cur->bi_rw & REQ_SYNC)
- num_sync_run++;
-
submit_bio(cur->bi_rw, cur);
num_run++;
batch_run++;
- if (need_resched()) {
- if (num_sync_run) {
- blk_run_backing_dev(bdi, NULL);
- num_sync_run = 0;
- }
+ if (need_resched())
cond_resched();
- }
/*
* we made progress, there is more work to do and the bdi
@@ -304,13 +281,8 @@ loop_lock:
* against it before looping
*/
last_waited = ioc->last_waited;
- if (need_resched()) {
- if (num_sync_run) {
- blk_run_backing_dev(bdi, NULL);
- num_sync_run = 0;
- }
+ if (need_resched())
cond_resched();
- }
continue;
}
spin_lock(&device->io_lock);
@@ -323,22 +295,6 @@ loop_lock:
}
}
- if (num_sync_run) {
- num_sync_run = 0;
- blk_run_backing_dev(bdi, NULL);
- }
- /*
- * IO has already been through a long path to get here. Checksumming,
- * async helper threads, perhaps compression. We've done a pretty
- * good job of collecting a batch of IO and should just unplug
- * the device right away.
- *
- * This will help anyone who is waiting on the IO, they might have
- * already unplugged, but managed to do so before the bio they
- * cared about found its way down here.
- */
- blk_run_backing_dev(bdi, NULL);
-
cond_resched();
if (again)
goto loop;
@@ -2955,7 +2911,7 @@ static int find_live_mirror(struct map_lookup *map, int first, int num,
static int __btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw,
u64 logical, u64 *length,
struct btrfs_multi_bio **multi_ret,
- int mirror_num, struct page *unplug_page)
+ int mirror_num)
{
struct extent_map *em;
struct map_lookup *map;
@@ -2987,11 +2943,6 @@ again:
em = lookup_extent_mapping(em_tree, logical, *length);
read_unlock(&em_tree->lock);
- if (!em && unplug_page) {
- kfree(multi);
- return 0;
- }
-
if (!em) {
printk(KERN_CRIT "unable to find logical %llu len %llu\n",
(unsigned long long)logical,
@@ -3047,13 +2998,13 @@ again:
*length = em->len - offset;
}
- if (!multi_ret && !unplug_page)
+ if (!multi_ret)
goto out;
num_stripes = 1;
stripe_index = 0;
if (map->type & BTRFS_BLOCK_GROUP_RAID1) {
- if (unplug_page || (rw & REQ_WRITE))
+ if (rw & REQ_WRITE)
num_stripes = map->num_stripes;
else if (mirror_num)
stripe_index = mirror_num - 1;
@@ -3075,7 +3026,7 @@ again:
stripe_index = do_div(stripe_nr, factor);
stripe_index *= map->sub_stripes;
- if (unplug_page || (rw & REQ_WRITE))
+ if (rw & REQ_WRITE)
num_stripes = map->sub_stripes;
else if (mirror_num)
stripe_index += mirror_num - 1;
@@ -3095,22 +3046,10 @@ again:
BUG_ON(stripe_index >= map->num_stripes);
for (i = 0; i < num_stripes; i++) {
- if (unplug_page) {
- struct btrfs_device *device;
- struct backing_dev_info *bdi;
-
- device = map->stripes[stripe_index].dev;
- if (device->bdev) {
- bdi = blk_get_backing_dev_info(device->bdev);
- if (bdi->unplug_io_fn)
- bdi->unplug_io_fn(bdi, unplug_page);
- }
- } else {
- multi->stripes[i].physical =
- map->stripes[stripe_index].physical +
- stripe_offset + stripe_nr * map->stripe_len;
- multi->stripes[i].dev = map->stripes[stripe_index].dev;
- }
+ multi->stripes[i].physical =
+ map->stripes[stripe_index].physical +
+ stripe_offset + stripe_nr * map->stripe_len;
+ multi->stripes[i].dev = map->stripes[stripe_index].dev;
stripe_index++;
}
if (multi_ret) {
@@ -3128,7 +3067,7 @@ int btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw,
struct btrfs_multi_bio **multi_ret, int mirror_num)
{
return __btrfs_map_block(map_tree, rw, logical, length, multi_ret,
- mirror_num, NULL);
+ mirror_num);
}
int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree,
@@ -3196,14 +3135,6 @@ int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree,
return 0;
}
-int btrfs_unplug_page(struct btrfs_mapping_tree *map_tree,
- u64 logical, struct page *page)
-{
- u64 length = PAGE_CACHE_SIZE;
- return __btrfs_map_block(map_tree, READ, logical, &length,
- NULL, 0, page);
-}
-
static void end_bio_multi_stripe(struct bio *bio, int err)
{
struct btrfs_multi_bio *multi = bio->bi_private;
diff --git a/fs/buffer.c b/fs/buffer.c
index 2219a76e2caf..42534f67d71b 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -54,23 +54,15 @@ init_buffer(struct buffer_head *bh, bh_end_io_t *handler, void *private)
}
EXPORT_SYMBOL(init_buffer);
-static int sync_buffer(void *word)
+static int sleep_on_buffer(void *word)
{
- struct block_device *bd;
- struct buffer_head *bh
- = container_of(word, struct buffer_head, b_state);
-
- smp_mb();
- bd = bh->b_bdev;
- if (bd)
- blk_run_address_space(bd->bd_inode->i_mapping);
io_schedule();
return 0;
}
void __lock_buffer(struct buffer_head *bh)
{
- wait_on_bit_lock(&bh->b_state, BH_Lock, sync_buffer,
+ wait_on_bit_lock(&bh->b_state, BH_Lock, sleep_on_buffer,
TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(__lock_buffer);
@@ -90,7 +82,7 @@ EXPORT_SYMBOL(unlock_buffer);
*/
void __wait_on_buffer(struct buffer_head * bh)
{
- wait_on_bit(&bh->b_state, BH_Lock, sync_buffer, TASK_UNINTERRUPTIBLE);
+ wait_on_bit(&bh->b_state, BH_Lock, sleep_on_buffer, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(__wait_on_buffer);
@@ -749,7 +741,7 @@ static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
{
struct buffer_head *bh;
struct list_head tmp;
- struct address_space *mapping, *prev_mapping = NULL;
+ struct address_space *mapping;
int err = 0, err2;
INIT_LIST_HEAD(&tmp);
@@ -775,7 +767,7 @@ static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
* still in flight on potentially older
* contents.
*/
- write_dirty_buffer(bh, WRITE_SYNC_PLUG);
+ write_dirty_buffer(bh, WRITE_SYNC);
/*
* Kick off IO for the previous mapping. Note
@@ -783,10 +775,6 @@ static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
* wait_on_buffer() will do that for us
* through sync_buffer().
*/
- if (prev_mapping && prev_mapping != mapping)
- blk_run_address_space(prev_mapping);
- prev_mapping = mapping;
-
brelse(bh);
spin_lock(lock);
}
@@ -1614,14 +1602,8 @@ EXPORT_SYMBOL(unmap_underlying_metadata);
* prevents this contention from occurring.
*
* If block_write_full_page() is called with wbc->sync_mode ==
- * WB_SYNC_ALL, the writes are posted using WRITE_SYNC_PLUG; this
- * causes the writes to be flagged as synchronous writes, but the
- * block device queue will NOT be unplugged, since usually many pages
- * will be pushed to the out before the higher-level caller actually
- * waits for the writes to be completed. The various wait functions,
- * such as wait_on_writeback_range() will ultimately call sync_page()
- * which will ultimately call blk_run_backing_dev(), which will end up
- * unplugging the device queue.
+ * WB_SYNC_ALL, the writes are posted using WRITE_SYNC; this
+ * causes the writes to be flagged as synchronous writes.
*/
static int __block_write_full_page(struct inode *inode, struct page *page,
get_block_t *get_block, struct writeback_control *wbc,
@@ -1634,7 +1616,7 @@ static int __block_write_full_page(struct inode *inode, struct page *page,
const unsigned blocksize = 1 << inode->i_blkbits;
int nr_underway = 0;
int write_op = (wbc->sync_mode == WB_SYNC_ALL ?
- WRITE_SYNC_PLUG : WRITE);
+ WRITE_SYNC : WRITE);
BUG_ON(!PageLocked(page));
@@ -3138,17 +3120,6 @@ out:
}
EXPORT_SYMBOL(try_to_free_buffers);
-void block_sync_page(struct page *page)
-{
- struct address_space *mapping;
-
- smp_mb();
- mapping = page_mapping(page);
- if (mapping)
- blk_run_backing_dev(mapping->backing_dev_info, page);
-}
-EXPORT_SYMBOL(block_sync_page);
-
/*
* There are no bdflush tunables left. But distributions are
* still running obsolete flush daemons, so we terminate them here.
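
A short sketch of why the unplug in the old sync_buffer() can go away: a writer batches its buffers under an on-stack plug and flushes it (explicitly, or when the task blocks and the plug is flushed on its behalf), so a waiter in __wait_on_buffer() only has to sleep. write_then_wait() is an illustrative name; write_dirty_buffer() and wait_on_buffer() are existing buffer-cache APIs.

#include <linux/blkdev.h>
#include <linux/buffer_head.h>

static void write_then_wait(struct buffer_head *bhs[], int nr)
{
	struct blk_plug plug;
	int i;

	blk_start_plug(&plug);
	for (i = 0; i < nr; i++)
		write_dirty_buffer(bhs[i], WRITE_SYNC);
	blk_finish_plug(&plug);			/* I/O is now in flight */

	for (i = 0; i < nr; i++)
		wait_on_buffer(bhs[i]);		/* sleeps in sleep_on_buffer() */
}
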
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index e964b1cd5dd0..c27d236738fc 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -1569,34 +1569,6 @@ int cifs_fsync(struct file *file, int datasync)
return rc;
}
-/* static void cifs_sync_page(struct page *page)
-{
- struct address_space *mapping;
- struct inode *inode;
- unsigned long index = page->index;
- unsigned int rpages = 0;
- int rc = 0;
-
- cFYI(1, "sync page %p", page);
- mapping = page->mapping;
- if (!mapping)
- return 0;
- inode = mapping->host;
- if (!inode)
- return; */
-
-/* fill in rpages then
- result = cifs_pagein_inode(inode, index, rpages); */ /* BB finish */
-
-/* cFYI(1, "rpages is %d for sync page of Index %ld", rpages, index);
-
-#if 0
- if (rc < 0)
- return rc;
- return 0;
-#endif
-} */
-
/*
* As file closes, flush all cached write data for this inode checking
* for write behind errors.
@@ -2510,7 +2482,6 @@ const struct address_space_operations cifs_addr_ops = {
.set_page_dirty = __set_page_dirty_nobuffers,
.releasepage = cifs_release_page,
.invalidatepage = cifs_invalidate_page,
- /* .sync_page = cifs_sync_page, */
/* .direct_IO = */
};
@@ -2528,6 +2499,5 @@ const struct address_space_operations cifs_addr_ops_smallbuf = {
.set_page_dirty = __set_page_dirty_nobuffers,
.releasepage = cifs_release_page,
.invalidatepage = cifs_invalidate_page,
- /* .sync_page = cifs_sync_page, */
/* .direct_IO = */
};
diff --git a/fs/direct-io.c b/fs/direct-io.c
index b044705eedd4..426083136099 100644
--- a/fs/direct-io.c
+++ b/fs/direct-io.c
@@ -1110,11 +1110,8 @@ direct_io_worker(int rw, struct kiocb *iocb, struct inode *inode,
((rw & READ) || (dio->result == dio->size)))
ret = -EIOCBQUEUED;
- if (ret != -EIOCBQUEUED) {
- /* All IO is now issued, send it on its way */
- blk_run_address_space(inode->i_mapping);
+ if (ret != -EIOCBQUEUED)
dio_await_completion(dio);
- }
/*
* Sync will always be dropping the final ref and completing the
@@ -1176,7 +1173,7 @@ __blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
struct dio *dio;
if (rw & WRITE)
- rw = WRITE_ODIRECT_PLUG;
+ rw = WRITE_ODIRECT;
if (bdev)
bdev_blkbits = blksize_bits(bdev_logical_block_size(bdev));
diff --git a/fs/efs/inode.c b/fs/efs/inode.c
index a8e7797b9477..9c13412e6c99 100644
--- a/fs/efs/inode.c
+++ b/fs/efs/inode.c
@@ -23,7 +23,6 @@ static sector_t _efs_bmap(struct address_space *mapping, sector_t block)
}
static const struct address_space_operations efs_aops = {
.readpage = efs_readpage,
- .sync_page = block_sync_page,
.bmap = _efs_bmap
};
diff --git a/fs/exofs/inode.c b/fs/exofs/inode.c
index a7555238c41a..82b94c8f5d22 100644
--- a/fs/exofs/inode.c
+++ b/fs/exofs/inode.c
@@ -795,7 +795,6 @@ const struct address_space_operations exofs_aops = {
.direct_IO = NULL, /* TODO: Should be trivial to do */
/* With these NULL has special meaning or default is not exported */
- .sync_page = NULL,
.get_xip_mem = NULL,
.migratepage = NULL,
.launder_page = NULL,
diff --git a/fs/ext2/inode.c b/fs/ext2/inode.c
index 40ad210a5049..c47f706878b5 100644
--- a/fs/ext2/inode.c
+++ b/fs/ext2/inode.c
@@ -860,7 +860,6 @@ const struct address_space_operations ext2_aops = {
.readpage = ext2_readpage,
.readpages = ext2_readpages,
.writepage = ext2_writepage,
- .sync_page = block_sync_page,
.write_begin = ext2_write_begin,
.write_end = ext2_write_end,
.bmap = ext2_bmap,
@@ -880,7 +879,6 @@ const struct address_space_operations ext2_nobh_aops = {
.readpage = ext2_readpage,
.readpages = ext2_readpages,
.writepage = ext2_nobh_writepage,
- .sync_page = block_sync_page,
.write_begin = ext2_nobh_write_begin,
.write_end = nobh_write_end,
.bmap = ext2_bmap,
diff --git a/fs/ext3/inode.c b/fs/ext3/inode.c
index ae94f6d949f5..fe2541d250e4 100644
--- a/fs/ext3/inode.c
+++ b/fs/ext3/inode.c
@@ -1894,7 +1894,6 @@ static const struct address_space_operations ext3_ordered_aops = {
.readpage = ext3_readpage,
.readpages = ext3_readpages,
.writepage = ext3_ordered_writepage,
- .sync_page = block_sync_page,
.write_begin = ext3_write_begin,
.write_end = ext3_ordered_write_end,
.bmap = ext3_bmap,
@@ -1910,7 +1909,6 @@ static const struct address_space_operations ext3_writeback_aops = {
.readpage = ext3_readpage,
.readpages = ext3_readpages,
.writepage = ext3_writeback_writepage,
- .sync_page = block_sync_page,
.write_begin = ext3_write_begin,
.write_end = ext3_writeback_write_end,
.bmap = ext3_bmap,
@@ -1926,7 +1924,6 @@ static const struct address_space_operations ext3_journalled_aops = {
.readpage = ext3_readpage,
.readpages = ext3_readpages,
.writepage = ext3_journalled_writepage,
- .sync_page = block_sync_page,
.write_begin = ext3_write_begin,
.write_end = ext3_journalled_write_end,
.set_page_dirty = ext3_journalled_set_page_dirty,
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 9f7f9e49914f..9297ad46c465 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -3903,7 +3903,6 @@ static const struct address_space_operations ext4_ordered_aops = {
.readpage = ext4_readpage,
.readpages = ext4_readpages,
.writepage = ext4_writepage,
- .sync_page = block_sync_page,
.write_begin = ext4_write_begin,
.write_end = ext4_ordered_write_end,
.bmap = ext4_bmap,
@@ -3919,7 +3918,6 @@ static const struct address_space_operations ext4_writeback_aops = {
.readpage = ext4_readpage,
.readpages = ext4_readpages,
.writepage = ext4_writepage,
- .sync_page = block_sync_page,
.write_begin = ext4_write_begin,
.write_end = ext4_writeback_write_end,
.bmap = ext4_bmap,
@@ -3935,7 +3933,6 @@ static const struct address_space_operations ext4_journalled_aops = {
.readpage = ext4_readpage,
.readpages = ext4_readpages,
.writepage = ext4_writepage,
- .sync_page = block_sync_page,
.write_begin = ext4_write_begin,
.write_end = ext4_journalled_write_end,
.set_page_dirty = ext4_journalled_set_page_dirty,
@@ -3951,7 +3948,6 @@ static const struct address_space_operations ext4_da_aops = {
.readpages = ext4_readpages,
.writepage = ext4_writepage,
.writepages = ext4_da_writepages,
- .sync_page = block_sync_page,
.write_begin = ext4_da_write_begin,
.write_end = ext4_da_write_end,
.bmap = ext4_bmap,
diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c
index 955cc309142f..e2cd90e4bb7c 100644
--- a/fs/ext4/page-io.c
+++ b/fs/ext4/page-io.c
@@ -310,8 +310,7 @@ static int io_submit_init(struct ext4_io_submit *io,
io_end->offset = (page->index << PAGE_CACHE_SHIFT) + bh_offset(bh);
io->io_bio = bio;
- io->io_op = (wbc->sync_mode == WB_SYNC_ALL ?
- WRITE_SYNC_PLUG : WRITE);
+ io->io_op = (wbc->sync_mode == WB_SYNC_ALL ? WRITE_SYNC : WRITE);
io->io_next_block = bh->b_blocknr;
return 0;
}
diff --git a/fs/fat/inode.c b/fs/fat/inode.c
index 86753fe10bd1..f4ff09fb79b1 100644
--- a/fs/fat/inode.c
+++ b/fs/fat/inode.c
@@ -236,7 +236,6 @@ static const struct address_space_operations fat_aops = {
.readpages = fat_readpages,
.writepage = fat_writepage,
.writepages = fat_writepages,
- .sync_page = block_sync_page,
.write_begin = fat_write_begin,
.write_end = fat_write_end,
.direct_IO = fat_direct_IO,
diff --git a/fs/freevxfs/vxfs_subr.c b/fs/freevxfs/vxfs_subr.c
index 1429f3ae1e86..5d318c44f855 100644
--- a/fs/freevxfs/vxfs_subr.c
+++ b/fs/freevxfs/vxfs_subr.c
@@ -44,7 +44,6 @@ static sector_t vxfs_bmap(struct address_space *, sector_t);
const struct address_space_operations vxfs_aops = {
.readpage = vxfs_readpage,
.bmap = vxfs_bmap,
- .sync_page = block_sync_page,
};
inline void
diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
index 9e3f68cc1bd1..09e8d51eeb64 100644
--- a/fs/fuse/inode.c
+++ b/fs/fuse/inode.c
@@ -868,7 +868,6 @@ static int fuse_bdi_init(struct fuse_conn *fc, struct super_block *sb)
fc->bdi.name = "fuse";
fc->bdi.ra_pages = (VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE;
- fc->bdi.unplug_io_fn = default_unplug_io_fn;
/* fuse does it's own writeback accounting */
fc->bdi.capabilities = BDI_CAP_NO_ACCT_WB;
diff --git a/fs/gfs2/aops.c b/fs/gfs2/aops.c
index 4f36f8832b9b..2f87ad27efd0 100644
--- a/fs/gfs2/aops.c
+++ b/fs/gfs2/aops.c
@@ -1116,7 +1116,6 @@ static const struct address_space_operations gfs2_writeback_aops = {
.writepages = gfs2_writeback_writepages,
.readpage = gfs2_readpage,
.readpages = gfs2_readpages,
- .sync_page = block_sync_page,
.write_begin = gfs2_write_begin,
.write_end = gfs2_write_end,
.bmap = gfs2_bmap,
@@ -1132,7 +1131,6 @@ static const struct address_space_operations gfs2_ordered_aops = {
.writepage = gfs2_ordered_writepage,
.readpage = gfs2_readpage,
.readpages = gfs2_readpages,
- .sync_page = block_sync_page,
.write_begin = gfs2_write_begin,
.write_end = gfs2_write_end,
.set_page_dirty = gfs2_set_page_dirty,
@@ -1150,7 +1148,6 @@ static const struct address_space_operations gfs2_jdata_aops = {
.writepages = gfs2_jdata_writepages,
.readpage = gfs2_readpage,
.readpages = gfs2_readpages,
- .sync_page = block_sync_page,
.write_begin = gfs2_write_begin,
.write_end = gfs2_write_end,
.set_page_dirty = gfs2_set_page_dirty,
diff --git a/fs/gfs2/log.c b/fs/gfs2/log.c
index eb01f3575e10..7f1c11202342 100644
--- a/fs/gfs2/log.c
+++ b/fs/gfs2/log.c
@@ -121,7 +121,7 @@ __acquires(&sdp->sd_log_lock)
lock_buffer(bh);
if (test_clear_buffer_dirty(bh)) {
bh->b_end_io = end_buffer_write_sync;
- submit_bh(WRITE_SYNC_PLUG, bh);
+ submit_bh(WRITE_SYNC, bh);
} else {
unlock_buffer(bh);
brelse(bh);
@@ -647,7 +647,7 @@ static void gfs2_ordered_write(struct gfs2_sbd *sdp)
lock_buffer(bh);
if (buffer_mapped(bh) && test_clear_buffer_dirty(bh)) {
bh->b_end_io = end_buffer_write_sync;
- submit_bh(WRITE_SYNC_PLUG, bh);
+ submit_bh(WRITE_SYNC, bh);
} else {
unlock_buffer(bh);
brelse(bh);
diff --git a/fs/gfs2/lops.c b/fs/gfs2/lops.c
index bf33f822058d..48b545a1979a 100644
--- a/fs/gfs2/lops.c
+++ b/fs/gfs2/lops.c
@@ -200,7 +200,7 @@ static void buf_lo_before_commit(struct gfs2_sbd *sdp)
}
gfs2_log_unlock(sdp);
- submit_bh(WRITE_SYNC_PLUG, bh);
+ submit_bh(WRITE_SYNC, bh);
gfs2_log_lock(sdp);
n = 0;
@@ -210,7 +210,7 @@ static void buf_lo_before_commit(struct gfs2_sbd *sdp)
gfs2_log_unlock(sdp);
lock_buffer(bd2->bd_bh);
bh = gfs2_log_fake_buf(sdp, bd2->bd_bh);
- submit_bh(WRITE_SYNC_PLUG, bh);
+ submit_bh(WRITE_SYNC, bh);
gfs2_log_lock(sdp);
if (++n >= num)
break;
@@ -352,7 +352,7 @@ static void revoke_lo_before_commit(struct gfs2_sbd *sdp)
sdp->sd_log_num_revoke--;
if (offset + sizeof(u64) > sdp->sd_sb.sb_bsize) {
- submit_bh(WRITE_SYNC_PLUG, bh);
+ submit_bh(WRITE_SYNC, bh);
bh = gfs2_log_get_buf(sdp);
mh = (struct gfs2_meta_header *)bh->b_data;
@@ -369,7 +369,7 @@ static void revoke_lo_before_commit(struct gfs2_sbd *sdp)
}
gfs2_assert_withdraw(sdp, !sdp->sd_log_num_revoke);
- submit_bh(WRITE_SYNC_PLUG, bh);
+ submit_bh(WRITE_SYNC, bh);
}
static void revoke_lo_before_scan(struct gfs2_jdesc *jd,
@@ -571,7 +571,7 @@ static void gfs2_write_blocks(struct gfs2_sbd *sdp, struct buffer_head *bh,
ptr = bh_log_ptr(bh);
get_bh(bh);
- submit_bh(WRITE_SYNC_PLUG, bh);
+ submit_bh(WRITE_SYNC, bh);
gfs2_log_lock(sdp);
while(!list_empty(list)) {
bd = list_entry(list->next, struct gfs2_bufdata, bd_le.le_list);
@@ -597,7 +597,7 @@ static void gfs2_write_blocks(struct gfs2_sbd *sdp, struct buffer_head *bh,
} else {
bh1 = gfs2_log_fake_buf(sdp, bd->bd_bh);
}
- submit_bh(WRITE_SYNC_PLUG, bh1);
+ submit_bh(WRITE_SYNC, bh1);
gfs2_log_lock(sdp);
ptr += 2;
}
diff --git a/fs/gfs2/meta_io.c b/fs/gfs2/meta_io.c
index 939739c7b3f9..867b713cba92 100644
--- a/fs/gfs2/meta_io.c
+++ b/fs/gfs2/meta_io.c
@@ -37,7 +37,7 @@ static int gfs2_aspace_writepage(struct page *page, struct writeback_control *wb
struct buffer_head *bh, *head;
int nr_underway = 0;
int write_op = REQ_META |
- (wbc->sync_mode == WB_SYNC_ALL ? WRITE_SYNC_PLUG : WRITE);
+ (wbc->sync_mode == WB_SYNC_ALL ? WRITE_SYNC : WRITE);
BUG_ON(!PageLocked(page));
BUG_ON(!page_has_buffers(page));
@@ -94,7 +94,6 @@ static int gfs2_aspace_writepage(struct page *page, struct writeback_control *wb
const struct address_space_operations gfs2_meta_aops = {
.writepage = gfs2_aspace_writepage,
.releasepage = gfs2_releasepage,
- .sync_page = block_sync_page,
};
/**
diff --git a/fs/hfs/inode.c b/fs/hfs/inode.c
index dffb4e996643..fff16c968e67 100644
--- a/fs/hfs/inode.c
+++ b/fs/hfs/inode.c
@@ -150,7 +150,6 @@ static int hfs_writepages(struct address_space *mapping,
const struct address_space_operations hfs_btree_aops = {
.readpage = hfs_readpage,
.writepage = hfs_writepage,
- .sync_page = block_sync_page,
.write_begin = hfs_write_begin,
.write_end = generic_write_end,
.bmap = hfs_bmap,
@@ -160,7 +159,6 @@ const struct address_space_operations hfs_btree_aops = {
const struct address_space_operations hfs_aops = {
.readpage = hfs_readpage,
.writepage = hfs_writepage,
- .sync_page = block_sync_page,
.write_begin = hfs_write_begin,
.write_end = generic_write_end,
.bmap = hfs_bmap,
diff --git a/fs/hfsplus/inode.c b/fs/hfsplus/inode.c
index a8df651747f0..b248a6cfcad9 100644
--- a/fs/hfsplus/inode.c
+++ b/fs/hfsplus/inode.c
@@ -146,7 +146,6 @@ static int hfsplus_writepages(struct address_space *mapping,
const struct address_space_operations hfsplus_btree_aops = {
.readpage = hfsplus_readpage,
.writepage = hfsplus_writepage,
- .sync_page = block_sync_page,
.write_begin = hfsplus_write_begin,
.write_end = generic_write_end,
.bmap = hfsplus_bmap,
@@ -156,7 +155,6 @@ const struct address_space_operations hfsplus_btree_aops = {
const struct address_space_operations hfsplus_aops = {
.readpage = hfsplus_readpage,
.writepage = hfsplus_writepage,
- .sync_page = block_sync_page,
.write_begin = hfsplus_write_begin,
.write_end = generic_write_end,
.bmap = hfsplus_bmap,
diff --git a/fs/hpfs/file.c b/fs/hpfs/file.c
index c0340887c7ea..9e84257b3ad5 100644
--- a/fs/hpfs/file.c
+++ b/fs/hpfs/file.c
@@ -120,7 +120,6 @@ static sector_t _hpfs_bmap(struct address_space *mapping, sector_t block)
const struct address_space_operations hpfs_aops = {
.readpage = hpfs_readpage,
.writepage = hpfs_writepage,
- .sync_page = block_sync_page,
.write_begin = hpfs_write_begin,
.write_end = generic_write_end,
.bmap = _hpfs_bmap
diff --git a/fs/isofs/inode.c b/fs/isofs/inode.c
index a0f3833c0dbf..3db5ba4568fc 100644
--- a/fs/isofs/inode.c
+++ b/fs/isofs/inode.c
@@ -1158,7 +1158,6 @@ static sector_t _isofs_bmap(struct address_space *mapping, sector_t block)
static const struct address_space_operations isofs_aops = {
.readpage = isofs_readpage,
- .sync_page = block_sync_page,
.bmap = _isofs_bmap
};
diff --git a/fs/jbd/commit.c b/fs/jbd/commit.c
index 34a4861c14b8..66be299acb1b 100644
--- a/fs/jbd/commit.c
+++ b/fs/jbd/commit.c
@@ -333,7 +333,7 @@ void journal_commit_transaction(journal_t *journal)
* instead we rely on sync_buffer() doing the unplug for us.
*/
if (commit_transaction->t_synchronous_commit)
- write_op = WRITE_SYNC_PLUG;
+ write_op = WRITE_SYNC;
spin_lock(&commit_transaction->t_handle_lock);
while (commit_transaction->t_updates) {
DEFINE_WAIT(wait);
diff --git a/fs/jbd2/commit.c b/fs/jbd2/commit.c
index f3ad1598b201..3da1cc4346d5 100644
--- a/fs/jbd2/commit.c
+++ b/fs/jbd2/commit.c
@@ -137,9 +137,9 @@ static int journal_submit_commit_record(journal_t *journal,
if (journal->j_flags & JBD2_BARRIER &&
!JBD2_HAS_INCOMPAT_FEATURE(journal,
JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT))
- ret = submit_bh(WRITE_SYNC_PLUG | WRITE_FLUSH_FUA, bh);
+ ret = submit_bh(WRITE_SYNC | WRITE_FLUSH_FUA, bh);
else
- ret = submit_bh(WRITE_SYNC_PLUG, bh);
+ ret = submit_bh(WRITE_SYNC, bh);
*cbh = bh;
return ret;
@@ -369,7 +369,7 @@ void jbd2_journal_commit_transaction(journal_t *journal)
* instead we rely on sync_buffer() doing the unplug for us.
*/
if (commit_transaction->t_synchronous_commit)
- write_op = WRITE_SYNC_PLUG;
+ write_op = WRITE_SYNC;
trace_jbd2_commit_locking(journal, commit_transaction);
stats.run.rs_wait = commit_transaction->t_max_wait;
stats.run.rs_locked = jiffies;
diff --git a/fs/jfs/inode.c b/fs/jfs/inode.c
index 9978803ceedc..eddbb373209e 100644
--- a/fs/jfs/inode.c
+++ b/fs/jfs/inode.c
@@ -352,7 +352,6 @@ const struct address_space_operations jfs_aops = {
.readpages = jfs_readpages,
.writepage = jfs_writepage,
.writepages = jfs_writepages,
- .sync_page = block_sync_page,
.write_begin = jfs_write_begin,
.write_end = nobh_write_end,
.bmap = jfs_bmap,
diff --git a/fs/jfs/jfs_metapage.c b/fs/jfs/jfs_metapage.c
index 48b44bd8267b..6740d34cd82b 100644
--- a/fs/jfs/jfs_metapage.c
+++ b/fs/jfs/jfs_metapage.c
@@ -583,7 +583,6 @@ static void metapage_invalidatepage(struct page *page, unsigned long offset)
const struct address_space_operations jfs_metapage_aops = {
.readpage = metapage_readpage,
.writepage = metapage_writepage,
- .sync_page = block_sync_page,
.releasepage = metapage_releasepage,
.invalidatepage = metapage_invalidatepage,
.set_page_dirty = __set_page_dirty_nobuffers,
diff --git a/fs/logfs/dev_bdev.c b/fs/logfs/dev_bdev.c
index 723bc5bca09a..1adc8d455f0e 100644
--- a/fs/logfs/dev_bdev.c
+++ b/fs/logfs/dev_bdev.c
@@ -39,7 +39,6 @@ static int sync_request(struct page *page, struct block_device *bdev, int rw)
bio.bi_end_io = request_complete;
submit_bio(rw, &bio);
- generic_unplug_device(bdev_get_queue(bdev));
wait_for_completion(&complete);
return test_bit(BIO_UPTODATE, &bio.bi_flags) ? 0 : -EIO;
}
@@ -168,7 +167,6 @@ static void bdev_writeseg(struct super_block *sb, u64 ofs, size_t len)
}
len = PAGE_ALIGN(len);
__bdev_writeseg(sb, ofs, ofs >> PAGE_SHIFT, len >> PAGE_SHIFT);
- generic_unplug_device(bdev_get_queue(logfs_super(sb)->s_bdev));
}
diff --git a/fs/minix/inode.c b/fs/minix/inode.c
index ae0b83f476a6..adcdc0a4e182 100644
--- a/fs/minix/inode.c
+++ b/fs/minix/inode.c
@@ -399,7 +399,6 @@ static sector_t minix_bmap(struct address_space *mapping, sector_t block)
static const struct address_space_operations minix_aops = {
.readpage = minix_readpage,
.writepage = minix_writepage,
- .sync_page = block_sync_page,
.write_begin = minix_write_begin,
.write_end = generic_write_end,
.bmap = minix_bmap
diff --git a/fs/mpage.c b/fs/mpage.c
index d78455a81ec9..0afc809e46e0 100644
--- a/fs/mpage.c
+++ b/fs/mpage.c
@@ -364,6 +364,9 @@ mpage_readpages(struct address_space *mapping, struct list_head *pages,
sector_t last_block_in_bio = 0;
struct buffer_head map_bh;
unsigned long first_logical_block = 0;
+ struct blk_plug plug;
+
+ blk_start_plug(&plug);
map_bh.b_state = 0;
map_bh.b_size = 0;
@@ -385,6 +388,7 @@ mpage_readpages(struct address_space *mapping, struct list_head *pages,
BUG_ON(!list_empty(pages));
if (bio)
mpage_bio_submit(READ, bio);
+ blk_finish_plug(&plug);
return 0;
}
EXPORT_SYMBOL(mpage_readpages);
@@ -666,8 +670,11 @@ int
mpage_writepages(struct address_space *mapping,
struct writeback_control *wbc, get_block_t get_block)
{
+ struct blk_plug plug;
int ret;
+ blk_start_plug(&plug);
+
if (!get_block)
ret = generic_writepages(mapping, wbc);
else {
@@ -682,6 +689,7 @@ mpage_writepages(struct address_space *mapping,
if (mpd.bio)
mpage_bio_submit(WRITE, mpd.bio);
}
+ blk_finish_plug(&plug);
return ret;
}
EXPORT_SYMBOL(mpage_writepages);
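The mpage conversion above is the template for the new explicit plugging: the submitter puts a struct blk_plug on its stack, opens it with blk_start_plug(), queues however many bios it wants, and closes it with blk_finish_plug(), which hands the whole batch to the request queue(s). A minimal sketch of that pattern, assuming the API added later in this patch in include/linux/blkdev.h; my_submit_batch() and its arguments are purely illustrative:

#include <linux/blkdev.h>
#include <linux/bio.h>

/* Illustrative sketch: batch several read bios under one on-stack plug. */
static void my_submit_batch(struct bio **bios, int nr)
{
        struct blk_plug plug;
        int i;

        blk_start_plug(&plug);                  /* start collecting requests on this task */
        for (i = 0; i < nr; i++)
                submit_bio(READ, bios[i]);      /* held on current->plug, not yet issued */
        blk_finish_plug(&plug);                 /* flush the whole batch to the driver */
}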
diff --git a/fs/nilfs2/btnode.c b/fs/nilfs2/btnode.c
index 85f7baa15f5d..609cd223eea8 100644
--- a/fs/nilfs2/btnode.c
+++ b/fs/nilfs2/btnode.c
@@ -34,15 +34,10 @@
#include "page.h"
#include "btnode.h"
-
-static const struct address_space_operations def_btnode_aops = {
- .sync_page = block_sync_page,
-};
-
void nilfs_btnode_cache_init(struct address_space *btnc,
struct backing_dev_info *bdi)
{
- nilfs_mapping_init(btnc, bdi, &def_btnode_aops);
+ nilfs_mapping_init(btnc, bdi);
}
void nilfs_btnode_cache_clear(struct address_space *btnc)
diff --git a/fs/nilfs2/gcinode.c b/fs/nilfs2/gcinode.c
index caf9a6a3fb54..1c2a3e23f8b2 100644
--- a/fs/nilfs2/gcinode.c
+++ b/fs/nilfs2/gcinode.c
@@ -49,7 +49,6 @@
#include "ifile.h"
static const struct address_space_operations def_gcinode_aops = {
- .sync_page = block_sync_page,
};
/*
diff --git a/fs/nilfs2/inode.c b/fs/nilfs2/inode.c
index 2fd440d8d6b8..c89d5d1ea7c7 100644
--- a/fs/nilfs2/inode.c
+++ b/fs/nilfs2/inode.c
@@ -262,7 +262,6 @@ nilfs_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
const struct address_space_operations nilfs_aops = {
.writepage = nilfs_writepage,
.readpage = nilfs_readpage,
- .sync_page = block_sync_page,
.writepages = nilfs_writepages,
.set_page_dirty = nilfs_set_page_dirty,
.readpages = nilfs_readpages,
diff --git a/fs/nilfs2/mdt.c b/fs/nilfs2/mdt.c
index a0babd2bff6a..a649b05f7069 100644
--- a/fs/nilfs2/mdt.c
+++ b/fs/nilfs2/mdt.c
@@ -399,7 +399,6 @@ nilfs_mdt_write_page(struct page *page, struct writeback_control *wbc)
static const struct address_space_operations def_mdt_aops = {
.writepage = nilfs_mdt_write_page,
- .sync_page = block_sync_page,
};
static const struct inode_operations def_mdt_iops;
@@ -438,10 +437,6 @@ void nilfs_mdt_set_entry_size(struct inode *inode, unsigned entry_size,
mi->mi_first_entry_offset = DIV_ROUND_UP(header_size, entry_size);
}
-static const struct address_space_operations shadow_map_aops = {
- .sync_page = block_sync_page,
-};
-
/**
* nilfs_mdt_setup_shadow_map - setup shadow map and bind it to metadata file
* @inode: inode of the metadata file
@@ -455,9 +450,9 @@ int nilfs_mdt_setup_shadow_map(struct inode *inode,
INIT_LIST_HEAD(&shadow->frozen_buffers);
address_space_init_once(&shadow->frozen_data);
- nilfs_mapping_init(&shadow->frozen_data, bdi, &shadow_map_aops);
+ nilfs_mapping_init(&shadow->frozen_data, bdi);
address_space_init_once(&shadow->frozen_btnodes);
- nilfs_mapping_init(&shadow->frozen_btnodes, bdi, &shadow_map_aops);
+ nilfs_mapping_init(&shadow->frozen_btnodes, bdi);
mi->mi_shadow = shadow;
return 0;
}
diff --git a/fs/nilfs2/page.c b/fs/nilfs2/page.c
index a585b35fd6bc..4d2a1ee0eb47 100644
--- a/fs/nilfs2/page.c
+++ b/fs/nilfs2/page.c
@@ -493,15 +493,14 @@ unsigned nilfs_page_count_clean_buffers(struct page *page,
}
void nilfs_mapping_init(struct address_space *mapping,
- struct backing_dev_info *bdi,
- const struct address_space_operations *aops)
+ struct backing_dev_info *bdi)
{
mapping->host = NULL;
mapping->flags = 0;
mapping_set_gfp_mask(mapping, GFP_NOFS);
mapping->assoc_mapping = NULL;
mapping->backing_dev_info = bdi;
- mapping->a_ops = aops;
+ mapping->a_ops = NULL;
}
/*
diff --git a/fs/nilfs2/page.h b/fs/nilfs2/page.h
index 2a00953ebd5f..f06b79ad7493 100644
--- a/fs/nilfs2/page.h
+++ b/fs/nilfs2/page.h
@@ -62,8 +62,7 @@ int nilfs_copy_dirty_pages(struct address_space *, struct address_space *);
void nilfs_copy_back_pages(struct address_space *, struct address_space *);
void nilfs_clear_dirty_pages(struct address_space *);
void nilfs_mapping_init(struct address_space *mapping,
- struct backing_dev_info *bdi,
- const struct address_space_operations *aops);
+ struct backing_dev_info *bdi);
unsigned nilfs_page_count_clean_buffers(struct page *, unsigned, unsigned);
unsigned long nilfs_find_uncommitted_extent(struct inode *inode,
sector_t start_blk,
diff --git a/fs/nilfs2/segbuf.c b/fs/nilfs2/segbuf.c
index 0f83e93935b2..2853ff20f85a 100644
--- a/fs/nilfs2/segbuf.c
+++ b/fs/nilfs2/segbuf.c
@@ -509,7 +509,7 @@ static int nilfs_segbuf_write(struct nilfs_segment_buffer *segbuf,
* Last BIO is always sent through the following
* submission.
*/
- rw |= REQ_SYNC | REQ_UNPLUG;
+ rw |= REQ_SYNC;
res = nilfs_segbuf_submit_bio(segbuf, &wi, rw);
}
diff --git a/fs/ntfs/aops.c b/fs/ntfs/aops.c
index c3c2c7ac9020..0b1e885b8cf8 100644
--- a/fs/ntfs/aops.c
+++ b/fs/ntfs/aops.c
@@ -1543,8 +1543,6 @@ err_out:
*/
const struct address_space_operations ntfs_aops = {
.readpage = ntfs_readpage, /* Fill page with data. */
- .sync_page = block_sync_page, /* Currently, just unplugs the
- disk request queue. */
#ifdef NTFS_RW
.writepage = ntfs_writepage, /* Write dirty page to disk. */
#endif /* NTFS_RW */
@@ -1560,8 +1558,6 @@ const struct address_space_operations ntfs_aops = {
*/
const struct address_space_operations ntfs_mst_aops = {
.readpage = ntfs_readpage, /* Fill page with data. */
- .sync_page = block_sync_page, /* Currently, just unplugs the
- disk request queue. */
#ifdef NTFS_RW
.writepage = ntfs_writepage, /* Write dirty page to disk. */
.set_page_dirty = __set_page_dirty_nobuffers, /* Set the page dirty
diff --git a/fs/ntfs/compress.c b/fs/ntfs/compress.c
index 6551c7cbad92..ef9ed854255c 100644
--- a/fs/ntfs/compress.c
+++ b/fs/ntfs/compress.c
@@ -698,8 +698,7 @@ lock_retry_remap:
"uptodate! Unplugging the disk queue "
"and rescheduling.");
get_bh(tbh);
- blk_run_address_space(mapping);
- schedule();
+ io_schedule();
put_bh(tbh);
if (unlikely(!buffer_uptodate(tbh)))
goto read_err;
diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c
index 1fbb0e20131b..daea0359e974 100644
--- a/fs/ocfs2/aops.c
+++ b/fs/ocfs2/aops.c
@@ -2043,7 +2043,6 @@ const struct address_space_operations ocfs2_aops = {
.write_begin = ocfs2_write_begin,
.write_end = ocfs2_write_end,
.bmap = ocfs2_bmap,
- .sync_page = block_sync_page,
.direct_IO = ocfs2_direct_IO,
.invalidatepage = ocfs2_invalidatepage,
.releasepage = ocfs2_releasepage,
diff --git a/fs/ocfs2/cluster/heartbeat.c b/fs/ocfs2/cluster/heartbeat.c
index b108e863d8f6..1adab287bd24 100644
--- a/fs/ocfs2/cluster/heartbeat.c
+++ b/fs/ocfs2/cluster/heartbeat.c
@@ -367,11 +367,7 @@ static inline void o2hb_bio_wait_dec(struct o2hb_bio_wait_ctxt *wc,
static void o2hb_wait_on_io(struct o2hb_region *reg,
struct o2hb_bio_wait_ctxt *wc)
{
- struct address_space *mapping = reg->hr_bdev->bd_inode->i_mapping;
-
- blk_run_address_space(mapping);
o2hb_bio_wait_dec(wc, 1);
-
wait_for_completion(&wc->wc_io_complete);
}
diff --git a/fs/omfs/file.c b/fs/omfs/file.c
index 8a6d34fa668a..d738a7e493dd 100644
--- a/fs/omfs/file.c
+++ b/fs/omfs/file.c
@@ -372,7 +372,6 @@ const struct address_space_operations omfs_aops = {
.readpages = omfs_readpages,
.writepage = omfs_writepage,
.writepages = omfs_writepages,
- .sync_page = block_sync_page,
.write_begin = omfs_write_begin,
.write_end = generic_write_end,
.bmap = omfs_bmap,
diff --git a/fs/qnx4/inode.c b/fs/qnx4/inode.c
index e63b4171d583..2b0646613f5a 100644
--- a/fs/qnx4/inode.c
+++ b/fs/qnx4/inode.c
@@ -335,7 +335,6 @@ static sector_t qnx4_bmap(struct address_space *mapping, sector_t block)
static const struct address_space_operations qnx4_aops = {
.readpage = qnx4_readpage,
.writepage = qnx4_writepage,
- .sync_page = block_sync_page,
.write_begin = qnx4_write_begin,
.write_end = generic_write_end,
.bmap = qnx4_bmap
diff --git a/fs/reiserfs/inode.c b/fs/reiserfs/inode.c
index 0bae036831e2..03674675f886 100644
--- a/fs/reiserfs/inode.c
+++ b/fs/reiserfs/inode.c
@@ -3212,7 +3212,6 @@ const struct address_space_operations reiserfs_address_space_operations = {
.readpages = reiserfs_readpages,
.releasepage = reiserfs_releasepage,
.invalidatepage = reiserfs_invalidatepage,
- .sync_page = block_sync_page,
.write_begin = reiserfs_write_begin,
.write_end = reiserfs_write_end,
.bmap = reiserfs_aop_bmap,
diff --git a/fs/sysv/itree.c b/fs/sysv/itree.c
index 9ca66276315e..fa8d43c92bb8 100644
--- a/fs/sysv/itree.c
+++ b/fs/sysv/itree.c
@@ -488,7 +488,6 @@ static sector_t sysv_bmap(struct address_space *mapping, sector_t block)
const struct address_space_operations sysv_aops = {
.readpage = sysv_readpage,
.writepage = sysv_writepage,
- .sync_page = block_sync_page,
.write_begin = sysv_write_begin,
.write_end = generic_write_end,
.bmap = sysv_bmap
diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c
index 6e11c2975dcf..81368d4d4a2c 100644
--- a/fs/ubifs/super.c
+++ b/fs/ubifs/super.c
@@ -1979,7 +1979,6 @@ static int ubifs_fill_super(struct super_block *sb, void *data, int silent)
*/
c->bdi.name = "ubifs",
c->bdi.capabilities = BDI_CAP_MAP_COPY;
- c->bdi.unplug_io_fn = default_unplug_io_fn;
err = bdi_init(&c->bdi);
if (err)
goto out_close;
diff --git a/fs/udf/file.c b/fs/udf/file.c
index 89c78486cbbe..94e4553491c9 100644
--- a/fs/udf/file.c
+++ b/fs/udf/file.c
@@ -98,7 +98,6 @@ static int udf_adinicb_write_end(struct file *file,
const struct address_space_operations udf_adinicb_aops = {
.readpage = udf_adinicb_readpage,
.writepage = udf_adinicb_writepage,
- .sync_page = block_sync_page,
.write_begin = simple_write_begin,
.write_end = udf_adinicb_write_end,
};
diff --git a/fs/udf/inode.c b/fs/udf/inode.c
index c6a2e782b97b..fa96fc0fe12b 100644
--- a/fs/udf/inode.c
+++ b/fs/udf/inode.c
@@ -133,7 +133,6 @@ static sector_t udf_bmap(struct address_space *mapping, sector_t block)
const struct address_space_operations udf_aops = {
.readpage = udf_readpage,
.writepage = udf_writepage,
- .sync_page = block_sync_page,
.write_begin = udf_write_begin,
.write_end = generic_write_end,
.bmap = udf_bmap,
diff --git a/fs/ufs/inode.c b/fs/ufs/inode.c
index 2b251f2093af..83b28444eb17 100644
--- a/fs/ufs/inode.c
+++ b/fs/ufs/inode.c
@@ -588,7 +588,6 @@ static sector_t ufs_bmap(struct address_space *mapping, sector_t block)
const struct address_space_operations ufs_aops = {
.readpage = ufs_readpage,
.writepage = ufs_writepage,
- .sync_page = block_sync_page,
.write_begin = ufs_write_begin,
.write_end = generic_write_end,
.bmap = ufs_bmap
diff --git a/fs/ufs/truncate.c b/fs/ufs/truncate.c
index a58f9155fc9a..ff0e79276f2d 100644
--- a/fs/ufs/truncate.c
+++ b/fs/ufs/truncate.c
@@ -481,7 +481,7 @@ int ufs_truncate(struct inode *inode, loff_t old_i_size)
break;
if (IS_SYNC(inode) && (inode->i_state & I_DIRTY))
ufs_sync_inode (inode);
- blk_run_address_space(inode->i_mapping);
+ blk_flush_plug(current);
yield();
}
diff --git a/fs/xfs/linux-2.6/xfs_aops.c b/fs/xfs/linux-2.6/xfs_aops.c
index ec7bbb5645b6..6bbb0ee33253 100644
--- a/fs/xfs/linux-2.6/xfs_aops.c
+++ b/fs/xfs/linux-2.6/xfs_aops.c
@@ -413,8 +413,7 @@ xfs_submit_ioend_bio(
if (xfs_ioend_new_eof(ioend))
xfs_mark_inode_dirty(XFS_I(ioend->io_inode));
- submit_bio(wbc->sync_mode == WB_SYNC_ALL ?
- WRITE_SYNC_PLUG : WRITE, bio);
+ submit_bio(wbc->sync_mode == WB_SYNC_ALL ? WRITE_SYNC : WRITE, bio);
}
STATIC struct bio *
@@ -1495,7 +1494,6 @@ const struct address_space_operations xfs_address_space_operations = {
.readpages = xfs_vm_readpages,
.writepage = xfs_vm_writepage,
.writepages = xfs_vm_writepages,
- .sync_page = block_sync_page,
.releasepage = xfs_vm_releasepage,
.invalidatepage = xfs_vm_invalidatepage,
.write_begin = xfs_vm_write_begin,
diff --git a/fs/xfs/linux-2.6/xfs_buf.c b/fs/xfs/linux-2.6/xfs_buf.c
index ac1c7e8378dd..4f8f53c4d42c 100644
--- a/fs/xfs/linux-2.6/xfs_buf.c
+++ b/fs/xfs/linux-2.6/xfs_buf.c
@@ -991,7 +991,7 @@ xfs_buf_lock(
if (atomic_read(&bp->b_pin_count) && (bp->b_flags & XBF_STALE))
xfs_log_force(bp->b_target->bt_mount, 0);
if (atomic_read(&bp->b_io_remaining))
- blk_run_address_space(bp->b_target->bt_mapping);
+ blk_flush_plug(current);
down(&bp->b_sema);
XB_SET_OWNER(bp);
@@ -1035,9 +1035,7 @@ xfs_buf_wait_unpin(
set_current_state(TASK_UNINTERRUPTIBLE);
if (atomic_read(&bp->b_pin_count) == 0)
break;
- if (atomic_read(&bp->b_io_remaining))
- blk_run_address_space(bp->b_target->bt_mapping);
- schedule();
+ io_schedule();
}
remove_wait_queue(&bp->b_waiters, &wait);
set_current_state(TASK_RUNNING);
@@ -1443,7 +1441,7 @@ xfs_buf_iowait(
trace_xfs_buf_iowait(bp, _RET_IP_);
if (atomic_read(&bp->b_io_remaining))
- blk_run_address_space(bp->b_target->bt_mapping);
+ blk_flush_plug(current);
wait_for_completion(&bp->b_iowait);
trace_xfs_buf_iowait_done(bp, _RET_IP_);
@@ -1667,7 +1665,6 @@ xfs_mapping_buftarg(
struct inode *inode;
struct address_space *mapping;
static const struct address_space_operations mapping_aops = {
- .sync_page = block_sync_page,
.migratepage = fail_migrate_page,
};
@@ -1948,7 +1945,7 @@ xfsbufd(
count++;
}
if (count)
- blk_run_address_space(target->bt_mapping);
+ blk_flush_plug(current);
} while (!kthread_should_stop());
@@ -1996,7 +1993,7 @@ xfs_flush_buftarg(
if (wait) {
/* Expedite and wait for IO to complete. */
- blk_run_address_space(target->bt_mapping);
+ blk_flush_plug(current);
while (!list_empty(&wait_list)) {
bp = list_first_entry(&wait_list, struct xfs_buf, b_list);
diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h
index 4ce34fa937d4..96f4094b706d 100644
--- a/include/linux/backing-dev.h
+++ b/include/linux/backing-dev.h
@@ -66,8 +66,6 @@ struct backing_dev_info {
unsigned int capabilities; /* Device capabilities */
congested_fn *congested_fn; /* Function pointer if device is md/dm */
void *congested_data; /* Pointer to aux data for congested func */
- void (*unplug_io_fn)(struct backing_dev_info *, struct page *);
- void *unplug_io_data;
char *name;
@@ -251,7 +249,6 @@ int bdi_set_max_ratio(struct backing_dev_info *bdi, unsigned int max_ratio);
extern struct backing_dev_info default_backing_dev_info;
extern struct backing_dev_info noop_backing_dev_info;
-void default_unplug_io_fn(struct backing_dev_info *bdi, struct page *page);
int writeback_in_progress(struct backing_dev_info *bdi);
@@ -336,17 +333,4 @@ static inline int bdi_sched_wait(void *word)
return 0;
}
-static inline void blk_run_backing_dev(struct backing_dev_info *bdi,
- struct page *page)
-{
- if (bdi && bdi->unplug_io_fn)
- bdi->unplug_io_fn(bdi, page);
-}
-
-static inline void blk_run_address_space(struct address_space *mapping)
-{
- if (mapping)
- blk_run_backing_dev(mapping->backing_dev_info, NULL);
-}
-
#endif /* _LINUX_BACKING_DEV_H */
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index dddedfc0af81..be50d9e70a7d 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -128,7 +128,6 @@ enum rq_flag_bits {
__REQ_NOIDLE, /* don't anticipate more IO after this one */
/* bio only flags */
- __REQ_UNPLUG, /* unplug the immediately after submission */
__REQ_RAHEAD, /* read ahead, can fail anytime */
__REQ_THROTTLED, /* This bio has already been subjected to
* throttling rules. Don't do it again. */
@@ -152,6 +151,7 @@ enum rq_flag_bits {
__REQ_IO_STAT, /* account I/O stat */
__REQ_MIXED_MERGE, /* merge of different types, fail separately */
__REQ_SECURE, /* secure discard (used with __REQ_DISCARD) */
+ __REQ_ON_PLUG, /* on plug list */
__REQ_NR_BITS, /* stops here */
};
@@ -171,7 +171,6 @@ enum rq_flag_bits {
REQ_NOIDLE | REQ_FLUSH | REQ_FUA)
#define REQ_CLONE_MASK REQ_COMMON_MASK
-#define REQ_UNPLUG (1 << __REQ_UNPLUG)
#define REQ_RAHEAD (1 << __REQ_RAHEAD)
#define REQ_THROTTLED (1 << __REQ_THROTTLED)
@@ -193,5 +192,6 @@ enum rq_flag_bits {
#define REQ_IO_STAT (1 << __REQ_IO_STAT)
#define REQ_MIXED_MERGE (1 << __REQ_MIXED_MERGE)
#define REQ_SECURE (1 << __REQ_SECURE)
+#define REQ_ON_PLUG (1 << __REQ_ON_PLUG)
#endif /* __LINUX_BLK_TYPES_H */
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 13b75ca62181..91fa428fa2c1 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -196,7 +196,6 @@ typedef void (request_fn_proc) (struct request_queue *q);
typedef int (make_request_fn) (struct request_queue *q, struct bio *bio);
typedef int (prep_rq_fn) (struct request_queue *, struct request *);
typedef void (unprep_rq_fn) (struct request_queue *, struct request *);
-typedef void (unplug_fn) (struct request_queue *);
struct bio_vec;
struct bvec_merge_data {
@@ -279,7 +278,6 @@ struct request_queue
make_request_fn *make_request_fn;
prep_rq_fn *prep_rq_fn;
unprep_rq_fn *unprep_rq_fn;
- unplug_fn *unplug_fn;
merge_bvec_fn *merge_bvec_fn;
softirq_done_fn *softirq_done_fn;
rq_timed_out_fn *rq_timed_out_fn;
@@ -293,12 +291,9 @@ struct request_queue
struct request *boundary_rq;
/*
- * Auto-unplugging state
+ * Delayed queue handling
*/
- struct timer_list unplug_timer;
- int unplug_thresh; /* After this many requests */
- unsigned long unplug_delay; /* After this many jiffies */
- struct work_struct unplug_work;
+ struct delayed_work delay_work;
struct backing_dev_info backing_dev_info;
@@ -394,14 +389,13 @@ struct request_queue
#define QUEUE_FLAG_ASYNCFULL 4 /* write queue has been filled */
#define QUEUE_FLAG_DEAD 5 /* queue being torn down */
#define QUEUE_FLAG_REENTER 6 /* Re-entrancy avoidance */
-#define QUEUE_FLAG_PLUGGED 7 /* queue is plugged */
-#define QUEUE_FLAG_ELVSWITCH 8 /* don't use elevator, just do FIFO */
-#define QUEUE_FLAG_BIDI 9 /* queue supports bidi requests */
-#define QUEUE_FLAG_NOMERGES 10 /* disable merge attempts */
-#define QUEUE_FLAG_SAME_COMP 11 /* force complete on same CPU */
-#define QUEUE_FLAG_FAIL_IO 12 /* fake timeout */
-#define QUEUE_FLAG_STACKABLE 13 /* supports request stacking */
-#define QUEUE_FLAG_NONROT 14 /* non-rotational device (SSD) */
+#define QUEUE_FLAG_ELVSWITCH 7 /* don't use elevator, just do FIFO */
+#define QUEUE_FLAG_BIDI 8 /* queue supports bidi requests */
+#define QUEUE_FLAG_NOMERGES 9 /* disable merge attempts */
+#define QUEUE_FLAG_SAME_COMP 10 /* force complete on same CPU */
+#define QUEUE_FLAG_FAIL_IO 11 /* fake timeout */
+#define QUEUE_FLAG_STACKABLE 12 /* supports request stacking */
+#define QUEUE_FLAG_NONROT 13 /* non-rotational device (SSD) */
#define QUEUE_FLAG_VIRT QUEUE_FLAG_NONROT /* paravirt device */
#define QUEUE_FLAG_IO_STAT 15 /* do IO stats */
#define QUEUE_FLAG_DISCARD 16 /* supports DISCARD */
@@ -479,7 +473,6 @@ static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
__clear_bit(flag, &q->queue_flags);
}
-#define blk_queue_plugged(q) test_bit(QUEUE_FLAG_PLUGGED, &(q)->queue_flags)
#define blk_queue_tagged(q) test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags)
#define blk_queue_stopped(q) test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)
#define blk_queue_nomerges(q) test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags)
@@ -674,9 +667,7 @@ extern int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
extern void blk_rq_unprep_clone(struct request *rq);
extern int blk_insert_cloned_request(struct request_queue *q,
struct request *rq);
-extern void blk_plug_device(struct request_queue *);
-extern void blk_plug_device_unlocked(struct request_queue *);
-extern int blk_remove_plug(struct request_queue *);
+extern void blk_delay_queue(struct request_queue *, unsigned long);
extern void blk_recount_segments(struct request_queue *, struct bio *);
extern int scsi_cmd_ioctl(struct request_queue *, struct gendisk *, fmode_t,
unsigned int, void __user *);
@@ -720,7 +711,6 @@ extern int blk_execute_rq(struct request_queue *, struct gendisk *,
struct request *, int);
extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
struct request *, int, rq_end_io_fn *);
-extern void blk_unplug(struct request_queue *q);
static inline struct request_queue *bdev_get_queue(struct block_device *bdev)
{
@@ -857,7 +847,6 @@ extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bd
extern int blk_rq_map_sg(struct request_queue *, struct request *, struct scatterlist *);
extern void blk_dump_rq_flags(struct request *, char *);
-extern void generic_unplug_device(struct request_queue *);
extern long nr_blockdev_pages(void);
int blk_get_queue(struct request_queue *);
@@ -865,6 +854,31 @@ struct request_queue *blk_alloc_queue(gfp_t);
struct request_queue *blk_alloc_queue_node(gfp_t, int);
extern void blk_put_queue(struct request_queue *);
+struct blk_plug {
+ unsigned long magic;
+ struct list_head list;
+ unsigned int should_sort;
+};
+
+extern void blk_start_plug(struct blk_plug *);
+extern void blk_finish_plug(struct blk_plug *);
+extern void __blk_flush_plug(struct task_struct *, struct blk_plug *);
+
+static inline void blk_flush_plug(struct task_struct *tsk)
+{
+ struct blk_plug *plug = tsk->plug;
+
+ if (unlikely(plug))
+ __blk_flush_plug(tsk, plug);
+}
+
+static inline bool blk_needs_flush_plug(struct task_struct *tsk)
+{
+ struct blk_plug *plug = tsk->plug;
+
+ return plug && !list_empty(&plug->list);
+}
+
/*
* tag stuff
*/
@@ -1283,6 +1297,23 @@ static inline long nr_blockdev_pages(void)
return 0;
}
+static inline void blk_start_plug(struct list_head *list)
+{
+}
+
+static inline void blk_finish_plug(struct list_head *list)
+{
+}
+
+static inline void blk_flush_plug(struct task_struct *tsk)
+{
+}
+
+static inline bool blk_needs_flush_plug(struct task_struct *tsk)
+{
+ return false;
+}
+
#endif /* CONFIG_BLOCK */
#endif
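blk_flush_plug() is the explicit counterpart to the removed blk_run_address_space()/blk_run_backing_dev() kicks: a task about to wait on I/O it may itself have plugged flushes its own plug first, as the ufs and xfs conversions above do. A minimal sketch of that wait pattern; the completion is illustrative:

#include <linux/blkdev.h>
#include <linux/completion.h>
#include <linux/sched.h>

/* Illustrative sketch: issue anything still sitting on our plug before sleeping on it. */
static void my_wait_for_io(struct completion *done)
{
        blk_flush_plug(current);        /* does nothing if the task has no plug */
        wait_for_completion(done);
}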
diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h
index 68d1fe7b877c..f5df23561b96 100644
--- a/include/linux/buffer_head.h
+++ b/include/linux/buffer_head.h
@@ -219,7 +219,6 @@ int generic_cont_expand_simple(struct inode *inode, loff_t size);
int block_commit_write(struct page *page, unsigned from, unsigned to);
int block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
get_block_t get_block);
-void block_sync_page(struct page *);
sector_t generic_block_bmap(struct address_space *, sector_t, get_block_t *);
int block_truncate_page(struct address_space *, loff_t, get_block_t *);
int nobh_write_begin(struct address_space *, loff_t, unsigned, unsigned,
diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
index 272496d1fae4..e2768834f397 100644
--- a/include/linux/device-mapper.h
+++ b/include/linux/device-mapper.h
@@ -286,11 +286,6 @@ void dm_table_add_target_callbacks(struct dm_table *t, struct dm_target_callback
int dm_table_complete(struct dm_table *t);
/*
- * Unplug all devices in a table.
- */
-void dm_table_unplug_all(struct dm_table *t);
-
-/*
* Table reference counting.
*/
struct dm_table *dm_get_live_table(struct mapped_device *md);
diff --git a/include/linux/elevator.h b/include/linux/elevator.h
index 39b68edb388d..ec6f72b84477 100644
--- a/include/linux/elevator.h
+++ b/include/linux/elevator.h
@@ -20,7 +20,6 @@ typedef void (elevator_bio_merged_fn) (struct request_queue *,
typedef int (elevator_dispatch_fn) (struct request_queue *, int);
typedef void (elevator_add_req_fn) (struct request_queue *, struct request *);
-typedef int (elevator_queue_empty_fn) (struct request_queue *);
typedef struct request *(elevator_request_list_fn) (struct request_queue *, struct request *);
typedef void (elevator_completed_req_fn) (struct request_queue *, struct request *);
typedef int (elevator_may_queue_fn) (struct request_queue *, int);
@@ -46,7 +45,6 @@ struct elevator_ops
elevator_activate_req_fn *elevator_activate_req_fn;
elevator_deactivate_req_fn *elevator_deactivate_req_fn;
- elevator_queue_empty_fn *elevator_queue_empty_fn;
elevator_completed_req_fn *elevator_completed_req_fn;
elevator_request_list_fn *elevator_former_req_fn;
@@ -101,17 +99,17 @@ struct elevator_queue
*/
extern void elv_dispatch_sort(struct request_queue *, struct request *);
extern void elv_dispatch_add_tail(struct request_queue *, struct request *);
-extern void elv_add_request(struct request_queue *, struct request *, int, int);
-extern void __elv_add_request(struct request_queue *, struct request *, int, int);
+extern void elv_add_request(struct request_queue *, struct request *, int);
+extern void __elv_add_request(struct request_queue *, struct request *, int);
extern void elv_insert(struct request_queue *, struct request *, int);
extern int elv_merge(struct request_queue *, struct request **, struct bio *);
+extern int elv_try_merge(struct request *, struct bio *);
extern void elv_merge_requests(struct request_queue *, struct request *,
struct request *);
extern void elv_merged_request(struct request_queue *, struct request *, int);
extern void elv_bio_merged(struct request_queue *q, struct request *,
struct bio *);
extern void elv_requeue_request(struct request_queue *, struct request *);
-extern int elv_queue_empty(struct request_queue *);
extern struct request *elv_former_request(struct request_queue *, struct request *);
extern struct request *elv_latter_request(struct request_queue *, struct request *);
extern int elv_register_queue(struct request_queue *q);
diff --git a/include/linux/fs.h b/include/linux/fs.h
index e38b50a4b9d2..b95199daa5eb 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -135,16 +135,10 @@ struct inodes_stat_t {
* block layer could (in theory) choose to ignore this
* request if it runs into resource problems.
* WRITE A normal async write. Device will be plugged.
- * WRITE_SYNC_PLUG Synchronous write. Identical to WRITE, but passes down
+ * WRITE_SYNC Synchronous write. Identical to WRITE, but passes down
* the hint that someone will be waiting on this IO
- * shortly. The device must still be unplugged explicitly,
- * WRITE_SYNC_PLUG does not do this as we could be
- * submitting more writes before we actually wait on any
- * of them.
- * WRITE_SYNC Like WRITE_SYNC_PLUG, but also unplugs the device
- * immediately after submission. The write equivalent
- * of READ_SYNC.
- * WRITE_ODIRECT_PLUG Special case write for O_DIRECT only.
+ * shortly. The write equivalent of READ_SYNC.
+ * WRITE_ODIRECT Special case write for O_DIRECT only.
* WRITE_FLUSH Like WRITE_SYNC but with preceding cache flush.
* WRITE_FUA Like WRITE_SYNC but data is guaranteed to be on
* non-volatile media on completion.
@@ -160,18 +154,14 @@ struct inodes_stat_t {
#define WRITE RW_MASK
#define READA RWA_MASK
-#define READ_SYNC (READ | REQ_SYNC | REQ_UNPLUG)
+#define READ_SYNC (READ | REQ_SYNC)
#define READ_META (READ | REQ_META)
-#define WRITE_SYNC_PLUG (WRITE | REQ_SYNC | REQ_NOIDLE)
-#define WRITE_SYNC (WRITE | REQ_SYNC | REQ_NOIDLE | REQ_UNPLUG)
-#define WRITE_ODIRECT_PLUG (WRITE | REQ_SYNC)
+#define WRITE_SYNC (WRITE | REQ_SYNC | REQ_NOIDLE)
+#define WRITE_ODIRECT (WRITE | REQ_SYNC)
#define WRITE_META (WRITE | REQ_META)
-#define WRITE_FLUSH (WRITE | REQ_SYNC | REQ_NOIDLE | REQ_UNPLUG | \
- REQ_FLUSH)
-#define WRITE_FUA (WRITE | REQ_SYNC | REQ_NOIDLE | REQ_UNPLUG | \
- REQ_FUA)
-#define WRITE_FLUSH_FUA (WRITE | REQ_SYNC | REQ_NOIDLE | REQ_UNPLUG | \
- REQ_FLUSH | REQ_FUA)
+#define WRITE_FLUSH (WRITE | REQ_SYNC | REQ_NOIDLE | REQ_FLUSH)
+#define WRITE_FUA (WRITE | REQ_SYNC | REQ_NOIDLE | REQ_FUA)
+#define WRITE_FLUSH_FUA (WRITE | REQ_SYNC | REQ_NOIDLE | REQ_FLUSH | REQ_FUA)
#define SEL_IN 1
#define SEL_OUT 2
@@ -583,7 +573,6 @@ typedef int (*read_actor_t)(read_descriptor_t *, struct page *,
struct address_space_operations {
int (*writepage)(struct page *page, struct writeback_control *wbc);
int (*readpage)(struct file *, struct page *);
- void (*sync_page)(struct page *);
/* Write back some dirty pages from this mapping. */
int (*writepages)(struct address_space *, struct writeback_control *);
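With WRITE_SYNC_PLUG gone, the filesystem hunks earlier in the patch (ext4 page-io, gfs2 meta_io, xfs_aops) all reduce to the same two-way choice: WRITE_SYNC when the writeback is synchronous and someone will wait on it shortly, plain WRITE otherwise. A minimal sketch of that selection around submit_bh(); the helper is illustrative, and callers normally check test_clear_buffer_dirty() first as the gfs2 hunks do:

#include <linux/fs.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>

/* Illustrative sketch: derive the write hint from the writeback mode. */
static void my_write_buffer(struct buffer_head *bh, struct writeback_control *wbc)
{
        int rw = (wbc->sync_mode == WB_SYNC_ALL) ? WRITE_SYNC : WRITE;

        lock_buffer(bh);
        bh->b_end_io = end_buffer_write_sync;
        submit_bh(rw, bh);      /* queued via the task plug if one is active */
}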
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 9c66e994540f..e112b8db2f3c 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -298,7 +298,6 @@ static inline pgoff_t linear_page_index(struct vm_area_struct *vma,
extern void __lock_page(struct page *page);
extern int __lock_page_killable(struct page *page);
-extern void __lock_page_nosync(struct page *page);
extern int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
unsigned int flags);
extern void unlock_page(struct page *page);
@@ -342,17 +341,6 @@ static inline int lock_page_killable(struct page *page)
}
/*
- * lock_page_nosync should only be used if we can't pin the page's inode.
- * Doesn't play quite so well with block device plugging.
- */
-static inline void lock_page_nosync(struct page *page)
-{
- might_sleep();
- if (!trylock_page(page))
- __lock_page_nosync(page);
-}
-
-/*
* lock_page_or_retry - Lock the page, unless this would block and the
* caller indicated that it can handle a retry.
*/
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 777d8a5ed06b..96ac22643742 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -99,6 +99,7 @@ struct robust_list_head;
struct bio_list;
struct fs_struct;
struct perf_event_context;
+struct blk_plug;
/*
* List of flags we want to share for kernel threads,
@@ -1429,6 +1430,11 @@ struct task_struct {
/* stacked block device info */
struct bio_list *bio_list;
+#ifdef CONFIG_BLOCK
+/* stack plugging */
+ struct blk_plug *plug;
+#endif
+
/* VM state */
struct reclaim_state *reclaim_state;
diff --git a/include/linux/swap.h b/include/linux/swap.h
index 4d559325d919..9ee321833b21 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -299,8 +299,6 @@ extern void mem_cgroup_get_shmem_target(struct inode *inode, pgoff_t pgoff,
struct page **pagep, swp_entry_t *ent);
#endif
-extern void swap_unplug_io_fn(struct backing_dev_info *, struct page *);
-
#ifdef CONFIG_SWAP
/* linux/mm/page_io.c */
extern int swap_readpage(struct page *);
diff --git a/kernel/exit.c b/kernel/exit.c
index f9a45ebcc7b1..6a488ad2dce5 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -908,6 +908,7 @@ NORET_TYPE void do_exit(long code)
profile_task_exit(tsk);
WARN_ON(atomic_read(&tsk->fs_excl));
+ WARN_ON(blk_needs_flush_plug(tsk));
if (unlikely(in_interrupt()))
panic("Aiee, killing interrupt handler!");
diff --git a/kernel/fork.c b/kernel/fork.c
index 25e429152ddc..027c80e5162f 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1204,6 +1204,9 @@ static struct task_struct *copy_process(unsigned long clone_flags,
* Clear TID on mm_release()?
*/
p->clear_child_tid = (clone_flags & CLONE_CHILD_CLEARTID) ? child_tidptr: NULL;
+#ifdef CONFIG_BLOCK
+ p->plug = NULL;
+#endif
#ifdef CONFIG_FUTEX
p->robust_list = NULL;
#ifdef CONFIG_COMPAT
diff --git a/kernel/power/block_io.c b/kernel/power/block_io.c
index 83bbc7c02df9..d09dd10c5a5e 100644
--- a/kernel/power/block_io.c
+++ b/kernel/power/block_io.c
@@ -28,7 +28,7 @@
static int submit(int rw, struct block_device *bdev, sector_t sector,
struct page *page, struct bio **bio_chain)
{
- const int bio_rw = rw | REQ_SYNC | REQ_UNPLUG;
+ const int bio_rw = rw | REQ_SYNC;
struct bio *bio;
bio = bio_alloc(__GFP_WAIT | __GFP_HIGH, 1);
diff --git a/kernel/sched.c b/kernel/sched.c
index 18d38e4ec7ba..ca098bf4cc65 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -3978,6 +3978,16 @@ need_resched_nonpreemptible:
switch_count = &prev->nvcsw;
}
+ /*
+ * If we are going to sleep and we have plugged IO queued, make
+ * sure to submit it to avoid deadlocks.
+ */
+ if (prev->state != TASK_RUNNING && blk_needs_flush_plug(prev)) {
+ raw_spin_unlock(&rq->lock);
+ blk_flush_plug(prev);
+ raw_spin_lock(&rq->lock);
+ }
+
pre_schedule(rq, prev);
if (unlikely(!rq->nr_running))
@@ -5333,6 +5343,7 @@ void __sched io_schedule(void)
delayacct_blkio_start();
atomic_inc(&rq->nr_iowait);
+ blk_flush_plug(current);
current->in_iowait = 1;
schedule();
current->in_iowait = 0;
@@ -5348,6 +5359,7 @@ long __sched io_schedule_timeout(long timeout)
delayacct_blkio_start();
atomic_inc(&rq->nr_iowait);
+ blk_flush_plug(current);
current->in_iowait = 1;
ret = schedule_timeout(timeout);
current->in_iowait = 0;
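schedule() and io_schedule() now flush the sleeping task's plug, so open-coded wait loops no longer need an unplug call before going to sleep; the ntfs compress.c and xfs_buf_wait_unpin() conversions above rely on exactly that. A minimal sketch of such a loop, modelled on the xfs change; the wait queue and flag are illustrative:

#include <linux/sched.h>
#include <linux/wait.h>

/* Illustrative sketch: classic wait loop, trusting io_schedule() to flush the plug. */
static void my_wait_for_flag(wait_queue_head_t *wq, int *done)
{
        DECLARE_WAITQUEUE(wait, current);

        add_wait_queue(wq, &wait);
        for (;;) {
                set_current_state(TASK_UNINTERRUPTIBLE);
                if (*done)
                        break;
                io_schedule();          /* flushes current->plug, then sleeps */
        }
        remove_wait_queue(wq, &wait);
        set_current_state(TASK_RUNNING);
}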
diff --git a/mm/backing-dev.c b/mm/backing-dev.c
index 027100d30227..c91e139a652e 100644
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -14,17 +14,11 @@
static atomic_long_t bdi_seq = ATOMIC_LONG_INIT(0);
-void default_unplug_io_fn(struct backing_dev_info *bdi, struct page *page)
-{
-}
-EXPORT_SYMBOL(default_unplug_io_fn);
-
struct backing_dev_info default_backing_dev_info = {
.name = "default",
.ra_pages = VM_MAX_READAHEAD * 1024 / PAGE_CACHE_SIZE,
.state = 0,
.capabilities = BDI_CAP_MAP_COPY,
- .unplug_io_fn = default_unplug_io_fn,
};
EXPORT_SYMBOL_GPL(default_backing_dev_info);
diff --git a/mm/filemap.c b/mm/filemap.c
index 83a45d35468b..f9a29c87a2cf 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -155,45 +155,15 @@ void remove_from_page_cache(struct page *page)
}
EXPORT_SYMBOL(remove_from_page_cache);
-static int sync_page(void *word)
+static int sleep_on_page(void *word)
{
- struct address_space *mapping;
- struct page *page;
-
- page = container_of((unsigned long *)word, struct page, flags);
-
- /*
- * page_mapping() is being called without PG_locked held.
- * Some knowledge of the state and use of the page is used to
- * reduce the requirements down to a memory barrier.
- * The danger here is of a stale page_mapping() return value
- * indicating a struct address_space different from the one it's
- * associated with when it is associated with one.
- * After smp_mb(), it's either the correct page_mapping() for
- * the page, or an old page_mapping() and the page's own
- * page_mapping() has gone NULL.
- * The ->sync_page() address_space operation must tolerate
- * page_mapping() going NULL. By an amazing coincidence,
- * this comes about because none of the users of the page
- * in the ->sync_page() methods make essential use of the
- * page_mapping(), merely passing the page down to the backing
- * device's unplug functions when it's non-NULL, which in turn
- * ignore it for all cases but swap, where only page_private(page) is
- * of interest. When page_mapping() does go NULL, the entire
- * call stack gracefully ignores the page and returns.
- * -- wli
- */
- smp_mb();
- mapping = page_mapping(page);
- if (mapping && mapping->a_ops && mapping->a_ops->sync_page)
- mapping->a_ops->sync_page(page);
io_schedule();
return 0;
}
-static int sync_page_killable(void *word)
+static int sleep_on_page_killable(void *word)
{
- sync_page(word);
+ sleep_on_page(word);
return fatal_signal_pending(current) ? -EINTR : 0;
}
@@ -479,12 +449,6 @@ struct page *__page_cache_alloc(gfp_t gfp)
EXPORT_SYMBOL(__page_cache_alloc);
#endif
-static int __sleep_on_page_lock(void *word)
-{
- io_schedule();
- return 0;
-}
-
/*
* In order to wait for pages to become available there must be
* waitqueues associated with pages. By using a hash table of
@@ -512,7 +476,7 @@ void wait_on_page_bit(struct page *page, int bit_nr)
DEFINE_WAIT_BIT(wait, &page->flags, bit_nr);
if (test_bit(bit_nr, &page->flags))
- __wait_on_bit(page_waitqueue(page), &wait, sync_page,
+ __wait_on_bit(page_waitqueue(page), &wait, sleep_on_page,
TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(wait_on_page_bit);
@@ -576,17 +540,12 @@ EXPORT_SYMBOL(end_page_writeback);
/**
* __lock_page - get a lock on the page, assuming we need to sleep to get it
* @page: the page to lock
- *
- * Ugly. Running sync_page() in state TASK_UNINTERRUPTIBLE is scary. If some
- * random driver's requestfn sets TASK_RUNNING, we could busywait. However
- * chances are that on the second loop, the block layer's plug list is empty,
- * so sync_page() will then return in state TASK_UNINTERRUPTIBLE.
*/
void __lock_page(struct page *page)
{
DEFINE_WAIT_BIT(wait, &page->flags, PG_locked);
- __wait_on_bit_lock(page_waitqueue(page), &wait, sync_page,
+ __wait_on_bit_lock(page_waitqueue(page), &wait, sleep_on_page,
TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(__lock_page);
@@ -596,24 +555,10 @@ int __lock_page_killable(struct page *page)
DEFINE_WAIT_BIT(wait, &page->flags, PG_locked);
return __wait_on_bit_lock(page_waitqueue(page), &wait,
- sync_page_killable, TASK_KILLABLE);
+ sleep_on_page_killable, TASK_KILLABLE);
}
EXPORT_SYMBOL_GPL(__lock_page_killable);
-/**
- * __lock_page_nosync - get a lock on the page, without calling sync_page()
- * @page: the page to lock
- *
- * Variant of lock_page that does not require the caller to hold a reference
- * on the page's mapping.
- */
-void __lock_page_nosync(struct page *page)
-{
- DEFINE_WAIT_BIT(wait, &page->flags, PG_locked);
- __wait_on_bit_lock(page_waitqueue(page), &wait, __sleep_on_page_lock,
- TASK_UNINTERRUPTIBLE);
-}
-
int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
unsigned int flags)
{
@@ -1298,12 +1243,15 @@ generic_file_aio_read(struct kiocb *iocb, const struct iovec *iov,
unsigned long seg = 0;
size_t count;
loff_t *ppos = &iocb->ki_pos;
+ struct blk_plug plug;
count = 0;
retval = generic_segment_checks(iov, &nr_segs, &count, VERIFY_WRITE);
if (retval)
return retval;
+ blk_start_plug(&plug);
+
/* coalesce the iovecs and go direct-to-BIO for O_DIRECT */
if (filp->f_flags & O_DIRECT) {
loff_t size;
@@ -1376,6 +1324,7 @@ generic_file_aio_read(struct kiocb *iocb, const struct iovec *iov,
break;
}
out:
+ blk_finish_plug(&plug);
return retval;
}
EXPORT_SYMBOL(generic_file_aio_read);
@@ -2487,11 +2436,13 @@ ssize_t generic_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
{
struct file *file = iocb->ki_filp;
struct inode *inode = file->f_mapping->host;
+ struct blk_plug plug;
ssize_t ret;
BUG_ON(iocb->ki_pos != pos);
mutex_lock(&inode->i_mutex);
+ blk_start_plug(&plug);
ret = __generic_file_aio_write(iocb, iov, nr_segs, &iocb->ki_pos);
mutex_unlock(&inode->i_mutex);
@@ -2502,6 +2453,7 @@ ssize_t generic_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
if (err < 0 && ret > 0)
ret = err;
}
+ blk_finish_plug(&plug);
return ret;
}
EXPORT_SYMBOL(generic_file_aio_write);
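Since waiting on a page bit is now a plain io_schedule() (sleep_on_page() above), lock_page() no longer dereferences the page's mapping, and the lock_page_nosync() variant loses its reason to exist; the memory-failure, page-writeback and vmscan hunks that follow are straight substitutions. A minimal sketch of the resulting caller pattern; the error marking is illustrative:

#include <linux/pagemap.h>
#include <linux/page-flags.h>

/* Illustrative sketch: callers that used lock_page_nosync() now take the page lock directly. */
static void my_mark_page_error(struct page *page)
{
        lock_page(page);        /* may sleep; needs no reference on page->mapping */
        SetPageError(page);
        unlock_page(page);
}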
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 0207c2f6f8bd..bfba796d374d 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -945,7 +945,7 @@ static int hwpoison_user_mappings(struct page *p, unsigned long pfn,
collect_procs(ppage, &tokill);
if (hpage != ppage)
- lock_page_nosync(ppage);
+ lock_page(ppage);
ret = try_to_unmap(ppage, ttu);
if (ret != SWAP_SUCCESS)
@@ -1038,7 +1038,7 @@ int __memory_failure(unsigned long pfn, int trapno, int flags)
* Check "just unpoisoned", "filter hit", and
* "race with other subpage."
*/
- lock_page_nosync(hpage);
+ lock_page(hpage);
if (!PageHWPoison(hpage)
|| (hwpoison_filter(p) && TestClearPageHWPoison(p))
|| (p != hpage && TestSetPageHWPoison(hpage))) {
@@ -1088,7 +1088,7 @@ int __memory_failure(unsigned long pfn, int trapno, int flags)
* It's very difficult to mess with pages currently under IO
* and in many cases impossible, so we just avoid it here.
*/
- lock_page_nosync(hpage);
+ lock_page(hpage);
/*
* unpoison always clear PG_hwpoison inside page lock
@@ -1231,7 +1231,7 @@ int unpoison_memory(unsigned long pfn)
return 0;
}
- lock_page_nosync(page);
+ lock_page(page);
/*
* This test is racy because PG_hwpoison is set outside of page lock.
* That's acceptable because that won't trigger kernel panic. Instead,
diff --git a/mm/nommu.c b/mm/nommu.c
index f59e1424d3db..fb6cbd6abe16 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -1842,10 +1842,6 @@ int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
}
EXPORT_SYMBOL(remap_vmalloc_range);
-void swap_unplug_io_fn(struct backing_dev_info *bdi, struct page *page)
-{
-}
-
unsigned long arch_get_unmapped_area(struct file *file, unsigned long addr,
unsigned long len, unsigned long pgoff, unsigned long flags)
{
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 2cb01f6ec5d0..cc0ede169e41 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -1239,7 +1239,7 @@ int set_page_dirty_lock(struct page *page)
{
int ret;
- lock_page_nosync(page);
+ lock_page(page);
ret = set_page_dirty(page);
unlock_page(page);
return ret;
diff --git a/mm/page_io.c b/mm/page_io.c
index 2dee975bf469..dc76b4d0611e 100644
--- a/mm/page_io.c
+++ b/mm/page_io.c
@@ -106,7 +106,7 @@ int swap_writepage(struct page *page, struct writeback_control *wbc)
goto out;
}
if (wbc->sync_mode == WB_SYNC_ALL)
- rw |= REQ_SYNC | REQ_UNPLUG;
+ rw |= REQ_SYNC;
count_vm_event(PSWPOUT);
set_page_writeback(page);
unlock_page(page);
diff --git a/mm/readahead.c b/mm/readahead.c
index 77506a291a2d..2c0cc489e288 100644
--- a/mm/readahead.c
+++ b/mm/readahead.c
@@ -109,9 +109,12 @@ EXPORT_SYMBOL(read_cache_pages);
static int read_pages(struct address_space *mapping, struct file *filp,
struct list_head *pages, unsigned nr_pages)
{
+ struct blk_plug plug;
unsigned page_idx;
int ret;
+ blk_start_plug(&plug);
+
if (mapping->a_ops->readpages) {
ret = mapping->a_ops->readpages(filp, mapping, pages, nr_pages);
/* Clean up the remaining pages */
@@ -129,7 +132,10 @@ static int read_pages(struct address_space *mapping, struct file *filp,
page_cache_release(page);
}
ret = 0;
+
out:
+ blk_finish_plug(&plug);
+
return ret;
}
@@ -554,17 +560,5 @@ page_cache_async_readahead(struct address_space *mapping,
/* do read-ahead */
ondemand_readahead(mapping, ra, filp, true, offset, req_size);
-
-#ifdef CONFIG_BLOCK
- /*
- * Normally the current page is !uptodate and lock_page() will be
- * immediately called to implicitly unplug the device. However this
- * is not always true for RAID conifgurations, where data arrives
- * not strictly in their submission order. In this case we need to
- * explicitly kick off the IO.
- */
- if (PageUptodate(page))
- blk_run_backing_dev(mapping->backing_dev_info, NULL);
-#endif
}
EXPORT_SYMBOL_GPL(page_cache_async_readahead);
diff --git a/mm/shmem.c b/mm/shmem.c
index 5ee67c990602..24d23f5bedf1 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -224,7 +224,6 @@ static const struct vm_operations_struct shmem_vm_ops;
static struct backing_dev_info shmem_backing_dev_info __read_mostly = {
.ra_pages = 0, /* No readahead */
.capabilities = BDI_CAP_NO_ACCT_AND_WRITEBACK | BDI_CAP_SWAP_BACKED,
- .unplug_io_fn = default_unplug_io_fn,
};
static LIST_HEAD(shmem_swaplist);
diff --git a/mm/swap_state.c b/mm/swap_state.c
index 5c8cfabbc9bc..46680461785b 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -24,12 +24,10 @@
/*
* swapper_space is a fiction, retained to simplify the path through
- * vmscan's shrink_page_list, to make sync_page look nicer, and to allow
- * future use of radix_tree tags in the swap cache.
+ * vmscan's shrink_page_list.
*/
static const struct address_space_operations swap_aops = {
.writepage = swap_writepage,
- .sync_page = block_sync_page,
.set_page_dirty = __set_page_dirty_nobuffers,
.migratepage = migrate_page,
};
@@ -37,7 +35,6 @@ static const struct address_space_operations swap_aops = {
static struct backing_dev_info swap_backing_dev_info = {
.name = "swap",
.capabilities = BDI_CAP_NO_ACCT_AND_WRITEBACK | BDI_CAP_SWAP_BACKED,
- .unplug_io_fn = swap_unplug_io_fn,
};
struct address_space swapper_space = {
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 0341c5700e34..64d627ab624c 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -95,39 +95,6 @@ __try_to_reclaim_swap(struct swap_info_struct *si, unsigned long offset)
}
/*
- * We need this because the bdev->unplug_fn can sleep and we cannot
- * hold swap_lock while calling the unplug_fn. And swap_lock
- * cannot be turned into a mutex.
- */
-static DECLARE_RWSEM(swap_unplug_sem);
-
-void swap_unplug_io_fn(struct backing_dev_info *unused_bdi, struct page *page)
-{
- swp_entry_t entry;
-
- down_read(&swap_unplug_sem);
- entry.val = page_private(page);
- if (PageSwapCache(page)) {
- struct block_device *bdev = swap_info[swp_type(entry)]->bdev;
- struct backing_dev_info *bdi;
-
- /*
- * If the page is removed from swapcache from under us (with a
- * racy try_to_unuse/swapoff) we need an additional reference
- * count to avoid reading garbage from page_private(page) above.
- * If the WARN_ON triggers during a swapoff it maybe the race
- * condition and it's harmless. However if it triggers without
- * swapoff it signals a problem.
- */
- WARN_ON(page_count(page) <= 1);
-
- bdi = bdev->bd_inode->i_mapping->backing_dev_info;
- blk_run_backing_dev(bdi, page);
- }
- up_read(&swap_unplug_sem);
-}
-
-/*
* swapon tell device that all the old swap contents can be discarded,
* to allow the swap device to optimize its wear-levelling.
*/
@@ -1643,10 +1610,6 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
goto out_dput;
}
- /* wait for any unplug function to finish */
- down_write(&swap_unplug_sem);
- up_write(&swap_unplug_sem);
-
destroy_swap_extents(p);
if (p->flags & SWP_CONTINUED)
free_swap_count_continuations(p);
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 6771ea70bfe7..951cac21c2e7 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -358,7 +358,7 @@ static int may_write_to_queue(struct backing_dev_info *bdi,
static void handle_write_error(struct address_space *mapping,
struct page *page, int error)
{
- lock_page_nosync(page);
+ lock_page(page);
if (page_mapping(page) == mapping)
mapping_set_error(mapping, error);
unlock_page(page);