author	Hannes Reinecke <hare@suse.de>	2013-10-01 13:49:56 +0400
committer	Mike Snitzer <snitzer@redhat.com>	2013-11-05 20:20:34 +0400
commit	b63349a7a53d34ffde70cb4feec48ea9e6f5e97b (patch)
tree	d0e3b95ed6e2d8dbb219fce7fa8008351b8c1b8b /drivers/md/dm-mpath.c
parent	954a73d5d3073df2231820c718fdd2f18b0fe4c9 (diff)
download	linux-b63349a7a53d34ffde70cb4feec48ea9e6f5e97b.tar.xz
dm mpath: requeue I/O during pg_init
When pg_init is running no I/O can be submitted to the underlying devices, as the path priority etc might change. When using queue_io for this, requests will be piling up within multipath as the block I/O scheduler just sees a _very fast_ device. All of this queued I/O has to be resubmitted from within multipathing once pg_init is done.

This approach has the problem that it's virtually impossible to abort I/O when pg_init is running, and we're adding heavy load to the devices after pg_init since all of the queued I/O needs to be resubmitted _before_ any requests can be pulled off of the request queue and normal operation continues.

This patch will requeue the I/O that triggers the pg_init call, and return 'busy' when pg_init is in progress. With these changes the block I/O scheduler will stop submitting I/O during pg_init, resulting in a quicker path switch and less I/O pressure (and memory consumption) after pg_init.

Signed-off-by: Hannes Reinecke <hare@suse.de>
[patch header edited for clarity and typos by Mike Snitzer]
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
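To make the behaviour change easier to follow, the sketch below models the new mapping decision described above as a small stand-alone C program. The field names and outcomes mirror the diff further down; the types, the mpath_model/map_io_model names and the main() harness are purely illustrative and are not part of the driver.

/*
 * Stand-alone model of the map_io() decision order after this patch.
 * Field names (pg_init_required, queue_io, ...) mirror the diff below;
 * everything else here is illustrative, not kernel code.
 */
#include <stdbool.h>
#include <stdio.h>

enum map_result {
	MAP_SUBMITTED,	/* held inside multipath, resubmitted by the daemon */
	MAP_REMAPPED,	/* dispatched to the selected path */
	MAP_REQUEUE,	/* handed back to the block layer's request queue */
};

struct mpath_model {
	bool has_pgpath;	/* a usable path has been chosen */
	bool pg_init_required;	/* path group must be (re)initialised */
	bool queue_io;		/* hold I/O inside multipath */
	bool queue_if_no_path;	/* hold I/O while no path is available */
};

static enum map_result map_io_model(const struct mpath_model *m)
{
	if (m->pg_init_required) {
		/* New behaviour: the triggering request is requeued and the
		 * block layer stops submitting until pg_init finishes (the
		 * real code also kicks the process_queued_ios work item). */
		return MAP_REQUEUE;
	}

	if ((m->has_pgpath && m->queue_io) ||
	    (!m->has_pgpath && m->queue_if_no_path)) {
		/* Remaining internal-queueing cases are unchanged. */
		return MAP_SUBMITTED;
	}

	/* Simplification: error handling for "no path, no queueing" elided. */
	return m->has_pgpath ? MAP_REMAPPED : MAP_REQUEUE;
}

int main(void)
{
	struct mpath_model during_pg_init = {
		.has_pgpath = true,
		.pg_init_required = true,
		.queue_io = true,
	};

	printf("during pg_init -> %s\n",
	       map_io_model(&during_pg_init) == MAP_REQUEUE ?
	       "requeued to the block layer" : "queued inside multipath");
	return 0;
}

The key point is the first branch: while a path group still needs initialisation, the request is handed back to the block layer instead of being parked on multipath's internal queued_ios list.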
Diffstat (limited to 'drivers/md/dm-mpath.c')
-rw-r--r--	drivers/md/dm-mpath.c	16
1 files changed, 12 insertions, 4 deletions
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index 799e479db93b..6eb9dc9ef8f3 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -391,13 +391,16 @@ static int map_io(struct multipath *m, struct request *clone,
 	if (was_queued)
 		m->queue_size--;
 
-	if ((pgpath && m->queue_io) ||
-	    (!pgpath && m->queue_if_no_path)) {
+	if (m->pg_init_required) {
+		if (!m->pg_init_in_progress)
+			queue_work(kmultipathd, &m->process_queued_ios);
+		r = DM_MAPIO_REQUEUE;
+	} else if ((pgpath && m->queue_io) ||
+		   (!pgpath && m->queue_if_no_path)) {
 		/* Queue for the daemon to resubmit */
 		list_add_tail(&clone->queuelist, &m->queued_ios);
 		m->queue_size++;
-		if ((m->pg_init_required && !m->pg_init_in_progress) ||
-		    !m->queue_io)
+		if (!m->queue_io)
 			queue_work(kmultipathd, &m->process_queued_ios);
 		pgpath = NULL;
 		r = DM_MAPIO_SUBMITTED;
@@ -1677,6 +1680,11 @@ static int multipath_busy(struct dm_target *ti)
 
 	spin_lock_irqsave(&m->lock, flags);
 
+	/* pg_init in progress, requeue until done */
+	if (m->pg_init_in_progress) {
+		busy = 1;
+		goto out;
+	}
 	/* Guess which priority_group will be used at next mapping time */
 	if (unlikely(!m->current_pgpath && m->next_pg))
 		pg = m->next_pg;
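The second hunk complements the first: request-based device-mapper consults the target's busy hook before dispatching, so reporting busy for the whole duration of pg_init keeps requests on the block layer's queue instead of letting them pile up inside multipath. A minimal model of the new early return, again with illustrative types (mpath_busy_state and multipath_busy_model are hypothetical names, not the real driver structures):

#include <stdbool.h>

struct mpath_busy_state {
	bool pg_init_in_progress;	/* a path group is currently initialising */
	bool paths_busy;		/* outcome of the per-path checks (elided here) */
};

/* Model of multipath_busy() after this patch: stay busy while pg_init runs
 * so the block layer stops pulling requests off the queue. */
static bool multipath_busy_model(const struct mpath_busy_state *m)
{
	if (m->pg_init_in_progress)
		return true;		/* requeue until pg_init is done */

	return m->paths_busy;		/* unchanged path/priority-group checks */
}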