author		Mike Snitzer <snitzer@redhat.com>	2016-02-15 22:25:00 +0300
committer	Mike Snitzer <snitzer@redhat.com>	2016-02-23 06:34:41 +0300
commit		9659f811446a67951127d4e81576204ffce9c86e (patch)
tree		115ce6ad350c9e431c0995130b57a67e43150f24 /drivers/md/dm-queue-length.c
parent		21136f89d76dc10d31d294f2bb79c585a039bad6 (diff)
download	linux-9659f811446a67951127d4e81576204ffce9c86e.tar.xz
dm mpath: push path selector locking down to path selectors
Proper locking of the lists used by the path selectors should be handled within the selectors (relying on dm-mpath.c code's use of the m->lock spinlock was reckless).

Signed-off-by: Mike Snitzer <snitzer@redhat.com>
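As an aside, here is a minimal sketch of the pattern this patch pushes down into the selector: the selector embeds its own spinlock and takes it with the irq-saving variants around every list manipulation, mirroring the hunks below. The helper name example_fail_path and the trimmed-down struct path_info are illustrative only and are not part of the patch.

#include <linux/list.h>
#include <linux/spinlock.h>

/* Sketch only: per-selector lock protecting the path lists. */
struct selector {
	struct list_head valid_paths;
	struct list_head failed_paths;
	spinlock_t lock;	/* guards both lists */
};

/* Simplified stand-in for the file's struct path_info. */
struct path_info {
	struct list_head list;
};

/* Hypothetical helper showing the locking discipline used throughout. */
static void example_fail_path(struct selector *s, struct path_info *pi)
{
	unsigned long flags;

	spin_lock_irqsave(&s->lock, flags);	/* irq-saving variant, as in the patch */
	list_move(&pi->list, &s->failed_paths);
	spin_unlock_irqrestore(&s->lock, flags);
}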
Diffstat (limited to 'drivers/md/dm-queue-length.c')
-rw-r--r--	drivers/md/dm-queue-length.c	23
1 file changed, 20 insertions, 3 deletions
diff --git a/drivers/md/dm-queue-length.c b/drivers/md/dm-queue-length.c
index 51d82290ad4c..aeea3b7f3d5e 100644
--- a/drivers/md/dm-queue-length.c
+++ b/drivers/md/dm-queue-length.c
@@ -29,6 +29,7 @@
 struct selector {
 	struct list_head valid_paths;
 	struct list_head failed_paths;
+	spinlock_t lock;
 };
 
 struct path_info {
@@ -45,6 +46,7 @@ static struct selector *alloc_selector(void)
 	if (s) {
 		INIT_LIST_HEAD(&s->valid_paths);
 		INIT_LIST_HEAD(&s->failed_paths);
+		spin_lock_init(&s->lock);
 	}
 
 	return s;
@@ -113,6 +115,7 @@ static int ql_add_path(struct path_selector *ps, struct dm_path *path,
 	struct path_info *pi;
 	unsigned repeat_count = QL_MIN_IO;
 	char dummy;
+	unsigned long flags;
 
 	/*
 	 * Arguments: [<repeat_count>]
@@ -147,7 +150,9 @@ static int ql_add_path(struct path_selector *ps, struct dm_path *path,
 
 	path->pscontext = pi;
 
+	spin_lock_irqsave(&s->lock, flags);
 	list_add_tail(&pi->list, &s->valid_paths);
+	spin_unlock_irqrestore(&s->lock, flags);
 
 	return 0;
 }
@@ -156,16 +161,22 @@ static void ql_fail_path(struct path_selector *ps, struct dm_path *path)
 {
 	struct selector *s = ps->context;
 	struct path_info *pi = path->pscontext;
+	unsigned long flags;
 
+	spin_lock_irqsave(&s->lock, flags);
 	list_move(&pi->list, &s->failed_paths);
+	spin_unlock_irqrestore(&s->lock, flags);
 }
 
 static int ql_reinstate_path(struct path_selector *ps, struct dm_path *path)
 {
 	struct selector *s = ps->context;
 	struct path_info *pi = path->pscontext;
+	unsigned long flags;
 
+	spin_lock_irqsave(&s->lock, flags);
 	list_move_tail(&pi->list, &s->valid_paths);
+	spin_unlock_irqrestore(&s->lock, flags);
 
 	return 0;
 }
@@ -178,9 +189,12 @@ static struct dm_path *ql_select_path(struct path_selector *ps,
 {
 	struct selector *s = ps->context;
 	struct path_info *pi = NULL, *best = NULL;
+	struct dm_path *ret = NULL;
+	unsigned long flags;
 
+	spin_lock_irqsave(&s->lock, flags);
 	if (list_empty(&s->valid_paths))
-		return NULL;
+		goto out;
 
 	/* Change preferred (first in list) path to evenly balance. */
 	list_move_tail(s->valid_paths.next, &s->valid_paths);
@@ -195,11 +209,14 @@ static struct dm_path *ql_select_path(struct path_selector *ps,
 	}
 
 	if (!best)
-		return NULL;
+		goto out;
 
 	*repeat_count = best->repeat_count;
 
-	return best->path;
+	ret = best->path;
+out:
+	spin_unlock_irqrestore(&s->lock, flags);
+	return ret;
 }
 
 static int ql_start_io(struct path_selector *ps, struct dm_path *path,