author     Linus Torvalds <torvalds@linux-foundation.org>   2016-10-10 03:29:33 +0300
committer  Linus Torvalds <torvalds@linux-foundation.org>   2016-10-10 03:29:33 +0300
commit     12e3d3cdd975fe986cc5c35f60b1467a8ec20b80 (patch)
tree       14ec935d2e15f454ba69353fcf5329ac67f72e4f /block
parent     48915c2cbc77eceec2005afb695ac658fede4e0d (diff)
parent     8ec2ef2b66ea2fd00acc28aca8edaad441dbb424 (diff)
download   linux-12e3d3cdd975fe986cc5c35f60b1467a8ec20b80.tar.xz
Merge branch 'for-4.9/block-irq' of git://git.kernel.dk/linux-block
Pull blk-mq irq/cpu mapping updates from Jens Axboe:
 "This is the block-irq topic branch for 4.9-rc. It's mostly from
  Christoph, and it allows drivers to specify their own mappings, and
  more importantly, to share the blk-mq mappings with the IRQ affinity
  mappings. It's a good step towards making this work better out of
  the box"

* 'for-4.9/block-irq' of git://git.kernel.dk/linux-block:
  blk_mq: linux/blk-mq.h does not include all the headers it depends on
  blk-mq: kill unused blk_mq_create_mq_map()
  blk-mq: get rid of the cpumask in struct blk_mq_tags
  nvme: remove the post_scan callout
  nvme: switch to use pci_alloc_irq_vectors
  blk-mq: provide a default queue mapping for PCI device
  blk-mq: allow the driver to pass in a queue mapping
  blk-mq: remove ->map_queue
  blk-mq: only allocate a single mq_map per tag_set
  blk-mq: don't redistribute hardware queues on a CPU hotplug event
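To make the API shift described above concrete, here is an illustrative sketch; it is not part of this merge, and the "mydrv" identifiers are invented. It shows how a blk-mq driver's ops table changes across this series: the previously mandatory ->map_queue boilerplate disappears, and an optional ->map_queues hook lets the driver build the CPU-to-queue map itself, for example from IRQ affinity (see the usage sketch after the new blk-mq-pci.c file below).

/*
 * Illustrative sketch only -- not part of this merge; "mydrv" names are
 * hypothetical and mydrv_queue_rq/mydrv_map_queues are assumed to exist.
 */

/* Before this series: ->map_queue was mandatory, and drivers typically
 * pointed it at the core's exported default helper. */
static struct blk_mq_ops mydrv_mq_ops_old = {
	.queue_rq	= mydrv_queue_rq,
	.map_queue	= blk_mq_map_queue,	/* pure boilerplate */
};

/* After this series: ->map_queue is gone from blk_mq_ops.  ->map_queues is
 * optional; when it is NULL, blk_mq_alloc_tag_set() falls back to
 * blk_mq_map_queues() for the default spread over CPUs. */
static struct blk_mq_ops mydrv_mq_ops_new = {
	.queue_rq	= mydrv_queue_rq,
	.map_queues	= mydrv_map_queues,	/* fills in set->mq_map */
};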
Diffstat (limited to 'block')
-rw-r--r--  block/Kconfig          |  5
-rw-r--r--  block/Makefile         |  2
-rw-r--r--  block/blk-flush.c      |  6
-rw-r--r--  block/blk-mq-cpumap.c  | 25
-rw-r--r--  block/blk-mq-pci.c     | 47
-rw-r--r--  block/blk-mq-tag.c     | 11
-rw-r--r--  block/blk-mq-tag.h     |  2
-rw-r--r--  block/blk-mq.c         | 83
-rw-r--r--  block/blk-mq.h         | 10
-rw-r--r--  block/blk.h            | 11
10 files changed, 110 insertions(+), 92 deletions(-)
diff --git a/block/Kconfig b/block/Kconfig
index 5136ad4bb6d5..1d4d624492fc 100644
--- a/block/Kconfig
+++ b/block/Kconfig
@@ -125,4 +125,9 @@ config BLOCK_COMPAT
depends on BLOCK && COMPAT
default y
+config BLK_MQ_PCI
+ bool
+ depends on BLOCK && PCI
+ default y
+
source block/Kconfig.iosched
diff --git a/block/Makefile b/block/Makefile
index 9eda2322b2d4..37a0d93f97bb 100644
--- a/block/Makefile
+++ b/block/Makefile
@@ -22,4 +22,4 @@ obj-$(CONFIG_IOSCHED_CFQ) += cfq-iosched.o
obj-$(CONFIG_BLOCK_COMPAT) += compat_ioctl.o
obj-$(CONFIG_BLK_CMDLINE_PARSER) += cmdline-parser.o
obj-$(CONFIG_BLK_DEV_INTEGRITY) += bio-integrity.o blk-integrity.o t10-pi.o
-
+obj-$(CONFIG_BLK_MQ_PCI) += blk-mq-pci.o
diff --git a/block/blk-flush.c b/block/blk-flush.c
index d308def812db..6a14b68b9135 100644
--- a/block/blk-flush.c
+++ b/block/blk-flush.c
@@ -232,7 +232,7 @@ static void flush_end_io(struct request *flush_rq, int error)
/* release the tag's ownership to the req cloned from */
spin_lock_irqsave(&fq->mq_flush_lock, flags);
- hctx = q->mq_ops->map_queue(q, flush_rq->mq_ctx->cpu);
+ hctx = blk_mq_map_queue(q, flush_rq->mq_ctx->cpu);
blk_mq_tag_set_rq(hctx, flush_rq->tag, fq->orig_rq);
flush_rq->tag = -1;
}
@@ -325,7 +325,7 @@ static bool blk_kick_flush(struct request_queue *q, struct blk_flush_queue *fq)
flush_rq->tag = first_rq->tag;
fq->orig_rq = first_rq;
- hctx = q->mq_ops->map_queue(q, first_rq->mq_ctx->cpu);
+ hctx = blk_mq_map_queue(q, first_rq->mq_ctx->cpu);
blk_mq_tag_set_rq(hctx, first_rq->tag, flush_rq);
}
@@ -358,7 +358,7 @@ static void mq_flush_data_end_io(struct request *rq, int error)
unsigned long flags;
struct blk_flush_queue *fq = blk_get_flush_queue(q, ctx);
- hctx = q->mq_ops->map_queue(q, ctx->cpu);
+ hctx = blk_mq_map_queue(q, ctx->cpu);
/*
* After populating an empty queue, kick it to avoid stall. Read
diff --git a/block/blk-mq-cpumap.c b/block/blk-mq-cpumap.c
index d0634bcf322f..19b1d9c5f07e 100644
--- a/block/blk-mq-cpumap.c
+++ b/block/blk-mq-cpumap.c
@@ -31,14 +31,16 @@ static int get_first_sibling(unsigned int cpu)
return cpu;
}
-int blk_mq_update_queue_map(unsigned int *map, unsigned int nr_queues,
- const struct cpumask *online_mask)
+int blk_mq_map_queues(struct blk_mq_tag_set *set)
{
+ unsigned int *map = set->mq_map;
+ unsigned int nr_queues = set->nr_hw_queues;
+ const struct cpumask *online_mask = cpu_online_mask;
unsigned int i, nr_cpus, nr_uniq_cpus, queue, first_sibling;
cpumask_var_t cpus;
if (!alloc_cpumask_var(&cpus, GFP_ATOMIC))
- return 1;
+ return -ENOMEM;
cpumask_clear(cpus);
nr_cpus = nr_uniq_cpus = 0;
@@ -86,23 +88,6 @@ int blk_mq_update_queue_map(unsigned int *map, unsigned int nr_queues,
return 0;
}
-unsigned int *blk_mq_make_queue_map(struct blk_mq_tag_set *set)
-{
- unsigned int *map;
-
- /* If cpus are offline, map them to first hctx */
- map = kzalloc_node(sizeof(*map) * nr_cpu_ids, GFP_KERNEL,
- set->numa_node);
- if (!map)
- return NULL;
-
- if (!blk_mq_update_queue_map(map, set->nr_hw_queues, cpu_online_mask))
- return map;
-
- kfree(map);
- return NULL;
-}
-
/*
* We have no quick way of doing reverse lookups. This is only used at
* queue init time, so runtime isn't important.
diff --git a/block/blk-mq-pci.c b/block/blk-mq-pci.c
new file mode 100644
index 000000000000..966c2169762e
--- /dev/null
+++ b/block/blk-mq-pci.c
@@ -0,0 +1,47 @@
+/*
+ * Copyright (c) 2016 Christoph Hellwig.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+#include <linux/kobject.h>
+#include <linux/blkdev.h>
+#include <linux/blk-mq.h>
+#include <linux/blk-mq-pci.h>
+#include <linux/pci.h>
+#include <linux/module.h>
+
+/**
+ * blk_mq_pci_map_queues - provide a default queue mapping for PCI device
+ * @set: tagset to provide the mapping for
+ * @pdev: PCI device associated with @set.
+ *
+ * This function assumes the PCI device @pdev has at least as many available
+ * interrupt vectors as @set has queues. It will then query the vector
+ * corresponding to each queue for its affinity mask and build a queue mapping
+ * that maps a queue to the CPUs that have irq affinity for the corresponding
+ * vector.
+ */
+int blk_mq_pci_map_queues(struct blk_mq_tag_set *set, struct pci_dev *pdev)
+{
+ const struct cpumask *mask;
+ unsigned int queue, cpu;
+
+ for (queue = 0; queue < set->nr_hw_queues; queue++) {
+ mask = pci_irq_get_affinity(pdev, queue);
+ if (!mask)
+ return -EINVAL;
+
+ for_each_cpu(cpu, mask)
+ set->mq_map[cpu] = queue;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(blk_mq_pci_map_queues);
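A hedged usage sketch of the new helper follows; it is not taken from this merge. The "mydrv" driver, its mydrv_dev structure, and the assumption that driver_data points at it are all hypothetical; the only real APIs used are pci_alloc_irq_vectors(), pci_irq_get_affinity() (called internally by the helper), and blk_mq_pci_map_queues() itself.

/*
 * Hedged usage sketch -- hypothetical PCI block driver that lets the block
 * layer reuse the IRQ affinity masks set up by pci_alloc_irq_vectors().
 */
#include <linux/blk-mq.h>
#include <linux/blk-mq-pci.h>
#include <linux/pci.h>

struct mydrv_dev {
	struct pci_dev *pdev;
	struct blk_mq_tag_set tag_set;
};

static int mydrv_map_queues(struct blk_mq_tag_set *set)
{
	struct mydrv_dev *dev = set->driver_data;

	/* One hardware queue per vector, mapped to that vector's CPUs. */
	return blk_mq_pci_map_queues(set, dev->pdev);
}

static int mydrv_setup_irqs(struct mydrv_dev *dev, unsigned int nr_queues)
{
	/*
	 * Ask the PCI core to spread the vectors over the CPUs; the resulting
	 * affinity masks are what blk_mq_pci_map_queues() reads back via
	 * pci_irq_get_affinity() when ->map_queues runs.
	 */
	int ret = pci_alloc_irq_vectors(dev->pdev, nr_queues, nr_queues,
					PCI_IRQ_MSIX | PCI_IRQ_AFFINITY);

	return ret < 0 ? ret : 0;
}

With this wiring, the same affinity decision drives both the interrupt routing and set->mq_map, which is the "share the blk-mq mappings with the IRQ affinity mappings" goal stated in the pull message.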
diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
index cef618f6fc92..dcf5ce3ba4bf 100644
--- a/block/blk-mq-tag.c
+++ b/block/blk-mq-tag.c
@@ -141,8 +141,7 @@ static int bt_get(struct blk_mq_alloc_data *data, struct sbitmap_queue *bt,
io_schedule();
data->ctx = blk_mq_get_ctx(data->q);
- data->hctx = data->q->mq_ops->map_queue(data->q,
- data->ctx->cpu);
+ data->hctx = blk_mq_map_queue(data->q, data->ctx->cpu);
if (data->flags & BLK_MQ_REQ_RESERVED) {
bt = &data->hctx->tags->breserved_tags;
} else {
@@ -399,11 +398,6 @@ struct blk_mq_tags *blk_mq_init_tags(unsigned int total_tags,
if (!tags)
return NULL;
- if (!zalloc_cpumask_var(&tags->cpumask, GFP_KERNEL)) {
- kfree(tags);
- return NULL;
- }
-
tags->nr_tags = total_tags;
tags->nr_reserved_tags = reserved_tags;
@@ -414,7 +408,6 @@ void blk_mq_free_tags(struct blk_mq_tags *tags)
{
sbitmap_queue_free(&tags->bitmap_tags);
sbitmap_queue_free(&tags->breserved_tags);
- free_cpumask_var(tags->cpumask);
kfree(tags);
}
@@ -453,7 +446,7 @@ u32 blk_mq_unique_tag(struct request *rq)
int hwq = 0;
if (q->mq_ops) {
- hctx = q->mq_ops->map_queue(q, rq->mq_ctx->cpu);
+ hctx = blk_mq_map_queue(q, rq->mq_ctx->cpu);
hwq = hctx->queue_num;
}
diff --git a/block/blk-mq-tag.h b/block/blk-mq-tag.h
index 09f4cc0aaa84..d1662734dc53 100644
--- a/block/blk-mq-tag.h
+++ b/block/blk-mq-tag.h
@@ -17,8 +17,6 @@ struct blk_mq_tags {
struct request **rqs;
struct list_head page_list;
-
- cpumask_var_t cpumask;
};
diff --git a/block/blk-mq.c b/block/blk-mq.c
index dc5f47f60931..b65f572a4faf 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -224,7 +224,7 @@ struct request *blk_mq_alloc_request(struct request_queue *q, int rw,
return ERR_PTR(ret);
ctx = blk_mq_get_ctx(q);
- hctx = q->mq_ops->map_queue(q, ctx->cpu);
+ hctx = blk_mq_map_queue(q, ctx->cpu);
blk_mq_set_alloc_data(&alloc_data, q, flags, ctx, hctx);
rq = __blk_mq_alloc_request(&alloc_data, rw, 0);
blk_mq_put_ctx(ctx);
@@ -319,11 +319,7 @@ EXPORT_SYMBOL_GPL(blk_mq_free_hctx_request);
void blk_mq_free_request(struct request *rq)
{
- struct blk_mq_hw_ctx *hctx;
- struct request_queue *q = rq->q;
-
- hctx = q->mq_ops->map_queue(q, rq->mq_ctx->cpu);
- blk_mq_free_hctx_request(hctx, rq);
+ blk_mq_free_hctx_request(blk_mq_map_queue(rq->q, rq->mq_ctx->cpu), rq);
}
EXPORT_SYMBOL_GPL(blk_mq_free_request);
@@ -1058,9 +1054,7 @@ void blk_mq_insert_request(struct request *rq, bool at_head, bool run_queue,
{
struct blk_mq_ctx *ctx = rq->mq_ctx;
struct request_queue *q = rq->q;
- struct blk_mq_hw_ctx *hctx;
-
- hctx = q->mq_ops->map_queue(q, ctx->cpu);
+ struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu);
spin_lock(&ctx->lock);
__blk_mq_insert_request(hctx, rq, at_head);
@@ -1077,12 +1071,10 @@ static void blk_mq_insert_requests(struct request_queue *q,
bool from_schedule)
{
- struct blk_mq_hw_ctx *hctx;
+ struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu);
trace_block_unplug(q, depth, !from_schedule);
- hctx = q->mq_ops->map_queue(q, ctx->cpu);
-
/*
* preemption doesn't flush plug list, so it's possible ctx->cpu is
* offline now
@@ -1216,7 +1208,7 @@ static struct request *blk_mq_map_request(struct request_queue *q,
blk_queue_enter_live(q);
ctx = blk_mq_get_ctx(q);
- hctx = q->mq_ops->map_queue(q, ctx->cpu);
+ hctx = blk_mq_map_queue(q, ctx->cpu);
if (rw_is_sync(bio_op(bio), bio->bi_opf))
op_flags |= REQ_SYNC;
@@ -1235,8 +1227,7 @@ static int blk_mq_direct_issue_request(struct request *rq, blk_qc_t *cookie)
{
int ret;
struct request_queue *q = rq->q;
- struct blk_mq_hw_ctx *hctx = q->mq_ops->map_queue(q,
- rq->mq_ctx->cpu);
+ struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, rq->mq_ctx->cpu);
struct blk_mq_queue_data bd = {
.rq = rq,
.list = NULL,
@@ -1440,15 +1431,6 @@ run_queue:
return cookie;
}
-/*
- * Default mapping to a software queue, since we use one per CPU.
- */
-struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q, const int cpu)
-{
- return q->queue_hw_ctx[q->mq_map[cpu]];
-}
-EXPORT_SYMBOL(blk_mq_map_queue);
-
static void blk_mq_free_rq_map(struct blk_mq_tag_set *set,
struct blk_mq_tags *tags, unsigned int hctx_idx)
{
@@ -1757,7 +1739,7 @@ static void blk_mq_init_cpu_queues(struct request_queue *q,
if (!cpu_online(i))
continue;
- hctx = q->mq_ops->map_queue(q, i);
+ hctx = blk_mq_map_queue(q, i);
/*
* Set local node, IFF we have more than one hw queue. If
@@ -1795,7 +1777,7 @@ static void blk_mq_map_swqueue(struct request_queue *q,
continue;
ctx = per_cpu_ptr(q->queue_ctx, i);
- hctx = q->mq_ops->map_queue(q, i);
+ hctx = blk_mq_map_queue(q, i);
cpumask_set_cpu(i, hctx->cpumask);
ctx->index_hw = hctx->nr_ctx;
@@ -1824,7 +1806,6 @@ static void blk_mq_map_swqueue(struct request_queue *q,
hctx->tags = set->tags[i];
WARN_ON(!hctx->tags);
- cpumask_copy(hctx->tags->cpumask, hctx->cpumask);
/*
* Set the map size to the number of mapped software queues.
* This is more accurate and more efficient than looping
@@ -1918,7 +1899,6 @@ void blk_mq_release(struct request_queue *q)
kfree(hctx);
}
- kfree(q->mq_map);
q->mq_map = NULL;
kfree(q->queue_hw_ctx);
@@ -2017,9 +1997,7 @@ struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
if (!q->queue_hw_ctx)
goto err_percpu;
- q->mq_map = blk_mq_make_queue_map(set);
- if (!q->mq_map)
- goto err_map;
+ q->mq_map = set->mq_map;
blk_mq_realloc_hw_ctxs(set, q);
if (!q->nr_hw_queues)
@@ -2069,8 +2047,6 @@ struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
return q;
err_hctxs:
- kfree(q->mq_map);
-err_map:
kfree(q->queue_hw_ctx);
err_percpu:
free_percpu(q->queue_ctx);
@@ -2102,8 +2078,6 @@ static void blk_mq_queue_reinit(struct request_queue *q,
blk_mq_sysfs_unregister(q);
- blk_mq_update_queue_map(q->mq_map, q->nr_hw_queues, online_mask);
-
/*
* redo blk_mq_init_cpu_queues and blk_mq_init_hw_queues. FIXME: maybe
* we should change hctx numa_node according to new topology (this
@@ -2242,12 +2216,6 @@ static int blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
return 0;
}
-struct cpumask *blk_mq_tags_cpumask(struct blk_mq_tags *tags)
-{
- return tags->cpumask;
-}
-EXPORT_SYMBOL_GPL(blk_mq_tags_cpumask);
-
/*
* Alloc a tag set to be associated with one or more request queues.
* May fail with EINVAL for various error conditions. May adjust the
@@ -2256,6 +2224,8 @@ EXPORT_SYMBOL_GPL(blk_mq_tags_cpumask);
*/
int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
{
+ int ret;
+
BUILD_BUG_ON(BLK_MQ_MAX_DEPTH > 1 << BLK_MQ_UNIQUE_TAG_BITS);
if (!set->nr_hw_queues)
@@ -2265,7 +2235,7 @@ int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN)
return -EINVAL;
- if (!set->ops->queue_rq || !set->ops->map_queue)
+ if (!set->ops->queue_rq)
return -EINVAL;
if (set->queue_depth > BLK_MQ_MAX_DEPTH) {
@@ -2294,17 +2264,35 @@ int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
if (!set->tags)
return -ENOMEM;
- if (blk_mq_alloc_rq_maps(set))
- goto enomem;
+ ret = -ENOMEM;
+ set->mq_map = kzalloc_node(sizeof(*set->mq_map) * nr_cpu_ids,
+ GFP_KERNEL, set->numa_node);
+ if (!set->mq_map)
+ goto out_free_tags;
+
+ if (set->ops->map_queues)
+ ret = set->ops->map_queues(set);
+ else
+ ret = blk_mq_map_queues(set);
+ if (ret)
+ goto out_free_mq_map;
+
+ ret = blk_mq_alloc_rq_maps(set);
+ if (ret)
+ goto out_free_mq_map;
mutex_init(&set->tag_list_lock);
INIT_LIST_HEAD(&set->tag_list);
return 0;
-enomem:
+
+out_free_mq_map:
+ kfree(set->mq_map);
+ set->mq_map = NULL;
+out_free_tags:
kfree(set->tags);
set->tags = NULL;
- return -ENOMEM;
+ return ret;
}
EXPORT_SYMBOL(blk_mq_alloc_tag_set);
@@ -2317,6 +2305,9 @@ void blk_mq_free_tag_set(struct blk_mq_tag_set *set)
blk_mq_free_rq_map(set, set->tags[i], i);
}
+ kfree(set->mq_map);
+ set->mq_map = NULL;
+
kfree(set->tags);
set->tags = NULL;
}
diff --git a/block/blk-mq.h b/block/blk-mq.h
index 9b15d2ef7f7b..df6474cb5a4c 100644
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -45,11 +45,15 @@ void blk_mq_disable_hotplug(void);
/*
* CPU -> queue mappings
*/
-extern unsigned int *blk_mq_make_queue_map(struct blk_mq_tag_set *set);
-extern int blk_mq_update_queue_map(unsigned int *map, unsigned int nr_queues,
- const struct cpumask *online_mask);
+int blk_mq_map_queues(struct blk_mq_tag_set *set);
extern int blk_mq_hw_queue_to_node(unsigned int *map, unsigned int);
+static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q,
+ int cpu)
+{
+ return q->queue_hw_ctx[q->mq_map[cpu]];
+}
+
/*
* sysfs helpers
*/
diff --git a/block/blk.h b/block/blk.h
index c37492f5edaa..74444c49078f 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -39,14 +39,9 @@ extern struct ida blk_queue_ida;
static inline struct blk_flush_queue *blk_get_flush_queue(
struct request_queue *q, struct blk_mq_ctx *ctx)
{
- struct blk_mq_hw_ctx *hctx;
-
- if (!q->mq_ops)
- return q->fq;
-
- hctx = q->mq_ops->map_queue(q, ctx->cpu);
-
- return hctx->fq;
+ if (q->mq_ops)
+ return blk_mq_map_queue(q, ctx->cpu)->fq;
+ return q->fq;
}
static inline void __blk_get_queue(struct request_queue *q)