author:    Sagi Grimberg <sagi@grimberg.me>  2018-12-12 10:38:54 +0300
committer: Christoph Hellwig <hch@lst.de>  2018-12-13 11:59:08 +0300
commit:    e42b3867de4bd5ee3a1849afb68a1fa8627f7282 (patch)
tree:      9109fd71585cba4c6f25b26ca065fcdad4a250e1 /block
parent:    23454d59cc16ddddf4b2290bbe60d2d9581dfd9a (diff)
blk-mq-rdma: pass in queue map to blk_mq_rdma_map_queues
Will be used by nvme-rdma for queue map separation support.

Signed-off-by: Sagi Grimberg <sagi@grimberg.me>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Diffstat (limited to 'block')
-rw-r--r--  block/blk-mq-rdma.c  8
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/block/blk-mq-rdma.c b/block/blk-mq-rdma.c
index a71576aff3a5..45030a81a1ed 100644
--- a/block/blk-mq-rdma.c
+++ b/block/blk-mq-rdma.c
@@ -29,24 +29,24 @@
  * @set->nr_hw_queues, or @dev does not provide an affinity mask for a
  * vector, we fallback to the naive mapping.
  */
-int blk_mq_rdma_map_queues(struct blk_mq_tag_set *set,
+int blk_mq_rdma_map_queues(struct blk_mq_queue_map *map,
 		struct ib_device *dev, int first_vec)
 {
 	const struct cpumask *mask;
 	unsigned int queue, cpu;
 
-	for (queue = 0; queue < set->nr_hw_queues; queue++) {
+	for (queue = 0; queue < map->nr_queues; queue++) {
 		mask = ib_get_vector_affinity(dev, first_vec + queue);
 		if (!mask)
 			goto fallback;
 
 		for_each_cpu(cpu, mask)
-			set->map[0].mq_map[cpu] = queue;
+			map->mq_map[cpu] = map->queue_offset + queue;
 	}
 
 	return 0;
 
 fallback:
-	return blk_mq_map_queues(&set->map[0]);
+	return blk_mq_map_queues(map);
 }
 EXPORT_SYMBOL_GPL(blk_mq_rdma_map_queues);
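
For illustration only (not part of this commit): a minimal sketch of how an RDMA block driver's ->map_queues callback might call the helper after this signature change, handing it one of the per-set queue maps directly. The "my_rdma_ctrl" type and its "ibdev" field are hypothetical; the actual nvme-rdma queue map separation code lands in a later patch.

/*
 * Hypothetical caller sketch -- "my_rdma_ctrl" and its "ibdev" field are
 * made up for illustration; only blk_mq_rdma_map_queues() and the
 * blk_mq_tag_set/blk_mq_queue_map types come from the kernel.
 */
#include <linux/blk-mq.h>
#include <linux/blk-mq-rdma.h>

struct my_rdma_ctrl {
	struct ib_device *ibdev;	/* device whose IRQ affinity is followed */
};

static int my_rdma_map_queues(struct blk_mq_tag_set *set)
{
	struct my_rdma_ctrl *ctrl = set->driver_data;

	/*
	 * The helper now takes a single queue map and reads nr_queues and
	 * queue_offset from it, so each entry in set->map[] can be mapped
	 * independently; here only the first map is used.
	 */
	return blk_mq_rdma_map_queues(&set->map[0], ctrl->ibdev, 0);
}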