-rw-r--r--   block/blk-core.c         38
-rw-r--r--   drivers/md/dm-rq.c        3
-rw-r--r--   include/linux/blkdev.h    3
3 files changed, 18 insertions, 26 deletions
diff --git a/block/blk-core.c b/block/blk-core.c
index 4cad535592c3..09819d24d385 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -823,15 +823,19 @@ EXPORT_SYMBOL(blk_init_queue);
 struct request_queue *
 blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
 {
-        struct request_queue *uninit_q, *q;
+        struct request_queue *q;
 
-        uninit_q = blk_alloc_queue_node(GFP_KERNEL, node_id);
-        if (!uninit_q)
+        q = blk_alloc_queue_node(GFP_KERNEL, node_id);
+        if (!q)
                 return NULL;
 
-        q = blk_init_allocated_queue(uninit_q, rfn, lock);
-        if (!q)
-                blk_cleanup_queue(uninit_q);
+        q->request_fn = rfn;
+        if (lock)
+                q->queue_lock = lock;
+        if (blk_init_allocated_queue(q) < 0) {
+                blk_cleanup_queue(q);
+                return NULL;
+        }
 
         return q;
 }
@@ -839,30 +843,19 @@ EXPORT_SYMBOL(blk_init_queue_node);
 
 static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio);
 
-struct request_queue *
-blk_init_allocated_queue(struct request_queue *q, request_fn_proc *rfn,
-                         spinlock_t *lock)
-{
-        if (!q)
-                return NULL;
+int blk_init_allocated_queue(struct request_queue *q)
+{
 
         q->fq = blk_alloc_flush_queue(q, NUMA_NO_NODE, 0);
         if (!q->fq)
-                return NULL;
+                return -ENOMEM;
 
         if (blk_init_rl(&q->root_rl, q, GFP_KERNEL))
                 goto fail;
 
         INIT_WORK(&q->timeout_work, blk_timeout_work);
-        q->request_fn           = rfn;
-        q->prep_rq_fn           = NULL;
-        q->unprep_rq_fn         = NULL;
         q->queue_flags          |= QUEUE_FLAG_DEFAULT;
 
-        /* Override internal queue lock with supplied lock pointer */
-        if (lock)
-                q->queue_lock           = lock;
-
         /*
          * This also sets hw/phys segments, boundary and size
          */
@@ -880,13 +873,12 @@ blk_init_allocated_queue(struct request_queue *q, request_fn_proc *rfn,
         }
 
         mutex_unlock(&q->sysfs_lock);
-
-        return q;
+        return 0;
 
 fail:
         blk_free_flush_queue(q->fq);
         wbt_exit(q);
-        return NULL;
+        return -ENOMEM;
 }
 EXPORT_SYMBOL(blk_init_allocated_queue);
diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c
index 9d7275fb541a..93f6e9f1ebe2 100644
--- a/drivers/md/dm-rq.c
+++ b/drivers/md/dm-rq.c
@@ -823,7 +823,8 @@ static void dm_old_request_fn(struct request_queue *q)
 int dm_old_init_request_queue(struct mapped_device *md)
 {
         /* Fully initialize the queue */
-        if (!blk_init_allocated_queue(md->queue, dm_old_request_fn, NULL))
+        md->queue->request_fn = dm_old_request_fn;
+        if (blk_init_allocated_queue(md->queue) < 0)
                 return -EINVAL;
 
         /* disable dm_old_request_fn's merge heuristic by default */
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 05675b1dfd20..6b1efc5760ea 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -1137,8 +1137,7 @@ extern void blk_unprep_request(struct request *);
 extern struct request_queue *blk_init_queue_node(request_fn_proc *rfn,
                                                  spinlock_t *lock, int node_id);
 extern struct request_queue *blk_init_queue(request_fn_proc *, spinlock_t *);
-extern struct request_queue *blk_init_allocated_queue(struct request_queue *,
-                                                       request_fn_proc *, spinlock_t *);
+extern int blk_init_allocated_queue(struct request_queue *);
 extern void blk_cleanup_queue(struct request_queue *);
 extern void blk_queue_make_request(struct request_queue *, make_request_fn *);
 extern void blk_queue_bounce_limit(struct request_queue *, u64);
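
The same conversion applies to any other request-based driver that initializes a pre-allocated queue: assign ->request_fn (and, if needed, ->queue_lock) on the queue before the call, then test the new int return value instead of the returned pointer, as dm-rq does above. Below is a minimal sketch of that calling convention; the mydrv structure and function names are hypothetical and not part of this patch.

#include <linux/blkdev.h>
#include <linux/spinlock.h>

/* Hypothetical driver state, for illustration only. */
struct mydrv {
        struct request_queue *queue;
        spinlock_t lock;
};

/* Legacy request_fn; actual servicing of requests is elided. */
static void mydrv_request_fn(struct request_queue *q)
{
}

static int mydrv_init_queue(struct mydrv *drv, int node_id)
{
        drv->queue = blk_alloc_queue_node(GFP_KERNEL, node_id);
        if (!drv->queue)
                return -ENOMEM;

        spin_lock_init(&drv->lock);

        /*
         * Before this patch:
         *      if (!blk_init_allocated_queue(drv->queue, mydrv_request_fn,
         *                                    &drv->lock))
         *              goto out_cleanup;
         *
         * After this patch: set the fields directly and check the
         * integer return value.
         */
        drv->queue->request_fn = mydrv_request_fn;
        drv->queue->queue_lock = &drv->lock;
        if (blk_init_allocated_queue(drv->queue) < 0) {
                blk_cleanup_queue(drv->queue);
                return -ENOMEM;
        }

        drv->queue->queuedata = drv;
        return 0;
}

Note that blk_init_queue() and blk_init_queue_node() keep their pointer-returning signatures, so their callers are unaffected; only direct callers of blk_init_allocated_queue(), such as dm-rq above, need this adjustment.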