Diffstat (limited to 'block')
-rw-r--r--  block/blk-core.c         81
-rw-r--r--  block/blk-exec.c          6
-rw-r--r--  block/blk-flush.c       103
-rw-r--r--  block/blk-integrity.c    40
-rw-r--r--  block/blk-lib.c          20
-rw-r--r--  block/blk-map.c           6
-rw-r--r--  block/blk-merge.c       145
-rw-r--r--  block/blk-mq-cpu.c       37
-rw-r--r--  block/blk-mq-sysfs.c     13
-rw-r--r--  block/blk-mq-tag.c        8
-rw-r--r--  block/blk-mq.c          258
-rw-r--r--  block/blk-mq.h            7
-rw-r--r--  block/blk-settings.c      4
-rw-r--r--  block/blk-sysfs.c         3
-rw-r--r--  block/blk-throttle.c     49
-rw-r--r--  block/blk-timeout.c       2
-rw-r--r--  block/blk.h               2
-rw-r--r--  block/cfq-iosched.c     131
-rw-r--r--  block/cmdline-parser.c   18
-rw-r--r--  block/elevator.c          2
-rw-r--r--  block/scsi_ioctl.c        6
21 files changed, 484 insertions, 457 deletions
diff --git a/block/blk-core.c b/block/blk-core.c
index cd0158163fe0..4db2b32b70e0 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -38,6 +38,7 @@
#include "blk.h"
#include "blk-cgroup.h"
+#include "blk-mq.h"
EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_remap);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_remap);
@@ -130,7 +131,7 @@ static void req_bio_endio(struct request *rq, struct bio *bio,
bio_advance(bio, nbytes);
/* don't actually finish bio if it's part of flush sequence */
- if (bio->bi_size == 0 && !(rq->cmd_flags & REQ_FLUSH_SEQ))
+ if (bio->bi_iter.bi_size == 0 && !(rq->cmd_flags & REQ_FLUSH_SEQ))
bio_endio(bio, error);
}
@@ -245,7 +246,16 @@ EXPORT_SYMBOL(blk_stop_queue);
void blk_sync_queue(struct request_queue *q)
{
del_timer_sync(&q->timeout);
- cancel_delayed_work_sync(&q->delay_work);
+
+ if (q->mq_ops) {
+ struct blk_mq_hw_ctx *hctx;
+ int i;
+
+ queue_for_each_hw_ctx(q, hctx, i)
+ cancel_delayed_work_sync(&hctx->delayed_work);
+ } else {
+ cancel_delayed_work_sync(&q->delay_work);
+ }
}
EXPORT_SYMBOL(blk_sync_queue);
@@ -497,8 +507,13 @@ void blk_cleanup_queue(struct request_queue *q)
* Drain all requests queued before DYING marking. Set DEAD flag to
* prevent that q->request_fn() gets invoked after draining finished.
*/
- spin_lock_irq(lock);
- __blk_drain_queue(q, true);
+ if (q->mq_ops) {
+ blk_mq_drain_queue(q);
+ spin_lock_irq(lock);
+ } else {
+ spin_lock_irq(lock);
+ __blk_drain_queue(q, true);
+ }
queue_flag_set(QUEUE_FLAG_DEAD, q);
spin_unlock_irq(lock);
@@ -678,11 +693,20 @@ blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
if (!uninit_q)
return NULL;
+ uninit_q->flush_rq = kzalloc(sizeof(struct request), GFP_KERNEL);
+ if (!uninit_q->flush_rq)
+ goto out_cleanup_queue;
+
q = blk_init_allocated_queue(uninit_q, rfn, lock);
if (!q)
- blk_cleanup_queue(uninit_q);
-
+ goto out_free_flush_rq;
return q;
+
+out_free_flush_rq:
+ kfree(uninit_q->flush_rq);
+out_cleanup_queue:
+ blk_cleanup_queue(uninit_q);
+ return NULL;
}
EXPORT_SYMBOL(blk_init_queue_node);
@@ -1112,7 +1136,7 @@ static struct request *blk_old_get_request(struct request_queue *q, int rw,
struct request *blk_get_request(struct request_queue *q, int rw, gfp_t gfp_mask)
{
if (q->mq_ops)
- return blk_mq_alloc_request(q, rw, gfp_mask, false);
+ return blk_mq_alloc_request(q, rw, gfp_mask);
else
return blk_old_get_request(q, rw, gfp_mask);
}
@@ -1263,6 +1287,11 @@ void __blk_put_request(struct request_queue *q, struct request *req)
if (unlikely(!q))
return;
+ if (q->mq_ops) {
+ blk_mq_free_request(req);
+ return;
+ }
+
blk_pm_put_request(req);
elv_completed_request(q, req);
@@ -1326,7 +1355,7 @@ void blk_add_request_payload(struct request *rq, struct page *page,
bio->bi_io_vec->bv_offset = 0;
bio->bi_io_vec->bv_len = len;
- bio->bi_size = len;
+ bio->bi_iter.bi_size = len;
bio->bi_vcnt = 1;
bio->bi_phys_segments = 1;
@@ -1351,7 +1380,7 @@ bool bio_attempt_back_merge(struct request_queue *q, struct request *req,
req->biotail->bi_next = bio;
req->biotail = bio;
- req->__data_len += bio->bi_size;
+ req->__data_len += bio->bi_iter.bi_size;
req->ioprio = ioprio_best(req->ioprio, bio_prio(bio));
blk_account_io_start(req, false);
@@ -1380,8 +1409,8 @@ bool bio_attempt_front_merge(struct request_queue *q, struct request *req,
* not touch req->buffer either...
*/
req->buffer = bio_data(bio);
- req->__sector = bio->bi_sector;
- req->__data_len += bio->bi_size;
+ req->__sector = bio->bi_iter.bi_sector;
+ req->__data_len += bio->bi_iter.bi_size;
req->ioprio = ioprio_best(req->ioprio, bio_prio(bio));
blk_account_io_start(req, false);
@@ -1459,7 +1488,7 @@ void init_request_from_bio(struct request *req, struct bio *bio)
req->cmd_flags |= REQ_FAILFAST_MASK;
req->errors = 0;
- req->__sector = bio->bi_sector;
+ req->__sector = bio->bi_iter.bi_sector;
req->ioprio = bio_prio(bio);
blk_rq_bio_prep(req->q, req, bio);
}
@@ -1583,12 +1612,12 @@ static inline void blk_partition_remap(struct bio *bio)
if (bio_sectors(bio) && bdev != bdev->bd_contains) {
struct hd_struct *p = bdev->bd_part;
- bio->bi_sector += p->start_sect;
+ bio->bi_iter.bi_sector += p->start_sect;
bio->bi_bdev = bdev->bd_contains;
trace_block_bio_remap(bdev_get_queue(bio->bi_bdev), bio,
bdev->bd_dev,
- bio->bi_sector - p->start_sect);
+ bio->bi_iter.bi_sector - p->start_sect);
}
}
@@ -1654,7 +1683,7 @@ static inline int bio_check_eod(struct bio *bio, unsigned int nr_sectors)
/* Test device or partition size, when known. */
maxsector = i_size_read(bio->bi_bdev->bd_inode) >> 9;
if (maxsector) {
- sector_t sector = bio->bi_sector;
+ sector_t sector = bio->bi_iter.bi_sector;
if (maxsector < nr_sectors || maxsector - nr_sectors < sector) {
/*
@@ -1690,7 +1719,7 @@ generic_make_request_checks(struct bio *bio)
"generic_make_request: Trying to access "
"nonexistent block-device %s (%Lu)\n",
bdevname(bio->bi_bdev, b),
- (long long) bio->bi_sector);
+ (long long) bio->bi_iter.bi_sector);
goto end_io;
}
@@ -1704,9 +1733,9 @@ generic_make_request_checks(struct bio *bio)
}
part = bio->bi_bdev->bd_part;
- if (should_fail_request(part, bio->bi_size) ||
+ if (should_fail_request(part, bio->bi_iter.bi_size) ||
should_fail_request(&part_to_disk(part)->part0,
- bio->bi_size))
+ bio->bi_iter.bi_size))
goto end_io;
/*
@@ -1865,7 +1894,7 @@ void submit_bio(int rw, struct bio *bio)
if (rw & WRITE) {
count_vm_events(PGPGOUT, count);
} else {
- task_io_account_read(bio->bi_size);
+ task_io_account_read(bio->bi_iter.bi_size);
count_vm_events(PGPGIN, count);
}
@@ -1874,7 +1903,7 @@ void submit_bio(int rw, struct bio *bio)
printk(KERN_DEBUG "%s(%d): %s block %Lu on %s (%u sectors)\n",
current->comm, task_pid_nr(current),
(rw & WRITE) ? "WRITE" : "READ",
- (unsigned long long)bio->bi_sector,
+ (unsigned long long)bio->bi_iter.bi_sector,
bdevname(bio->bi_bdev, b),
count);
}
@@ -2007,7 +2036,7 @@ unsigned int blk_rq_err_bytes(const struct request *rq)
for (bio = rq->bio; bio; bio = bio->bi_next) {
if ((bio->bi_rw & ff) != ff)
break;
- bytes += bio->bi_size;
+ bytes += bio->bi_iter.bi_size;
}
/* this could lead to infinite loop */
@@ -2378,9 +2407,9 @@ bool blk_update_request(struct request *req, int error, unsigned int nr_bytes)
total_bytes = 0;
while (req->bio) {
struct bio *bio = req->bio;
- unsigned bio_bytes = min(bio->bi_size, nr_bytes);
+ unsigned bio_bytes = min(bio->bi_iter.bi_size, nr_bytes);
- if (bio_bytes == bio->bi_size)
+ if (bio_bytes == bio->bi_iter.bi_size)
req->bio = bio->bi_next;
req_bio_endio(req, bio, bio_bytes, error);
@@ -2728,7 +2757,7 @@ void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
rq->nr_phys_segments = bio_phys_segments(q, bio);
rq->buffer = bio_data(bio);
}
- rq->__data_len = bio->bi_size;
+ rq->__data_len = bio->bi_iter.bi_size;
rq->bio = rq->biotail = bio;
if (bio->bi_bdev)
@@ -2746,10 +2775,10 @@ void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
void rq_flush_dcache_pages(struct request *rq)
{
struct req_iterator iter;
- struct bio_vec *bvec;
+ struct bio_vec bvec;
rq_for_each_segment(bvec, rq, iter)
- flush_dcache_page(bvec->bv_page);
+ flush_dcache_page(bvec.bv_page);
}
EXPORT_SYMBOL_GPL(rq_flush_dcache_pages);
#endif
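
The blk-core.c changes above follow the immutable-biovec conversion: bi_sector and bi_size move under bio->bi_iter, and iterators such as rq_for_each_segment() now hand out a struct bio_vec by value instead of a pointer, hence bvec.bv_page rather than bvec->bv_page. A minimal userspace sketch of the by-value iteration pattern (illustrative names only, not kernel code):

    #include <stdio.h>

    struct bio_vec { const char *bv_page; unsigned bv_len; unsigned bv_offset; };
    struct bvec_iter { unsigned bi_size; unsigned bi_idx; };

    int main(void)
    {
        struct bio_vec vecs[] = { { "pageA", 4096, 0 }, { "pageB", 2048, 0 } };
        struct bvec_iter iter = { .bi_size = 4096 + 2048, .bi_idx = 0 };

        while (iter.bi_size) {
            /* the iterator hands out a copy, hence bvec.bv_len, not bvec->bv_len */
            struct bio_vec bvec = vecs[iter.bi_idx];

            printf("%s: %u bytes\n", bvec.bv_page, bvec.bv_len);
            iter.bi_size -= bvec.bv_len;
            iter.bi_idx++;
        }
        return 0;
    }
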
diff --git a/block/blk-exec.c b/block/blk-exec.c
index c3edf9dff566..c68613bb4c79 100644
--- a/block/blk-exec.c
+++ b/block/blk-exec.c
@@ -60,8 +60,12 @@ void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
rq->rq_disk = bd_disk;
rq->end_io = done;
+ /*
+ * don't check dying flag for MQ because the request won't
+ * be reused after dying flag is set
+ */
if (q->mq_ops) {
- blk_mq_insert_request(q, rq, true);
+ blk_mq_insert_request(q, rq, at_head, true);
return;
}
diff --git a/block/blk-flush.c b/block/blk-flush.c
index fb6f3c0ffa49..66e2b697f5db 100644
--- a/block/blk-flush.c
+++ b/block/blk-flush.c
@@ -130,20 +130,26 @@ static void blk_flush_restore_request(struct request *rq)
blk_clear_rq_complete(rq);
}
-static void mq_flush_data_run(struct work_struct *work)
+static void mq_flush_run(struct work_struct *work)
{
struct request *rq;
- rq = container_of(work, struct request, mq_flush_data);
+ rq = container_of(work, struct request, mq_flush_work);
memset(&rq->csd, 0, sizeof(rq->csd));
blk_mq_run_request(rq, true, false);
}
-static void blk_mq_flush_data_insert(struct request *rq)
+static bool blk_flush_queue_rq(struct request *rq)
{
- INIT_WORK(&rq->mq_flush_data, mq_flush_data_run);
- kblockd_schedule_work(rq->q, &rq->mq_flush_data);
+ if (rq->q->mq_ops) {
+ INIT_WORK(&rq->mq_flush_work, mq_flush_run);
+ kblockd_schedule_work(rq->q, &rq->mq_flush_work);
+ return false;
+ } else {
+ list_add_tail(&rq->queuelist, &rq->q->queue_head);
+ return true;
+ }
}
/**
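
blk_flush_queue_rq() above folds the blk-mq and legacy flush-queueing paths into one helper whose return value tells the caller whether it still has to kick the queue: the blk-mq side defers the request to kblockd (the work item runs the hardware queue itself) and returns false, the legacy side links the request onto q->queue_head and returns true. That convention, as a compact plain-C sketch with invented types:

    #include <stdbool.h>
    #include <stdio.h>

    struct rq { bool is_mq; };

    static void schedule_flush_work(struct rq *rq) { printf("deferred to kblockd work\n"); }
    static void add_to_queue_head(struct rq *rq)   { printf("linked onto queue_head\n"); }

    /* returns true when the caller still has to run the queue */
    static bool flush_queue_rq(struct rq *rq)
    {
        if (rq->is_mq) {
            schedule_flush_work(rq);    /* the work item runs the hw queue */
            return false;
        }
        add_to_queue_head(rq);
        return true;
    }

    int main(void)
    {
        struct rq legacy = { .is_mq = false }, mq = { .is_mq = true };

        printf("legacy: kick=%d\n", flush_queue_rq(&legacy));
        printf("mq:     kick=%d\n", flush_queue_rq(&mq));
        return 0;
    }
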
@@ -187,12 +193,7 @@ static bool blk_flush_complete_seq(struct request *rq, unsigned int seq,
case REQ_FSEQ_DATA:
list_move_tail(&rq->flush.list, &q->flush_data_in_flight);
- if (q->mq_ops)
- blk_mq_flush_data_insert(rq);
- else {
- list_add(&rq->queuelist, &q->queue_head);
- queued = true;
- }
+ queued = blk_flush_queue_rq(rq);
break;
case REQ_FSEQ_DONE:
@@ -216,9 +217,6 @@ static bool blk_flush_complete_seq(struct request *rq, unsigned int seq,
}
kicked = blk_kick_flush(q);
- /* blk_mq_run_flush will run queue */
- if (q->mq_ops)
- return queued;
return kicked | queued;
}
@@ -230,10 +228,9 @@ static void flush_end_io(struct request *flush_rq, int error)
struct request *rq, *n;
unsigned long flags = 0;
- if (q->mq_ops) {
- blk_mq_free_request(flush_rq);
+ if (q->mq_ops)
spin_lock_irqsave(&q->mq_flush_lock, flags);
- }
+
running = &q->flush_queue[q->flush_running_idx];
BUG_ON(q->flush_pending_idx == q->flush_running_idx);
@@ -263,49 +260,14 @@ static void flush_end_io(struct request *flush_rq, int error)
* kblockd.
*/
if (queued || q->flush_queue_delayed) {
- if (!q->mq_ops)
- blk_run_queue_async(q);
- else
- /*
- * This can be optimized to only run queues with requests
- * queued if necessary.
- */
- blk_mq_run_queues(q, true);
+ WARN_ON(q->mq_ops);
+ blk_run_queue_async(q);
}
q->flush_queue_delayed = 0;
if (q->mq_ops)
spin_unlock_irqrestore(&q->mq_flush_lock, flags);
}
-static void mq_flush_work(struct work_struct *work)
-{
- struct request_queue *q;
- struct request *rq;
-
- q = container_of(work, struct request_queue, mq_flush_work);
-
- /* We don't need set REQ_FLUSH_SEQ, it's for consistency */
- rq = blk_mq_alloc_request(q, WRITE_FLUSH|REQ_FLUSH_SEQ,
- __GFP_WAIT|GFP_ATOMIC, true);
- rq->cmd_type = REQ_TYPE_FS;
- rq->end_io = flush_end_io;
-
- blk_mq_run_request(rq, true, false);
-}
-
-/*
- * We can't directly use q->flush_rq, because it doesn't have tag and is not in
- * hctx->rqs[]. so we must allocate a new request, since we can't sleep here,
- * so offload the work to workqueue.
- *
- * Note: we assume a flush request finished in any hardware queue will flush
- * the whole disk cache.
- */
-static void mq_run_flush(struct request_queue *q)
-{
- kblockd_schedule_work(q, &q->mq_flush_work);
-}
-
/**
* blk_kick_flush - consider issuing flush request
* @q: request_queue being kicked
@@ -340,19 +302,31 @@ static bool blk_kick_flush(struct request_queue *q)
* different from running_idx, which means flush is in flight.
*/
q->flush_pending_idx ^= 1;
+
if (q->mq_ops) {
- mq_run_flush(q);
- return true;
+ struct blk_mq_ctx *ctx = first_rq->mq_ctx;
+ struct blk_mq_hw_ctx *hctx = q->mq_ops->map_queue(q, ctx->cpu);
+
+ blk_mq_rq_init(hctx, q->flush_rq);
+ q->flush_rq->mq_ctx = ctx;
+
+ /*
+ * Reuse the tag value from the first waiting request,
+ * with blk-mq the tag is generated during request
+ * allocation and drivers can rely on it being inside
+ * the range they asked for.
+ */
+ q->flush_rq->tag = first_rq->tag;
+ } else {
+ blk_rq_init(q, q->flush_rq);
}
- blk_rq_init(q, &q->flush_rq);
- q->flush_rq.cmd_type = REQ_TYPE_FS;
- q->flush_rq.cmd_flags = WRITE_FLUSH | REQ_FLUSH_SEQ;
- q->flush_rq.rq_disk = first_rq->rq_disk;
- q->flush_rq.end_io = flush_end_io;
+ q->flush_rq->cmd_type = REQ_TYPE_FS;
+ q->flush_rq->cmd_flags = WRITE_FLUSH | REQ_FLUSH_SEQ;
+ q->flush_rq->rq_disk = first_rq->rq_disk;
+ q->flush_rq->end_io = flush_end_io;
- list_add_tail(&q->flush_rq.queuelist, &q->queue_head);
- return true;
+ return blk_flush_queue_rq(q->flush_rq);
}
static void flush_data_end_io(struct request *rq, int error)
@@ -548,7 +522,7 @@ int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask,
* copied from blk_rq_pos(rq).
*/
if (error_sector)
- *error_sector = bio->bi_sector;
+ *error_sector = bio->bi_iter.bi_sector;
bio_put(bio);
return ret;
@@ -558,5 +532,4 @@ EXPORT_SYMBOL(blkdev_issue_flush);
void blk_mq_init_flush(struct request_queue *q)
{
spin_lock_init(&q->mq_flush_lock);
- INIT_WORK(&q->mq_flush_work, mq_flush_work);
}
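
The larger change in blk-flush.c is that the flush machinery no longer allocates a throwaway request from a reserved tag via a workqueue: q->flush_rq is preallocated when the queue is set up, and on blk-mq it is reinitialized with blk_mq_rq_init() and borrows the tag of the first pending flush, so the driver still sees a tag inside the range it asked for. Roughly, as a standalone sketch (field names invented for illustration):

    #include <stdio.h>
    #include <string.h>

    struct req { int tag; const char *origin; };

    static void reinit_req(struct req *rq) { memset(rq, 0, sizeof(*rq)); }

    int main(void)
    {
        struct req flush_rq;                    /* allocated once at queue init */
        struct req first_pending = { .tag = 17, .origin = "pending write" };

        reinit_req(&flush_rq);                  /* akin to blk_mq_rq_init() */
        flush_rq.tag = first_pending.tag;       /* borrow the waiting request's tag */
        flush_rq.origin = "flush";

        printf("flush issued with tag %d (borrowed from '%s')\n",
               flush_rq.tag, first_pending.origin);
        return 0;
    }
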
diff --git a/block/blk-integrity.c b/block/blk-integrity.c
index 03cf7179e8ef..7fbab84399e6 100644
--- a/block/blk-integrity.c
+++ b/block/blk-integrity.c
@@ -43,30 +43,32 @@ static const char *bi_unsupported_name = "unsupported";
*/
int blk_rq_count_integrity_sg(struct request_queue *q, struct bio *bio)
{
- struct bio_vec *iv, *ivprv = NULL;
+ struct bio_vec iv, ivprv = { NULL };
unsigned int segments = 0;
unsigned int seg_size = 0;
- unsigned int i = 0;
+ struct bvec_iter iter;
+ int prev = 0;
- bio_for_each_integrity_vec(iv, bio, i) {
+ bio_for_each_integrity_vec(iv, bio, iter) {
- if (ivprv) {
- if (!BIOVEC_PHYS_MERGEABLE(ivprv, iv))
+ if (prev) {
+ if (!BIOVEC_PHYS_MERGEABLE(&ivprv, &iv))
goto new_segment;
- if (!BIOVEC_SEG_BOUNDARY(q, ivprv, iv))
+ if (!BIOVEC_SEG_BOUNDARY(q, &ivprv, &iv))
goto new_segment;
- if (seg_size + iv->bv_len > queue_max_segment_size(q))
+ if (seg_size + iv.bv_len > queue_max_segment_size(q))
goto new_segment;
- seg_size += iv->bv_len;
+ seg_size += iv.bv_len;
} else {
new_segment:
segments++;
- seg_size = iv->bv_len;
+ seg_size = iv.bv_len;
}
+ prev = 1;
ivprv = iv;
}
@@ -87,24 +89,25 @@ EXPORT_SYMBOL(blk_rq_count_integrity_sg);
int blk_rq_map_integrity_sg(struct request_queue *q, struct bio *bio,
struct scatterlist *sglist)
{
- struct bio_vec *iv, *ivprv = NULL;
+ struct bio_vec iv, ivprv = { NULL };
struct scatterlist *sg = NULL;
unsigned int segments = 0;
- unsigned int i = 0;
+ struct bvec_iter iter;
+ int prev = 0;
- bio_for_each_integrity_vec(iv, bio, i) {
+ bio_for_each_integrity_vec(iv, bio, iter) {
- if (ivprv) {
- if (!BIOVEC_PHYS_MERGEABLE(ivprv, iv))
+ if (prev) {
+ if (!BIOVEC_PHYS_MERGEABLE(&ivprv, &iv))
goto new_segment;
- if (!BIOVEC_SEG_BOUNDARY(q, ivprv, iv))
+ if (!BIOVEC_SEG_BOUNDARY(q, &ivprv, &iv))
goto new_segment;
- if (sg->length + iv->bv_len > queue_max_segment_size(q))
+ if (sg->length + iv.bv_len > queue_max_segment_size(q))
goto new_segment;
- sg->length += iv->bv_len;
+ sg->length += iv.bv_len;
} else {
new_segment:
if (!sg)
@@ -114,10 +117,11 @@ new_segment:
sg = sg_next(sg);
}
- sg_set_page(sg, iv->bv_page, iv->bv_len, iv->bv_offset);
+ sg_set_page(sg, iv.bv_page, iv.bv_len, iv.bv_offset);
segments++;
}
+ prev = 1;
ivprv = iv;
}
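
With the iterators yielding struct bio_vec by value, the previous vector can no longer be a NULL pointer, so both integrity walkers above keep a copy in ivprv plus a separate prev flag. The same pattern in a self-contained snippet (plain C, made-up vec type):

    #include <stdio.h>

    struct vec { unsigned off; unsigned len; };

    int main(void)
    {
        struct vec v[] = { { 0, 512 }, { 512, 512 }, { 4096, 512 } };
        struct vec prev_v = { 0, 0 };
        int prev = 0;                   /* replaces the old "ivprv != NULL" test */
        unsigned segments = 0, seg_size = 0;

        for (unsigned i = 0; i < sizeof(v) / sizeof(v[0]); i++) {
            struct vec cur = v[i];

            if (prev && prev_v.off + prev_v.len == cur.off) {
                seg_size += cur.len;    /* contiguous: extend the segment */
            } else {
                segments++;
                seg_size = cur.len;
            }
            prev = 1;
            prev_v = cur;
        }
        printf("%u segments, last segment %u bytes\n", segments, seg_size);
        return 0;
    }
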
diff --git a/block/blk-lib.c b/block/blk-lib.c
index 9b5b561cb928..97a733cf3d5f 100644
--- a/block/blk-lib.c
+++ b/block/blk-lib.c
@@ -108,17 +108,25 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
req_sects = end_sect - sector;
}
- bio->bi_sector = sector;
+ bio->bi_iter.bi_sector = sector;
bio->bi_end_io = bio_batch_end_io;
bio->bi_bdev = bdev;
bio->bi_private = &bb;
- bio->bi_size = req_sects << 9;
+ bio->bi_iter.bi_size = req_sects << 9;
nr_sects -= req_sects;
sector = end_sect;
atomic_inc(&bb.done);
submit_bio(type, bio);
+
+ /*
+ * We can loop for a long time in here, if someone does
+ * full device discards (like mkfs). Be nice and allow
+ * us to schedule out to avoid softlocking if preempt
+ * is disabled.
+ */
+ cond_resched();
}
blk_finish_plug(&plug);
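
The cond_resched() added above matters because a full-device discard (mkfs, for example) can keep this loop busy for a long time with preemption disabled; yielding periodically avoids soft-lockup warnings. A userspace analogue of the pattern, with sched_yield() standing in for cond_resched():

    #include <sched.h>
    #include <stdio.h>

    int main(void)
    {
        unsigned long long remaining = 1ULL << 20;  /* pretend sectors left to discard */

        while (remaining) {
            unsigned long long chunk = remaining > 4096 ? 4096 : remaining;

            /* submit one discard bio covering 'chunk' sectors here */
            remaining -= chunk;

            sched_yield();  /* like cond_resched(): give other tasks a turn */
        }
        printf("all ranges submitted\n");
        return 0;
    }
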
@@ -174,7 +182,7 @@ int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
break;
}
- bio->bi_sector = sector;
+ bio->bi_iter.bi_sector = sector;
bio->bi_end_io = bio_batch_end_io;
bio->bi_bdev = bdev;
bio->bi_private = &bb;
@@ -184,11 +192,11 @@ int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
bio->bi_io_vec->bv_len = bdev_logical_block_size(bdev);
if (nr_sects > max_write_same_sectors) {
- bio->bi_size = max_write_same_sectors << 9;
+ bio->bi_iter.bi_size = max_write_same_sectors << 9;
nr_sects -= max_write_same_sectors;
sector += max_write_same_sectors;
} else {
- bio->bi_size = nr_sects << 9;
+ bio->bi_iter.bi_size = nr_sects << 9;
nr_sects = 0;
}
@@ -240,7 +248,7 @@ int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
break;
}
- bio->bi_sector = sector;
+ bio->bi_iter.bi_sector = sector;
bio->bi_bdev = bdev;
bio->bi_end_io = bio_batch_end_io;
bio->bi_private = &bb;
diff --git a/block/blk-map.c b/block/blk-map.c
index 62382ad5b010..cca6356d216d 100644
--- a/block/blk-map.c
+++ b/block/blk-map.c
@@ -20,7 +20,7 @@ int blk_rq_append_bio(struct request_queue *q, struct request *rq,
rq->biotail->bi_next = bio;
rq->biotail = bio;
- rq->__data_len += bio->bi_size;
+ rq->__data_len += bio->bi_iter.bi_size;
}
return 0;
}
@@ -76,7 +76,7 @@ static int __blk_rq_map_user(struct request_queue *q, struct request *rq,
ret = blk_rq_append_bio(q, rq, bio);
if (!ret)
- return bio->bi_size;
+ return bio->bi_iter.bi_size;
/* if it was bounced we must call the end io function */
bio_endio(bio, 0);
@@ -220,7 +220,7 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
if (IS_ERR(bio))
return PTR_ERR(bio);
- if (bio->bi_size != len) {
+ if (bio->bi_iter.bi_size != len) {
/*
* Grab an extra reference to this bio, as bio_unmap_user()
* expects to be able to drop it twice as it happens on the
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 1ffc58977835..6c583f9c5b65 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -12,38 +12,47 @@
static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
struct bio *bio)
{
- struct bio_vec *bv, *bvprv = NULL;
- int cluster, i, high, highprv = 1;
+ struct bio_vec bv, bvprv = { NULL };
+ int cluster, high, highprv = 1;
unsigned int seg_size, nr_phys_segs;
struct bio *fbio, *bbio;
+ struct bvec_iter iter;
if (!bio)
return 0;
+ /*
+ * This should probably be returning 0, but blk_add_request_payload()
+ * (Christoph!!!!)
+ */
+ if (bio->bi_rw & REQ_DISCARD)
+ return 1;
+
+ if (bio->bi_rw & REQ_WRITE_SAME)
+ return 1;
+
fbio = bio;
cluster = blk_queue_cluster(q);
seg_size = 0;
nr_phys_segs = 0;
for_each_bio(bio) {
- bio_for_each_segment(bv, bio, i) {
+ bio_for_each_segment(bv, bio, iter) {
/*
* the trick here is making sure that a high page is
* never considered part of another segment, since that
* might change with the bounce page.
*/
- high = page_to_pfn(bv->bv_page) > queue_bounce_pfn(q);
- if (high || highprv)
- goto new_segment;
- if (cluster) {
- if (seg_size + bv->bv_len
+ high = page_to_pfn(bv.bv_page) > queue_bounce_pfn(q);
+ if (!high && !highprv && cluster) {
+ if (seg_size + bv.bv_len
> queue_max_segment_size(q))
goto new_segment;
- if (!BIOVEC_PHYS_MERGEABLE(bvprv, bv))
+ if (!BIOVEC_PHYS_MERGEABLE(&bvprv, &bv))
goto new_segment;
- if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bv))
+ if (!BIOVEC_SEG_BOUNDARY(q, &bvprv, &bv))
goto new_segment;
- seg_size += bv->bv_len;
+ seg_size += bv.bv_len;
bvprv = bv;
continue;
}
@@ -54,7 +63,7 @@ new_segment:
nr_phys_segs++;
bvprv = bv;
- seg_size = bv->bv_len;
+ seg_size = bv.bv_len;
highprv = high;
}
bbio = bio;
@@ -87,6 +96,9 @@ EXPORT_SYMBOL(blk_recount_segments);
static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
struct bio *nxt)
{
+ struct bio_vec end_bv = { NULL }, nxt_bv;
+ struct bvec_iter iter;
+
if (!blk_queue_cluster(q))
return 0;
@@ -97,34 +109,40 @@ static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
if (!bio_has_data(bio))
return 1;
- if (!BIOVEC_PHYS_MERGEABLE(__BVEC_END(bio), __BVEC_START(nxt)))
+ bio_for_each_segment(end_bv, bio, iter)
+ if (end_bv.bv_len == iter.bi_size)
+ break;
+
+ nxt_bv = bio_iovec(nxt);
+
+ if (!BIOVEC_PHYS_MERGEABLE(&end_bv, &nxt_bv))
return 0;
/*
* bio and nxt are contiguous in memory; check if the queue allows
* these two to be merged into one
*/
- if (BIO_SEG_BOUNDARY(q, bio, nxt))
+ if (BIOVEC_SEG_BOUNDARY(q, &end_bv, &nxt_bv))
return 1;
return 0;
}
-static void
+static inline void
__blk_segment_map_sg(struct request_queue *q, struct bio_vec *bvec,
- struct scatterlist *sglist, struct bio_vec **bvprv,
+ struct scatterlist *sglist, struct bio_vec *bvprv,
struct scatterlist **sg, int *nsegs, int *cluster)
{
int nbytes = bvec->bv_len;
- if (*bvprv && *cluster) {
+ if (*sg && *cluster) {
if ((*sg)->length + nbytes > queue_max_segment_size(q))
goto new_segment;
- if (!BIOVEC_PHYS_MERGEABLE(*bvprv, bvec))
+ if (!BIOVEC_PHYS_MERGEABLE(bvprv, bvec))
goto new_segment;
- if (!BIOVEC_SEG_BOUNDARY(q, *bvprv, bvec))
+ if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bvec))
goto new_segment;
(*sg)->length += nbytes;
@@ -150,7 +168,49 @@ new_segment:
sg_set_page(*sg, bvec->bv_page, nbytes, bvec->bv_offset);
(*nsegs)++;
}
- *bvprv = bvec;
+ *bvprv = *bvec;
+}
+
+static int __blk_bios_map_sg(struct request_queue *q, struct bio *bio,
+ struct scatterlist *sglist,
+ struct scatterlist **sg)
+{
+ struct bio_vec bvec, bvprv = { NULL };
+ struct bvec_iter iter;
+ int nsegs, cluster;
+
+ nsegs = 0;
+ cluster = blk_queue_cluster(q);
+
+ if (bio->bi_rw & REQ_DISCARD) {
+ /*
+ * This is a hack - drivers should be neither modifying the
+ * biovec, nor relying on bi_vcnt - but because of
+ * blk_add_request_payload(), a discard bio may or may not have
+ * a payload we need to set up here (thank you Christoph) and
+ * bi_vcnt is really the only way of telling if we need to.
+ */
+
+ if (bio->bi_vcnt)
+ goto single_segment;
+
+ return 0;
+ }
+
+ if (bio->bi_rw & REQ_WRITE_SAME) {
+single_segment:
+ *sg = sglist;
+ bvec = bio_iovec(bio);
+ sg_set_page(*sg, bvec.bv_page, bvec.bv_len, bvec.bv_offset);
+ return 1;
+ }
+
+ for_each_bio(bio)
+ bio_for_each_segment(bvec, bio, iter)
+ __blk_segment_map_sg(q, &bvec, sglist, &bvprv, sg,
+ &nsegs, &cluster);
+
+ return nsegs;
}
/*
@@ -160,24 +220,11 @@ new_segment:
int blk_rq_map_sg(struct request_queue *q, struct request *rq,
struct scatterlist *sglist)
{
- struct bio_vec *bvec, *bvprv;
- struct req_iterator iter;
- struct scatterlist *sg;
- int nsegs, cluster;
-
- nsegs = 0;
- cluster = blk_queue_cluster(q);
-
- /*
- * for each bio in rq
- */
- bvprv = NULL;
- sg = NULL;
- rq_for_each_segment(bvec, rq, iter) {
- __blk_segment_map_sg(q, bvec, sglist, &bvprv, &sg,
- &nsegs, &cluster);
- } /* segments in rq */
+ struct scatterlist *sg = NULL;
+ int nsegs = 0;
+ if (rq->bio)
+ nsegs = __blk_bios_map_sg(q, rq->bio, sglist, &sg);
if (unlikely(rq->cmd_flags & REQ_COPY_USER) &&
(blk_rq_bytes(rq) & q->dma_pad_mask)) {
@@ -223,21 +270,13 @@ EXPORT_SYMBOL(blk_rq_map_sg);
int blk_bio_map_sg(struct request_queue *q, struct bio *bio,
struct scatterlist *sglist)
{
- struct bio_vec *bvec, *bvprv;
- struct scatterlist *sg;
- int nsegs, cluster;
- unsigned long i;
-
- nsegs = 0;
- cluster = blk_queue_cluster(q);
-
- bvprv = NULL;
- sg = NULL;
- bio_for_each_segment(bvec, bio, i) {
- __blk_segment_map_sg(q, bvec, sglist, &bvprv, &sg,
- &nsegs, &cluster);
- } /* segments in bio */
+ struct scatterlist *sg = NULL;
+ int nsegs;
+ struct bio *next = bio->bi_next;
+ bio->bi_next = NULL;
+ nsegs = __blk_bios_map_sg(q, bio, sglist, &sg);
+ bio->bi_next = next;
if (sg)
sg_mark_end(sg);
@@ -543,9 +582,9 @@ bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
int blk_try_merge(struct request *rq, struct bio *bio)
{
- if (blk_rq_pos(rq) + blk_rq_sectors(rq) == bio->bi_sector)
+ if (blk_rq_pos(rq) + blk_rq_sectors(rq) == bio->bi_iter.bi_sector)
return ELEVATOR_BACK_MERGE;
- else if (blk_rq_pos(rq) - bio_sectors(bio) == bio->bi_sector)
+ else if (blk_rq_pos(rq) - bio_sectors(bio) == bio->bi_iter.bi_sector)
return ELEVATOR_FRONT_MERGE;
return ELEVATOR_NO_MERGE;
}
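
__blk_bios_map_sg() above walks every bio_vec of every bio in the request and either grows the current scatterlist entry (clustering allowed, physically mergeable, within the segment boundary and the max segment size) or starts a new one, with REQ_DISCARD/REQ_WRITE_SAME capped at a single payload segment. A rough userspace model of the merge rule (invented types and a made-up MAX_SEG cap):

    #include <stdio.h>

    struct buf { unsigned long addr; unsigned len; };
    struct sg  { unsigned long addr; unsigned len; };

    #define MAX_SEG 8192u   /* stand-in for queue_max_segment_size() */

    int main(void)
    {
        struct buf bufs[] = { { 0x1000, 4096 }, { 0x2000, 4096 },
                              { 0x3000, 4096 }, { 0x9000, 512 } };
        struct sg sgl[4];
        int nsegs = 0;

        for (int i = 0; i < 4; i++) {
            struct sg *cur = nsegs ? &sgl[nsegs - 1] : NULL;

            if (cur && cur->addr + cur->len == bufs[i].addr &&
                cur->len + bufs[i].len <= MAX_SEG) {
                cur->len += bufs[i].len;        /* cluster into the previous entry */
            } else {
                sgl[nsegs].addr = bufs[i].addr; /* start a new entry */
                sgl[nsegs].len = bufs[i].len;
                nsegs++;
            }
        }
        for (int i = 0; i < nsegs; i++)
            printf("sg[%d]: addr=%#lx len=%u\n", i, sgl[i].addr, sgl[i].len);
        return 0;
    }
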
diff --git a/block/blk-mq-cpu.c b/block/blk-mq-cpu.c
index 0045ace9bdf0..3146befb56aa 100644
--- a/block/blk-mq-cpu.c
+++ b/block/blk-mq-cpu.c
@@ -28,36 +28,6 @@ static int blk_mq_main_cpu_notify(struct notifier_block *self,
return NOTIFY_OK;
}
-static void blk_mq_cpu_notify(void *data, unsigned long action,
- unsigned int cpu)
-{
- if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
- /*
- * If the CPU goes away, ensure that we run any pending
- * completions.
- */
- struct llist_node *node;
- struct request *rq;
-
- local_irq_disable();
-
- node = llist_del_all(&per_cpu(ipi_lists, cpu));
- while (node) {
- struct llist_node *next = node->next;
-
- rq = llist_entry(node, struct request, ll_list);
- __blk_mq_end_io(rq, rq->errors);
- node = next;
- }
-
- local_irq_enable();
- }
-}
-
-static struct notifier_block __cpuinitdata blk_mq_main_cpu_notifier = {
- .notifier_call = blk_mq_main_cpu_notify,
-};
-
void blk_mq_register_cpu_notifier(struct blk_mq_cpu_notifier *notifier)
{
BUG_ON(!notifier->notify);
@@ -82,12 +52,7 @@ void blk_mq_init_cpu_notifier(struct blk_mq_cpu_notifier *notifier,
notifier->data = data;
}
-static struct blk_mq_cpu_notifier __cpuinitdata cpu_notifier = {
- .notify = blk_mq_cpu_notify,
-};
-
void __init blk_mq_cpu_init(void)
{
- register_hotcpu_notifier(&blk_mq_main_cpu_notifier);
- blk_mq_register_cpu_notifier(&cpu_notifier);
+ hotcpu_notifier(blk_mq_main_cpu_notify, 0);
}
diff --git a/block/blk-mq-sysfs.c b/block/blk-mq-sysfs.c
index ba6cf8e9aa0a..b91ce75bd35d 100644
--- a/block/blk-mq-sysfs.c
+++ b/block/blk-mq-sysfs.c
@@ -335,9 +335,22 @@ static struct kobj_type blk_mq_hw_ktype = {
void blk_mq_unregister_disk(struct gendisk *disk)
{
struct request_queue *q = disk->queue;
+ struct blk_mq_hw_ctx *hctx;
+ struct blk_mq_ctx *ctx;
+ int i, j;
+
+ queue_for_each_hw_ctx(q, hctx, i) {
+ hctx_for_each_ctx(hctx, ctx, j) {
+ kobject_del(&ctx->kobj);
+ kobject_put(&ctx->kobj);
+ }
+ kobject_del(&hctx->kobj);
+ kobject_put(&hctx->kobj);
+ }
kobject_uevent(&q->mq_kobj, KOBJ_REMOVE);
kobject_del(&q->mq_kobj);
+ kobject_put(&q->mq_kobj);
kobject_put(&disk_to_dev(disk)->kobj);
}
diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
index d64a02fb1f73..83ae96c51a27 100644
--- a/block/blk-mq-tag.c
+++ b/block/blk-mq-tag.c
@@ -36,7 +36,8 @@ static unsigned int __blk_mq_get_tag(struct blk_mq_tags *tags, gfp_t gfp)
{
int tag;
- tag = percpu_ida_alloc(&tags->free_tags, gfp);
+ tag = percpu_ida_alloc(&tags->free_tags, (gfp & __GFP_WAIT) ?
+ TASK_UNINTERRUPTIBLE : TASK_RUNNING);
if (tag < 0)
return BLK_MQ_TAG_FAIL;
return tag + tags->nr_reserved_tags;
@@ -52,7 +53,8 @@ static unsigned int __blk_mq_get_reserved_tag(struct blk_mq_tags *tags,
return BLK_MQ_TAG_FAIL;
}
- tag = percpu_ida_alloc(&tags->reserved_tags, gfp);
+ tag = percpu_ida_alloc(&tags->reserved_tags, (gfp & __GFP_WAIT) ?
+ TASK_UNINTERRUPTIBLE : TASK_RUNNING);
if (tag < 0)
return BLK_MQ_TAG_FAIL;
return tag;
@@ -182,7 +184,7 @@ void blk_mq_free_tags(struct blk_mq_tags *tags)
ssize_t blk_mq_tag_sysfs_show(struct blk_mq_tags *tags, char *page)
{
char *orig_page = page;
- int cpu;
+ unsigned int cpu;
if (!tags)
return 0;
diff --git a/block/blk-mq.c b/block/blk-mq.c
index c79126e11030..1fa9dd153fde 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -27,8 +27,6 @@ static LIST_HEAD(all_q_list);
static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx);
-DEFINE_PER_CPU(struct llist_head, ipi_lists);
-
static struct blk_mq_ctx *__blk_mq_get_ctx(struct request_queue *q,
unsigned int cpu)
{
@@ -106,10 +104,13 @@ static int blk_mq_queue_enter(struct request_queue *q)
spin_lock_irq(q->queue_lock);
ret = wait_event_interruptible_lock_irq(q->mq_freeze_wq,
- !blk_queue_bypass(q), *q->queue_lock);
+ !blk_queue_bypass(q) || blk_queue_dying(q),
+ *q->queue_lock);
/* inc usage with lock hold to avoid freeze_queue runs here */
- if (!ret)
+ if (!ret && !blk_queue_dying(q))
__percpu_counter_add(&q->mq_usage_counter, 1, 1000000);
+ else if (blk_queue_dying(q))
+ ret = -ENODEV;
spin_unlock_irq(q->queue_lock);
return ret;
@@ -120,6 +121,22 @@ static void blk_mq_queue_exit(struct request_queue *q)
__percpu_counter_add(&q->mq_usage_counter, -1, 1000000);
}
+static void __blk_mq_drain_queue(struct request_queue *q)
+{
+ while (true) {
+ s64 count;
+
+ spin_lock_irq(q->queue_lock);
+ count = percpu_counter_sum(&q->mq_usage_counter);
+ spin_unlock_irq(q->queue_lock);
+
+ if (count == 0)
+ break;
+ blk_mq_run_queues(q, false);
+ msleep(10);
+ }
+}
+
/*
* Guarantee no request is in use, so we can change any data structure of
* the queue afterward.
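
__blk_mq_drain_queue() above is a simple poll loop: sum the per-cpu usage counter under the queue lock, kick the queues, sleep 10 ms, and stop once the count reaches zero; it is now shared by the freeze and cleanup paths. A userspace analogue using an atomic counter (illustrative only):

    #include <stdatomic.h>
    #include <stdio.h>
    #include <unistd.h>

    static atomic_int in_flight = 3;    /* stands in for the mq usage counter */

    static void run_queues(void)
    {
        /* stand-in for blk_mq_run_queues(): pretend one request completes */
        if (atomic_load(&in_flight) > 0)
            atomic_fetch_sub(&in_flight, 1);
    }

    int main(void)
    {
        for (;;) {
            int count = atomic_load(&in_flight);    /* percpu_counter_sum() analogue */

            if (count == 0)
                break;
            run_queues();
            usleep(10 * 1000);                      /* msleep(10) */
        }
        printf("queue drained\n");
        return 0;
    }
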
@@ -133,21 +150,13 @@ static void blk_mq_freeze_queue(struct request_queue *q)
queue_flag_set(QUEUE_FLAG_BYPASS, q);
spin_unlock_irq(q->queue_lock);
- if (!drain)
- return;
-
- while (true) {
- s64 count;
-
- spin_lock_irq(q->queue_lock);
- count = percpu_counter_sum(&q->mq_usage_counter);
- spin_unlock_irq(q->queue_lock);
+ if (drain)
+ __blk_mq_drain_queue(q);
+}
- if (count == 0)
- break;
- blk_mq_run_queues(q, false);
- msleep(10);
- }
+void blk_mq_drain_queue(struct request_queue *q)
+{
+ __blk_mq_drain_queue(q);
}
static void blk_mq_unfreeze_queue(struct request_queue *q)
@@ -179,6 +188,8 @@ static void blk_mq_rq_ctx_init(struct request_queue *q, struct blk_mq_ctx *ctx,
rq->mq_ctx = ctx;
rq->cmd_flags = rw_flags;
+ rq->start_time = jiffies;
+ set_start_time_ns(rq);
ctx->rq_dispatched[rw_is_sync(rw_flags)]++;
}
@@ -215,15 +226,14 @@ static struct request *blk_mq_alloc_request_pinned(struct request_queue *q,
return rq;
}
-struct request *blk_mq_alloc_request(struct request_queue *q, int rw,
- gfp_t gfp, bool reserved)
+struct request *blk_mq_alloc_request(struct request_queue *q, int rw, gfp_t gfp)
{
struct request *rq;
if (blk_mq_queue_enter(q))
return NULL;
- rq = blk_mq_alloc_request_pinned(q, rw, gfp, reserved);
+ rq = blk_mq_alloc_request_pinned(q, rw, gfp, false);
if (rq)
blk_mq_put_ctx(rq->mq_ctx);
return rq;
@@ -247,7 +257,7 @@ EXPORT_SYMBOL(blk_mq_alloc_reserved_request);
/*
* Re-init and set pdu, if we have it
*/
-static void blk_mq_rq_init(struct blk_mq_hw_ctx *hctx, struct request *rq)
+void blk_mq_rq_init(struct blk_mq_hw_ctx *hctx, struct request *rq)
{
blk_rq_init(hctx->queue, rq);
@@ -294,7 +304,7 @@ static void blk_mq_bio_endio(struct request *rq, struct bio *bio, int error)
bio_endio(bio, error);
}
-void blk_mq_complete_request(struct request *rq, int error)
+void blk_mq_end_io(struct request *rq, int error)
{
struct bio *bio = rq->bio;
unsigned int bytes = 0;
@@ -305,7 +315,7 @@ void blk_mq_complete_request(struct request *rq, int error)
struct bio *next = bio->bi_next;
bio->bi_next = NULL;
- bytes += bio->bi_size;
+ bytes += bio->bi_iter.bi_size;
blk_mq_bio_endio(rq, bio, error);
bio = next;
}
@@ -319,87 +329,55 @@ void blk_mq_complete_request(struct request *rq, int error)
else
blk_mq_free_request(rq);
}
+EXPORT_SYMBOL(blk_mq_end_io);
-void __blk_mq_end_io(struct request *rq, int error)
-{
- if (!blk_mark_rq_complete(rq))
- blk_mq_complete_request(rq, error);
-}
-
-#if defined(CONFIG_SMP)
-
-/*
- * Called with interrupts disabled.
- */
-static void ipi_end_io(void *data)
-{
- struct llist_head *list = &per_cpu(ipi_lists, smp_processor_id());
- struct llist_node *entry, *next;
- struct request *rq;
-
- entry = llist_del_all(list);
-
- while (entry) {
- next = entry->next;
- rq = llist_entry(entry, struct request, ll_list);
- __blk_mq_end_io(rq, rq->errors);
- entry = next;
- }
-}
-
-static int ipi_remote_cpu(struct blk_mq_ctx *ctx, const int cpu,
- struct request *rq, const int error)
+static void __blk_mq_complete_request_remote(void *data)
{
- struct call_single_data *data = &rq->csd;
-
- rq->errors = error;
- rq->ll_list.next = NULL;
-
- /*
- * If the list is non-empty, an existing IPI must already
- * be "in flight". If that is the case, we need not schedule
- * a new one.
- */
- if (llist_add(&rq->ll_list, &per_cpu(ipi_lists, ctx->cpu))) {
- data->func = ipi_end_io;
- data->flags = 0;
- __smp_call_function_single(ctx->cpu, data, 0);
- }
+ struct request *rq = data;
- return true;
-}
-#else /* CONFIG_SMP */
-static int ipi_remote_cpu(struct blk_mq_ctx *ctx, const int cpu,
- struct request *rq, const int error)
-{
- return false;
+ rq->q->softirq_done_fn(rq);
}
-#endif
-/*
- * End IO on this request on a multiqueue enabled driver. We'll either do
- * it directly inline, or punt to a local IPI handler on the matching
- * remote CPU.
- */
-void blk_mq_end_io(struct request *rq, int error)
+void __blk_mq_complete_request(struct request *rq)
{
struct blk_mq_ctx *ctx = rq->mq_ctx;
int cpu;
- if (!ctx->ipi_redirect)
- return __blk_mq_end_io(rq, error);
+ if (!ctx->ipi_redirect) {
+ rq->q->softirq_done_fn(rq);
+ return;
+ }
cpu = get_cpu();
-
- if (cpu == ctx->cpu || !cpu_online(ctx->cpu) ||
- !ipi_remote_cpu(ctx, cpu, rq, error))
- __blk_mq_end_io(rq, error);
-
+ if (cpu != ctx->cpu && cpu_online(ctx->cpu)) {
+ rq->csd.func = __blk_mq_complete_request_remote;
+ rq->csd.info = rq;
+ rq->csd.flags = 0;
+ __smp_call_function_single(ctx->cpu, &rq->csd, 0);
+ } else {
+ rq->q->softirq_done_fn(rq);
+ }
put_cpu();
}
-EXPORT_SYMBOL(blk_mq_end_io);
-static void blk_mq_start_request(struct request *rq)
+/**
+ * blk_mq_complete_request - end I/O on a request
+ * @rq: the request being processed
+ *
+ * Description:
+ * Ends all I/O on a request. It does not handle partial completions.
+ * The actual completion happens out-of-order, through a IPI handler.
+ **/
+void blk_mq_complete_request(struct request *rq)
+{
+ if (unlikely(blk_should_fake_timeout(rq->q)))
+ return;
+ if (!blk_mark_rq_complete(rq))
+ __blk_mq_complete_request(rq);
+}
+EXPORT_SYMBOL(blk_mq_complete_request);
+
+static void blk_mq_start_request(struct request *rq, bool last)
{
struct request_queue *q = rq->q;
@@ -412,6 +390,25 @@ static void blk_mq_start_request(struct request *rq)
*/
rq->deadline = jiffies + q->rq_timeout;
set_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
+
+ if (q->dma_drain_size && blk_rq_bytes(rq)) {
+ /*
+ * Make sure space for the drain appears. We know we can do
+ * this because max_hw_segments has been adjusted to be one
+ * fewer than the device can handle.
+ */
+ rq->nr_phys_segments++;
+ }
+
+ /*
+ * Flag the last request in the series so that drivers know when IO
+ * should be kicked off, if they don't do it on a per-request basis.
+ *
+ * Note: the flag isn't the only condition drivers should do kick off.
+ * If drive is busy, the last request might not have the bit set.
+ */
+ if (last)
+ rq->cmd_flags |= REQ_END;
}
static void blk_mq_requeue_request(struct request *rq)
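
blk_mq_start_request() now takes a last flag and sets REQ_END on the final request of a dispatch batch, so drivers that batch doorbell writes know when to kick the hardware (requeue clears it again, and the DMA-drain segment accounting is likewise undone). The batching idea in a tiny sketch (plain C, invented names):

    #include <stdbool.h>
    #include <stdio.h>

    struct req { int id; bool end_of_batch; };

    static void queue_rq(struct req *rq)
    {
        printf("queue rq %d%s\n", rq->id, rq->end_of_batch ? " -> kick hardware" : "");
    }

    int main(void)
    {
        struct req batch[] = { { 1, false }, { 2, false }, { 3, false } };
        int n = sizeof(batch) / sizeof(batch[0]);

        for (int i = 0; i < n; i++) {
            batch[i].end_of_batch = (i == n - 1);   /* REQ_END when the list runs dry */
            queue_rq(&batch[i]);
        }
        return 0;
    }
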
@@ -420,6 +417,11 @@ static void blk_mq_requeue_request(struct request *rq)
trace_block_rq_requeue(q, rq);
clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
+
+ rq->cmd_flags &= ~REQ_END;
+
+ if (q->dma_drain_size && blk_rq_bytes(rq))
+ rq->nr_phys_segments--;
}
struct blk_mq_timeout_data {
@@ -587,19 +589,8 @@ static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
rq = list_first_entry(&rq_list, struct request, queuelist);
list_del_init(&rq->queuelist);
- blk_mq_start_request(rq);
- /*
- * Last request in the series. Flag it as such, this
- * enables drivers to know when IO should be kicked off,
- * if they don't do it on a per-request basis.
- *
- * Note: the flag isn't the only condition drivers
- * should do kick off. If drive is busy, the last
- * request might not have the bit set.
- */
- if (list_empty(&rq_list))
- rq->cmd_flags |= REQ_END;
+ blk_mq_start_request(rq, list_empty(&rq_list));
ret = q->mq_ops->queue_rq(hctx, rq);
switch (ret) {
@@ -617,8 +608,8 @@ static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
break;
default:
pr_err("blk-mq: bad return on queue: %d\n", ret);
- rq->errors = -EIO;
case BLK_MQ_RQ_QUEUE_ERROR:
+ rq->errors = -EIO;
blk_mq_end_io(rq, rq->errors);
break;
}
@@ -721,13 +712,16 @@ static void blk_mq_work_fn(struct work_struct *work)
}
static void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx,
- struct request *rq)
+ struct request *rq, bool at_head)
{
struct blk_mq_ctx *ctx = rq->mq_ctx;
trace_block_rq_insert(hctx->queue, rq);
- list_add_tail(&rq->queuelist, &ctx->rq_list);
+ if (at_head)
+ list_add(&rq->queuelist, &ctx->rq_list);
+ else
+ list_add_tail(&rq->queuelist, &ctx->rq_list);
blk_mq_hctx_mark_pending(hctx, ctx);
/*
@@ -737,7 +731,7 @@ static void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx,
}
void blk_mq_insert_request(struct request_queue *q, struct request *rq,
- bool run_queue)
+ bool at_head, bool run_queue)
{
struct blk_mq_hw_ctx *hctx;
struct blk_mq_ctx *ctx, *current_ctx;
@@ -756,7 +750,7 @@ void blk_mq_insert_request(struct request_queue *q, struct request *rq,
rq->mq_ctx = ctx;
}
spin_lock(&ctx->lock);
- __blk_mq_insert_request(hctx, rq);
+ __blk_mq_insert_request(hctx, rq, at_head);
spin_unlock(&ctx->lock);
blk_mq_put_ctx(current_ctx);
@@ -788,7 +782,7 @@ void blk_mq_run_request(struct request *rq, bool run_queue, bool async)
/* ctx->cpu might be offline */
spin_lock(&ctx->lock);
- __blk_mq_insert_request(hctx, rq);
+ __blk_mq_insert_request(hctx, rq, false);
spin_unlock(&ctx->lock);
blk_mq_put_ctx(current_ctx);
@@ -826,7 +820,7 @@ static void blk_mq_insert_requests(struct request_queue *q,
rq = list_first_entry(list, struct request, queuelist);
list_del_init(&rq->queuelist);
rq->mq_ctx = ctx;
- __blk_mq_insert_request(hctx, rq);
+ __blk_mq_insert_request(hctx, rq, false);
}
spin_unlock(&ctx->lock);
@@ -916,6 +910,11 @@ static void blk_mq_make_request(struct request_queue *q, struct bio *bio)
blk_queue_bounce(q, &bio);
+ if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) {
+ bio_endio(bio, -EIO);
+ return;
+ }
+
if (use_plug && blk_attempt_plug_merge(q, bio, &request_count))
return;
@@ -978,7 +977,7 @@ static void blk_mq_make_request(struct request_queue *q, struct bio *bio)
__blk_mq_free_request(hctx, ctx, rq);
else {
blk_mq_bio_to_request(rq, bio);
- __blk_mq_insert_request(hctx, rq);
+ __blk_mq_insert_request(hctx, rq, false);
}
spin_unlock(&ctx->lock);
@@ -1091,8 +1090,8 @@ static void blk_mq_free_rq_map(struct blk_mq_hw_ctx *hctx)
struct page *page;
while (!list_empty(&hctx->page_list)) {
- page = list_first_entry(&hctx->page_list, struct page, list);
- list_del_init(&page->list);
+ page = list_first_entry(&hctx->page_list, struct page, lru);
+ list_del_init(&page->lru);
__free_pages(page, page->private);
}
@@ -1156,7 +1155,7 @@ static int blk_mq_init_rq_map(struct blk_mq_hw_ctx *hctx,
break;
page->private = this_order;
- list_add_tail(&page->list, &hctx->page_list);
+ list_add_tail(&page->lru, &hctx->page_list);
p = page_address(page);
entries_per_page = order_to_size(this_order) / rq_size;
@@ -1337,15 +1336,6 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_reg *reg,
reg->queue_depth = BLK_MQ_MAX_DEPTH;
}
- /*
- * Set aside a tag for flush requests. It will only be used while
- * another flush request is in progress but outside the driver.
- *
- * TODO: only allocate if flushes are supported
- */
- reg->queue_depth++;
- reg->reserved_tags++;
-
if (reg->queue_depth < (reg->reserved_tags + BLK_MQ_TAG_MIN))
return ERR_PTR(-EINVAL);
@@ -1388,17 +1378,27 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_reg *reg,
q->mq_ops = reg->ops;
q->queue_flags |= QUEUE_FLAG_MQ_DEFAULT;
+ q->sg_reserved_size = INT_MAX;
+
blk_queue_make_request(q, blk_mq_make_request);
blk_queue_rq_timed_out(q, reg->ops->timeout);
if (reg->timeout)
blk_queue_rq_timeout(q, reg->timeout);
+ if (reg->ops->complete)
+ blk_queue_softirq_done(q, reg->ops->complete);
+
blk_mq_init_flush(q);
blk_mq_init_cpu_queues(q, reg->nr_hw_queues);
- if (blk_mq_init_hw_queues(q, reg, driver_data))
+ q->flush_rq = kzalloc(round_up(sizeof(struct request) + reg->cmd_size,
+ cache_line_size()), GFP_KERNEL);
+ if (!q->flush_rq)
goto err_hw;
+ if (blk_mq_init_hw_queues(q, reg, driver_data))
+ goto err_flush_rq;
+
blk_mq_map_swqueue(q);
mutex_lock(&all_q_mutex);
@@ -1406,6 +1406,9 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_reg *reg,
mutex_unlock(&all_q_mutex);
return q;
+
+err_flush_rq:
+ kfree(q->flush_rq);
err_hw:
kfree(q->mq_map);
err_map:
@@ -1429,7 +1432,6 @@ void blk_mq_free_queue(struct request_queue *q)
int i;
queue_for_each_hw_ctx(q, hctx, i) {
- cancel_delayed_work_sync(&hctx->delayed_work);
kfree(hctx->ctx_map);
kfree(hctx->ctxs);
blk_mq_free_rq_map(hctx);
@@ -1451,7 +1453,6 @@ void blk_mq_free_queue(struct request_queue *q)
list_del_init(&q->all_q_node);
mutex_unlock(&all_q_mutex);
}
-EXPORT_SYMBOL(blk_mq_free_queue);
/* Basically redo blk_mq_init_queue with queue frozen */
static void blk_mq_queue_reinit(struct request_queue *q)
@@ -1495,11 +1496,6 @@ static int blk_mq_queue_reinit_notify(struct notifier_block *nb,
static int __init blk_mq_init(void)
{
- unsigned int i;
-
- for_each_possible_cpu(i)
- init_llist_head(&per_cpu(ipi_lists, i));
-
blk_mq_cpu_init();
/* Must be called after percpu_counter_hotcpu_callback() */
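
The completion rework in blk-mq.c replaces the per-cpu llist/IPI scheme with the block layer's softirq_done_fn: blk_mq_complete_request() marks the request complete, and __blk_mq_complete_request() either runs the driver's completion inline or, when ipi_redirect is set and the submitting CPU is online and different from the current one, ships it there with __smp_call_function_single(). The routing decision, sketched with stubs (send_to_cpu() is a stand-in, not a kernel API):

    #include <stdbool.h>
    #include <stdio.h>

    static void softirq_done(int rq)         { printf("rq %d completed locally\n", rq); }
    static void send_to_cpu(int cpu, int rq) { printf("rq %d shipped to cpu %d\n", rq, cpu); }
    static bool cpu_online(int cpu)          { return cpu >= 0; }

    static void complete_request(int rq, int submit_cpu, int cur_cpu, bool ipi_redirect)
    {
        if (!ipi_redirect) {            /* completion affinity not requested */
            softirq_done(rq);
            return;
        }
        if (cur_cpu != submit_cpu && cpu_online(submit_cpu))
            send_to_cpu(submit_cpu, rq);    /* finish where the request was submitted */
        else
            softirq_done(rq);
    }

    int main(void)
    {
        complete_request(1, 2, 0, true);    /* different cpu: routed via IPI */
        complete_request(2, 0, 0, true);    /* same cpu: completed inline */
        return 0;
    }
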
diff --git a/block/blk-mq.h b/block/blk-mq.h
index 52bf1f96a2c2..ed0035cd458e 100644
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -22,11 +22,13 @@ struct blk_mq_ctx {
struct kobject kobj;
};
-void __blk_mq_end_io(struct request *rq, int error);
-void blk_mq_complete_request(struct request *rq, int error);
+void __blk_mq_complete_request(struct request *rq);
void blk_mq_run_request(struct request *rq, bool run_queue, bool async);
void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
void blk_mq_init_flush(struct request_queue *q);
+void blk_mq_drain_queue(struct request_queue *q);
+void blk_mq_free_queue(struct request_queue *q);
+void blk_mq_rq_init(struct blk_mq_hw_ctx *hctx, struct request *rq);
/*
* CPU hotplug helpers
@@ -38,7 +40,6 @@ void blk_mq_init_cpu_notifier(struct blk_mq_cpu_notifier *notifier,
void blk_mq_register_cpu_notifier(struct blk_mq_cpu_notifier *notifier);
void blk_mq_unregister_cpu_notifier(struct blk_mq_cpu_notifier *notifier);
void blk_mq_cpu_init(void);
-DECLARE_PER_CPU(struct llist_head, ipi_lists);
/*
* CPU -> queue mappings
diff --git a/block/blk-settings.c b/block/blk-settings.c
index 05e826793e4e..5d21239bc859 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -592,6 +592,10 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
ret = -1;
}
+ t->raid_partial_stripes_expensive =
+ max(t->raid_partial_stripes_expensive,
+ b->raid_partial_stripes_expensive);
+
/* Find lowest common alignment_offset */
t->alignment_offset = lcm(t->alignment_offset, alignment)
& (max(t->physical_block_size, t->io_min) - 1);
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index 97779522472f..7500f876dae4 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -11,6 +11,7 @@
#include "blk.h"
#include "blk-cgroup.h"
+#include "blk-mq.h"
struct queue_sysfs_entry {
struct attribute attr;
@@ -548,6 +549,8 @@ static void blk_release_queue(struct kobject *kobj)
if (q->mq_ops)
blk_mq_free_queue(q);
+ kfree(q->flush_rq);
+
blk_trace_shutdown(q);
bdi_destroy(&q->backing_dev_info);
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index 06534049afba..1474c3ab7e72 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -877,14 +877,14 @@ static bool tg_with_in_bps_limit(struct throtl_grp *tg, struct bio *bio,
do_div(tmp, HZ);
bytes_allowed = tmp;
- if (tg->bytes_disp[rw] + bio->bi_size <= bytes_allowed) {
+ if (tg->bytes_disp[rw] + bio->bi_iter.bi_size <= bytes_allowed) {
if (wait)
*wait = 0;
return 1;
}
/* Calc approx time to dispatch */
- extra_bytes = tg->bytes_disp[rw] + bio->bi_size - bytes_allowed;
+ extra_bytes = tg->bytes_disp[rw] + bio->bi_iter.bi_size - bytes_allowed;
jiffy_wait = div64_u64(extra_bytes * HZ, tg->bps[rw]);
if (!jiffy_wait)
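
For the bps check above, bytes_allowed is the byte budget accrued so far in the slice (roughly tg->bps[rw] * elapsed jiffies / HZ); if the bytes already dispatched plus this bio exceed it, the wait is the time needed to earn the shortfall back at the configured rate. A worked example with illustrative numbers:

    #include <stdio.h>

    int main(void)
    {
        const unsigned long long HZ = 1000;         /* illustrative tick rate */
        unsigned long long bps = 1048576;           /* 1 MiB/s limit */
        unsigned long long elapsed_jiffies = 250;   /* 0.25 s into the slice */
        unsigned long long bytes_disp = 200000;     /* dispatched so far */
        unsigned long long bio_size = 131072;       /* 128 KiB bio */

        unsigned long long bytes_allowed = bps * elapsed_jiffies / HZ;  /* 262144 */

        if (bytes_disp + bio_size <= bytes_allowed) {
            printf("within budget, dispatch now\n");
        } else {
            unsigned long long extra = bytes_disp + bio_size - bytes_allowed;
            unsigned long long jiffy_wait = extra * HZ / bps;   /* 65 jiffies here */

            printf("over budget by %llu bytes, wait ~%llu jiffies\n", extra, jiffy_wait);
        }
        return 0;
    }
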
@@ -987,7 +987,7 @@ static void throtl_charge_bio(struct throtl_grp *tg, struct bio *bio)
bool rw = bio_data_dir(bio);
/* Charge the bio to the group */
- tg->bytes_disp[rw] += bio->bi_size;
+ tg->bytes_disp[rw] += bio->bi_iter.bi_size;
tg->io_disp[rw]++;
/*
@@ -1003,8 +1003,8 @@ static void throtl_charge_bio(struct throtl_grp *tg, struct bio *bio)
*/
if (!(bio->bi_rw & REQ_THROTTLED)) {
bio->bi_rw |= REQ_THROTTLED;
- throtl_update_dispatch_stats(tg_to_blkg(tg), bio->bi_size,
- bio->bi_rw);
+ throtl_update_dispatch_stats(tg_to_blkg(tg),
+ bio->bi_iter.bi_size, bio->bi_rw);
}
}
@@ -1303,13 +1303,10 @@ static u64 tg_prfill_cpu_rwstat(struct seq_file *sf,
return __blkg_prfill_rwstat(sf, pd, &rwstat);
}
-static int tg_print_cpu_rwstat(struct cgroup_subsys_state *css,
- struct cftype *cft, struct seq_file *sf)
+static int tg_print_cpu_rwstat(struct seq_file *sf, void *v)
{
- struct blkcg *blkcg = css_to_blkcg(css);
-
- blkcg_print_blkgs(sf, blkcg, tg_prfill_cpu_rwstat, &blkcg_policy_throtl,
- cft->private, true);
+ blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), tg_prfill_cpu_rwstat,
+ &blkcg_policy_throtl, seq_cft(sf)->private, true);
return 0;
}
@@ -1335,19 +1332,17 @@ static u64 tg_prfill_conf_uint(struct seq_file *sf, struct blkg_policy_data *pd,
return __blkg_prfill_u64(sf, pd, v);
}
-static int tg_print_conf_u64(struct cgroup_subsys_state *css,
- struct cftype *cft, struct seq_file *sf)
+static int tg_print_conf_u64(struct seq_file *sf, void *v)
{
- blkcg_print_blkgs(sf, css_to_blkcg(css), tg_prfill_conf_u64,
- &blkcg_policy_throtl, cft->private, false);
+ blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), tg_prfill_conf_u64,
+ &blkcg_policy_throtl, seq_cft(sf)->private, false);
return 0;
}
-static int tg_print_conf_uint(struct cgroup_subsys_state *css,
- struct cftype *cft, struct seq_file *sf)
+static int tg_print_conf_uint(struct seq_file *sf, void *v)
{
- blkcg_print_blkgs(sf, css_to_blkcg(css), tg_prfill_conf_uint,
- &blkcg_policy_throtl, cft->private, false);
+ blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), tg_prfill_conf_uint,
+ &blkcg_policy_throtl, seq_cft(sf)->private, false);
return 0;
}
@@ -1428,40 +1423,40 @@ static struct cftype throtl_files[] = {
{
.name = "throttle.read_bps_device",
.private = offsetof(struct throtl_grp, bps[READ]),
- .read_seq_string = tg_print_conf_u64,
+ .seq_show = tg_print_conf_u64,
.write_string = tg_set_conf_u64,
.max_write_len = 256,
},
{
.name = "throttle.write_bps_device",
.private = offsetof(struct throtl_grp, bps[WRITE]),
- .read_seq_string = tg_print_conf_u64,
+ .seq_show = tg_print_conf_u64,
.write_string = tg_set_conf_u64,
.max_write_len = 256,
},
{
.name = "throttle.read_iops_device",
.private = offsetof(struct throtl_grp, iops[READ]),
- .read_seq_string = tg_print_conf_uint,
+ .seq_show = tg_print_conf_uint,
.write_string = tg_set_conf_uint,
.max_write_len = 256,
},
{
.name = "throttle.write_iops_device",
.private = offsetof(struct throtl_grp, iops[WRITE]),
- .read_seq_string = tg_print_conf_uint,
+ .seq_show = tg_print_conf_uint,
.write_string = tg_set_conf_uint,
.max_write_len = 256,
},
{
.name = "throttle.io_service_bytes",
.private = offsetof(struct tg_stats_cpu, service_bytes),
- .read_seq_string = tg_print_cpu_rwstat,
+ .seq_show = tg_print_cpu_rwstat,
},
{
.name = "throttle.io_serviced",
.private = offsetof(struct tg_stats_cpu, serviced),
- .read_seq_string = tg_print_cpu_rwstat,
+ .seq_show = tg_print_cpu_rwstat,
},
{ } /* terminate */
};
@@ -1508,7 +1503,7 @@ bool blk_throtl_bio(struct request_queue *q, struct bio *bio)
if (tg) {
if (!tg->has_rules[rw]) {
throtl_update_dispatch_stats(tg_to_blkg(tg),
- bio->bi_size, bio->bi_rw);
+ bio->bi_iter.bi_size, bio->bi_rw);
goto out_unlock_rcu;
}
}
@@ -1564,7 +1559,7 @@ bool blk_throtl_bio(struct request_queue *q, struct bio *bio)
/* out-of-limit, queue to @tg */
throtl_log(sq, "[%c] bio. bdisp=%llu sz=%u bps=%llu iodisp=%u iops=%u queued=%d/%d",
rw == READ ? 'R' : 'W',
- tg->bytes_disp[rw], bio->bi_size, tg->bps[rw],
+ tg->bytes_disp[rw], bio->bi_iter.bi_size, tg->bps[rw],
tg->io_disp[rw], tg->iops[rw],
sq->nr_queued[READ], sq->nr_queued[WRITE]);
diff --git a/block/blk-timeout.c b/block/blk-timeout.c
index bba81c9348e1..d96f7061c6fd 100644
--- a/block/blk-timeout.c
+++ b/block/blk-timeout.c
@@ -91,7 +91,7 @@ static void blk_rq_timed_out(struct request *req)
case BLK_EH_HANDLED:
/* Can we use req->errors here? */
if (q->mq_ops)
- blk_mq_complete_request(req, req->errors);
+ __blk_mq_complete_request(req);
else
__blk_complete_request(req);
break;
diff --git a/block/blk.h b/block/blk.h
index c90e1d8f7a2b..d23b415b8a28 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -113,7 +113,7 @@ static inline struct request *__elv_next_request(struct request_queue *q)
q->flush_queue_delayed = 1;
return NULL;
}
- if (unlikely(blk_queue_dying(q)) ||
+ if (unlikely(blk_queue_bypass(q)) ||
!q->elevator->type->ops.elevator_dispatch_fn(q, 0))
return NULL;
}
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 4d5cec1ad80d..744833b630c6 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -1632,11 +1632,11 @@ static u64 cfqg_prfill_weight_device(struct seq_file *sf,
return __blkg_prfill_u64(sf, pd, cfqg->dev_weight);
}
-static int cfqg_print_weight_device(struct cgroup_subsys_state *css,
- struct cftype *cft, struct seq_file *sf)
+static int cfqg_print_weight_device(struct seq_file *sf, void *v)
{
- blkcg_print_blkgs(sf, css_to_blkcg(css), cfqg_prfill_weight_device,
- &blkcg_policy_cfq, 0, false);
+ blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
+ cfqg_prfill_weight_device, &blkcg_policy_cfq,
+ 0, false);
return 0;
}
@@ -1650,26 +1650,23 @@ static u64 cfqg_prfill_leaf_weight_device(struct seq_file *sf,
return __blkg_prfill_u64(sf, pd, cfqg->dev_leaf_weight);
}
-static int cfqg_print_leaf_weight_device(struct cgroup_subsys_state *css,
- struct cftype *cft,
- struct seq_file *sf)
+static int cfqg_print_leaf_weight_device(struct seq_file *sf, void *v)
{
- blkcg_print_blkgs(sf, css_to_blkcg(css), cfqg_prfill_leaf_weight_device,
- &blkcg_policy_cfq, 0, false);
+ blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
+ cfqg_prfill_leaf_weight_device, &blkcg_policy_cfq,
+ 0, false);
return 0;
}
-static int cfq_print_weight(struct cgroup_subsys_state *css, struct cftype *cft,
- struct seq_file *sf)
+static int cfq_print_weight(struct seq_file *sf, void *v)
{
- seq_printf(sf, "%u\n", css_to_blkcg(css)->cfq_weight);
+ seq_printf(sf, "%u\n", css_to_blkcg(seq_css(sf))->cfq_weight);
return 0;
}
-static int cfq_print_leaf_weight(struct cgroup_subsys_state *css,
- struct cftype *cft, struct seq_file *sf)
+static int cfq_print_leaf_weight(struct seq_file *sf, void *v)
{
- seq_printf(sf, "%u\n", css_to_blkcg(css)->cfq_leaf_weight);
+ seq_printf(sf, "%u\n", css_to_blkcg(seq_css(sf))->cfq_leaf_weight);
return 0;
}
@@ -1762,23 +1759,17 @@ static int cfq_set_leaf_weight(struct cgroup_subsys_state *css,
return __cfq_set_weight(css, cft, val, true);
}
-static int cfqg_print_stat(struct cgroup_subsys_state *css, struct cftype *cft,
- struct seq_file *sf)
+static int cfqg_print_stat(struct seq_file *sf, void *v)
{
- struct blkcg *blkcg = css_to_blkcg(css);
-
- blkcg_print_blkgs(sf, blkcg, blkg_prfill_stat, &blkcg_policy_cfq,
- cft->private, false);
+ blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), blkg_prfill_stat,
+ &blkcg_policy_cfq, seq_cft(sf)->private, false);
return 0;
}
-static int cfqg_print_rwstat(struct cgroup_subsys_state *css,
- struct cftype *cft, struct seq_file *sf)
+static int cfqg_print_rwstat(struct seq_file *sf, void *v)
{
- struct blkcg *blkcg = css_to_blkcg(css);
-
- blkcg_print_blkgs(sf, blkcg, blkg_prfill_rwstat, &blkcg_policy_cfq,
- cft->private, true);
+ blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), blkg_prfill_rwstat,
+ &blkcg_policy_cfq, seq_cft(sf)->private, true);
return 0;
}
@@ -1798,23 +1789,19 @@ static u64 cfqg_prfill_rwstat_recursive(struct seq_file *sf,
return __blkg_prfill_rwstat(sf, pd, &sum);
}
-static int cfqg_print_stat_recursive(struct cgroup_subsys_state *css,
- struct cftype *cft, struct seq_file *sf)
+static int cfqg_print_stat_recursive(struct seq_file *sf, void *v)
{
- struct blkcg *blkcg = css_to_blkcg(css);
-
- blkcg_print_blkgs(sf, blkcg, cfqg_prfill_stat_recursive,
- &blkcg_policy_cfq, cft->private, false);
+ blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
+ cfqg_prfill_stat_recursive, &blkcg_policy_cfq,
+ seq_cft(sf)->private, false);
return 0;
}
-static int cfqg_print_rwstat_recursive(struct cgroup_subsys_state *css,
- struct cftype *cft, struct seq_file *sf)
+static int cfqg_print_rwstat_recursive(struct seq_file *sf, void *v)
{
- struct blkcg *blkcg = css_to_blkcg(css);
-
- blkcg_print_blkgs(sf, blkcg, cfqg_prfill_rwstat_recursive,
- &blkcg_policy_cfq, cft->private, true);
+ blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
+ cfqg_prfill_rwstat_recursive, &blkcg_policy_cfq,
+ seq_cft(sf)->private, true);
return 0;
}
@@ -1835,13 +1822,11 @@ static u64 cfqg_prfill_avg_queue_size(struct seq_file *sf,
}
/* print avg_queue_size */
-static int cfqg_print_avg_queue_size(struct cgroup_subsys_state *css,
- struct cftype *cft, struct seq_file *sf)
+static int cfqg_print_avg_queue_size(struct seq_file *sf, void *v)
{
- struct blkcg *blkcg = css_to_blkcg(css);
-
- blkcg_print_blkgs(sf, blkcg, cfqg_prfill_avg_queue_size,
- &blkcg_policy_cfq, 0, false);
+ blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
+ cfqg_prfill_avg_queue_size, &blkcg_policy_cfq,
+ 0, false);
return 0;
}
#endif /* CONFIG_DEBUG_BLK_CGROUP */
@@ -1851,14 +1836,14 @@ static struct cftype cfq_blkcg_files[] = {
{
.name = "weight_device",
.flags = CFTYPE_ONLY_ON_ROOT,
- .read_seq_string = cfqg_print_leaf_weight_device,
+ .seq_show = cfqg_print_leaf_weight_device,
.write_string = cfqg_set_leaf_weight_device,
.max_write_len = 256,
},
{
.name = "weight",
.flags = CFTYPE_ONLY_ON_ROOT,
- .read_seq_string = cfq_print_leaf_weight,
+ .seq_show = cfq_print_leaf_weight,
.write_u64 = cfq_set_leaf_weight,
},
@@ -1866,26 +1851,26 @@ static struct cftype cfq_blkcg_files[] = {
{
.name = "weight_device",
.flags = CFTYPE_NOT_ON_ROOT,
- .read_seq_string = cfqg_print_weight_device,
+ .seq_show = cfqg_print_weight_device,
.write_string = cfqg_set_weight_device,
.max_write_len = 256,
},
{
.name = "weight",
.flags = CFTYPE_NOT_ON_ROOT,
- .read_seq_string = cfq_print_weight,
+ .seq_show = cfq_print_weight,
.write_u64 = cfq_set_weight,
},
{
.name = "leaf_weight_device",
- .read_seq_string = cfqg_print_leaf_weight_device,
+ .seq_show = cfqg_print_leaf_weight_device,
.write_string = cfqg_set_leaf_weight_device,
.max_write_len = 256,
},
{
.name = "leaf_weight",
- .read_seq_string = cfq_print_leaf_weight,
+ .seq_show = cfq_print_leaf_weight,
.write_u64 = cfq_set_leaf_weight,
},
@@ -1893,114 +1878,114 @@ static struct cftype cfq_blkcg_files[] = {
{
.name = "time",
.private = offsetof(struct cfq_group, stats.time),
- .read_seq_string = cfqg_print_stat,
+ .seq_show = cfqg_print_stat,
},
{
.name = "sectors",
.private = offsetof(struct cfq_group, stats.sectors),
- .read_seq_string = cfqg_print_stat,
+ .seq_show = cfqg_print_stat,
},
{
.name = "io_service_bytes",
.private = offsetof(struct cfq_group, stats.service_bytes),
- .read_seq_string = cfqg_print_rwstat,
+ .seq_show = cfqg_print_rwstat,
},
{
.name = "io_serviced",
.private = offsetof(struct cfq_group, stats.serviced),
- .read_seq_string = cfqg_print_rwstat,
+ .seq_show = cfqg_print_rwstat,
},
{
.name = "io_service_time",
.private = offsetof(struct cfq_group, stats.service_time),
- .read_seq_string = cfqg_print_rwstat,
+ .seq_show = cfqg_print_rwstat,
},
{
.name = "io_wait_time",
.private = offsetof(struct cfq_group, stats.wait_time),
- .read_seq_string = cfqg_print_rwstat,
+ .seq_show = cfqg_print_rwstat,
},
{
.name = "io_merged",
.private = offsetof(struct cfq_group, stats.merged),
- .read_seq_string = cfqg_print_rwstat,
+ .seq_show = cfqg_print_rwstat,
},
{
.name = "io_queued",
.private = offsetof(struct cfq_group, stats.queued),
- .read_seq_string = cfqg_print_rwstat,
+ .seq_show = cfqg_print_rwstat,
},
/* the same statistics which cover the cfqg and its descendants */
{
.name = "time_recursive",
.private = offsetof(struct cfq_group, stats.time),
- .read_seq_string = cfqg_print_stat_recursive,
+ .seq_show = cfqg_print_stat_recursive,
},
{
.name = "sectors_recursive",
.private = offsetof(struct cfq_group, stats.sectors),
- .read_seq_string = cfqg_print_stat_recursive,
+ .seq_show = cfqg_print_stat_recursive,
},
{
.name = "io_service_bytes_recursive",
.private = offsetof(struct cfq_group, stats.service_bytes),
- .read_seq_string = cfqg_print_rwstat_recursive,
+ .seq_show = cfqg_print_rwstat_recursive,
},
{
.name = "io_serviced_recursive",
.private = offsetof(struct cfq_group, stats.serviced),
- .read_seq_string = cfqg_print_rwstat_recursive,
+ .seq_show = cfqg_print_rwstat_recursive,
},
{
.name = "io_service_time_recursive",
.private = offsetof(struct cfq_group, stats.service_time),
- .read_seq_string = cfqg_print_rwstat_recursive,
+ .seq_show = cfqg_print_rwstat_recursive,
},
{
.name = "io_wait_time_recursive",
.private = offsetof(struct cfq_group, stats.wait_time),
- .read_seq_string = cfqg_print_rwstat_recursive,
+ .seq_show = cfqg_print_rwstat_recursive,
},
{
.name = "io_merged_recursive",
.private = offsetof(struct cfq_group, stats.merged),
- .read_seq_string = cfqg_print_rwstat_recursive,
+ .seq_show = cfqg_print_rwstat_recursive,
},
{
.name = "io_queued_recursive",
.private = offsetof(struct cfq_group, stats.queued),
- .read_seq_string = cfqg_print_rwstat_recursive,
+ .seq_show = cfqg_print_rwstat_recursive,
},
#ifdef CONFIG_DEBUG_BLK_CGROUP
{
.name = "avg_queue_size",
- .read_seq_string = cfqg_print_avg_queue_size,
+ .seq_show = cfqg_print_avg_queue_size,
},
{
.name = "group_wait_time",
.private = offsetof(struct cfq_group, stats.group_wait_time),
- .read_seq_string = cfqg_print_stat,
+ .seq_show = cfqg_print_stat,
},
{
.name = "idle_time",
.private = offsetof(struct cfq_group, stats.idle_time),
- .read_seq_string = cfqg_print_stat,
+ .seq_show = cfqg_print_stat,
},
{
.name = "empty_time",
.private = offsetof(struct cfq_group, stats.empty_time),
- .read_seq_string = cfqg_print_stat,
+ .seq_show = cfqg_print_stat,
},
{
.name = "dequeue",
.private = offsetof(struct cfq_group, stats.dequeue),
- .read_seq_string = cfqg_print_stat,
+ .seq_show = cfqg_print_stat,
},
{
.name = "unaccounted_time",
.private = offsetof(struct cfq_group, stats.unaccounted_time),
- .read_seq_string = cfqg_print_stat,
+ .seq_show = cfqg_print_stat,
},
#endif /* CONFIG_DEBUG_BLK_CGROUP */
{ } /* terminate */
diff --git a/block/cmdline-parser.c b/block/cmdline-parser.c
index cc2637f8674e..9dbc67e42a99 100644
--- a/block/cmdline-parser.c
+++ b/block/cmdline-parser.c
@@ -4,8 +4,7 @@
* Written by Cai Zhiyong <caizhiyong@huawei.com>
*
*/
-#include <linux/buffer_head.h>
-#include <linux/module.h>
+#include <linux/export.h>
#include <linux/cmdline-parser.h>
static int parse_subpart(struct cmdline_subpart **subpart, char *partdef)
@@ -159,6 +158,7 @@ void cmdline_parts_free(struct cmdline_parts **parts)
*parts = next_parts;
}
}
+EXPORT_SYMBOL(cmdline_parts_free);
int cmdline_parts_parse(struct cmdline_parts **parts, const char *cmdline)
{
@@ -206,6 +206,7 @@ fail:
cmdline_parts_free(parts);
goto done;
}
+EXPORT_SYMBOL(cmdline_parts_parse);
struct cmdline_parts *cmdline_parts_find(struct cmdline_parts *parts,
const char *bdev)
@@ -214,17 +215,17 @@ struct cmdline_parts *cmdline_parts_find(struct cmdline_parts *parts,
parts = parts->next_parts;
return parts;
}
+EXPORT_SYMBOL(cmdline_parts_find);
/*
* add_part()
* 0 success.
* 1 can not add so many partitions.
*/
-void cmdline_parts_set(struct cmdline_parts *parts, sector_t disk_size,
- int slot,
- int (*add_part)(int, struct cmdline_subpart *, void *),
- void *param)
-
+int cmdline_parts_set(struct cmdline_parts *parts, sector_t disk_size,
+ int slot,
+ int (*add_part)(int, struct cmdline_subpart *, void *),
+ void *param)
{
sector_t from = 0;
struct cmdline_subpart *subpart;
@@ -247,4 +248,7 @@ void cmdline_parts_set(struct cmdline_parts *parts, sector_t disk_size,
if (add_part(slot, subpart, param))
break;
}
+
+ return slot;
}
+EXPORT_SYMBOL(cmdline_parts_set);
diff --git a/block/elevator.c b/block/elevator.c
index b7ff2861b6bd..42c45a7d6714 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -440,7 +440,7 @@ int elv_merge(struct request_queue *q, struct request **req, struct bio *bio)
/*
* See if our hash lookup can find a potential backmerge.
*/
- __rq = elv_rqhash_find(q, bio->bi_sector);
+ __rq = elv_rqhash_find(q, bio->bi_iter.bi_sector);
if (__rq && elv_rq_merge_ok(__rq, bio)) {
*req = __rq;
return ELEVATOR_BACK_MERGE;
diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
index 625e3e471d65..26487972ac54 100644
--- a/block/scsi_ioctl.c
+++ b/block/scsi_ioctl.c
@@ -323,12 +323,14 @@ static int sg_io(struct request_queue *q, struct gendisk *bd_disk,
if (hdr->iovec_count) {
size_t iov_data_len;
- struct iovec *iov;
+ struct iovec *iov = NULL;
ret = rw_copy_check_uvector(-1, hdr->dxferp, hdr->iovec_count,
0, NULL, &iov);
- if (ret < 0)
+ if (ret < 0) {
+ kfree(iov);
goto out;
+ }
iov_data_len = ret;
ret = 0;