From 1671d522cdd9933dee7dddfcf9f62c561283824a Mon Sep 17 00:00:00 2001 From: Ming Lei Date: Mon, 27 Mar 2017 20:06:57 +0800 Subject: block: rename blk_mq_freeze_queue_start() As the .q_usage_counter is used by both legacy and mq path, we need to block new I/O if queue becomes dead in blk_queue_enter(). So rename it and we can use this function in both paths. Reviewed-by: Bart Van Assche Reviewed-by: Hannes Reinecke Signed-off-by: Ming Lei Reviewed-by: Johannes Thumshirn Signed-off-by: Jens Axboe --- drivers/block/mtip32xx/mtip32xx.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/block/mtip32xx') diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c index f96ab717534c..c96c35ab39df 100644 --- a/drivers/block/mtip32xx/mtip32xx.c +++ b/drivers/block/mtip32xx/mtip32xx.c @@ -4162,7 +4162,7 @@ static int mtip_block_remove(struct driver_data *dd) dev_info(&dd->pdev->dev, "device %s surprise removal\n", dd->disk->disk_name); - blk_mq_freeze_queue_start(dd->queue); + blk_freeze_queue_start(dd->queue); blk_mq_stop_hw_queues(dd->queue); blk_mq_tagset_busy_iter(&dd->tags, mtip_no_dev_cleanup, dd); -- cgit v1.2.3 From f363b089be0a39fe4282c688118a51d21f952bc7 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Thu, 30 Mar 2017 13:39:16 -0700 Subject: blk-mq: constify struct blk_mq_ops Constify all instances of blk_mq_ops, as they are never modified. Signed-off-by: Eric Biggers Signed-off-by: Jens Axboe --- drivers/block/loop.c | 2 +- drivers/block/mtip32xx/mtip32xx.c | 2 +- drivers/block/nbd.c | 2 +- drivers/block/null_blk.c | 2 +- drivers/block/rbd.c | 2 +- drivers/block/virtio_blk.c | 2 +- drivers/block/xen-blkfront.c | 2 +- drivers/md/dm-rq.c | 2 +- drivers/mtd/ubi/block.c | 2 +- drivers/nvme/host/fc.c | 4 ++-- drivers/nvme/host/pci.c | 4 ++-- drivers/nvme/host/rdma.c | 4 ++-- drivers/nvme/target/loop.c | 4 ++-- drivers/scsi/scsi_lib.c | 2 +- 14 files changed, 18 insertions(+), 18 deletions(-) (limited to 'drivers/block/mtip32xx') diff --git a/drivers/block/loop.c b/drivers/block/loop.c index 0ecb6461ed81..cc981f34e017 100644 --- a/drivers/block/loop.c +++ b/drivers/block/loop.c @@ -1710,7 +1710,7 @@ static int loop_init_request(void *data, struct request *rq, return 0; } -static struct blk_mq_ops loop_mq_ops = { +static const struct blk_mq_ops loop_mq_ops = { .queue_rq = loop_queue_rq, .init_request = loop_init_request, }; diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c index c96c35ab39df..30076e7753bc 100644 --- a/drivers/block/mtip32xx/mtip32xx.c +++ b/drivers/block/mtip32xx/mtip32xx.c @@ -3889,7 +3889,7 @@ exit_handler: return BLK_EH_RESET_TIMER; } -static struct blk_mq_ops mtip_mq_ops = { +static const struct blk_mq_ops mtip_mq_ops = { .queue_rq = mtip_queue_rq, .init_request = mtip_init_cmd, .exit_request = mtip_free_cmd, diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c index 7e4287bc19e5..d3be8ea2f80d 100644 --- a/drivers/block/nbd.c +++ b/drivers/block/nbd.c @@ -965,7 +965,7 @@ static int nbd_init_request(void *data, struct request *rq, return 0; } -static struct blk_mq_ops nbd_mq_ops = { +static const struct blk_mq_ops nbd_mq_ops = { .queue_rq = nbd_queue_rq, .init_request = nbd_init_request, .timeout = nbd_xmit_timeout, diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c index 444f8d88f1a8..f93906ff31e8 100644 --- a/drivers/block/null_blk.c +++ b/drivers/block/null_blk.c @@ -398,7 +398,7 @@ static int null_init_hctx(struct blk_mq_hw_ctx *hctx, void *data, return 0; } -static struct 
blk_mq_ops null_mq_ops = { +static const struct blk_mq_ops null_mq_ops = { .queue_rq = null_queue_rq, .init_hctx = null_init_hctx, .complete = null_softirq_done_fn, diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index 517838b65964..f24ade333e0c 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -4317,7 +4317,7 @@ static int rbd_init_request(void *data, struct request *rq, return 0; } -static struct blk_mq_ops rbd_mq_ops = { +static const struct blk_mq_ops rbd_mq_ops = { .queue_rq = rbd_queue_rq, .init_request = rbd_init_request, }; diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c index 1d4c9f8bc1e1..2d8290169271 100644 --- a/drivers/block/virtio_blk.c +++ b/drivers/block/virtio_blk.c @@ -597,7 +597,7 @@ static int virtblk_map_queues(struct blk_mq_tag_set *set) return blk_mq_virtio_map_queues(set, vblk->vdev, 0); } -static struct blk_mq_ops virtio_mq_ops = { +static const struct blk_mq_ops virtio_mq_ops = { .queue_rq = virtio_queue_rq, .complete = virtblk_request_done, .init_request = virtblk_init_request, diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c index 5067a0a952cb..d137ef8a72be 100644 --- a/drivers/block/xen-blkfront.c +++ b/drivers/block/xen-blkfront.c @@ -907,7 +907,7 @@ out_busy: return BLK_MQ_RQ_QUEUE_BUSY; } -static struct blk_mq_ops blkfront_mq_ops = { +static const struct blk_mq_ops blkfront_mq_ops = { .queue_rq = blkif_queue_rq, }; diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c index 28955b94d2b2..6886bf160fb2 100644 --- a/drivers/md/dm-rq.c +++ b/drivers/md/dm-rq.c @@ -761,7 +761,7 @@ static int dm_mq_queue_rq(struct blk_mq_hw_ctx *hctx, return BLK_MQ_RQ_QUEUE_OK; } -static struct blk_mq_ops dm_mq_ops = { +static const struct blk_mq_ops dm_mq_ops = { .queue_rq = dm_mq_queue_rq, .complete = dm_softirq_done, .init_request = dm_mq_init_request, diff --git a/drivers/mtd/ubi/block.c b/drivers/mtd/ubi/block.c index c80869e60909..51f2be8889b5 100644 --- a/drivers/mtd/ubi/block.c +++ b/drivers/mtd/ubi/block.c @@ -347,7 +347,7 @@ static int ubiblock_init_request(void *data, struct request *req, return 0; } -static struct blk_mq_ops ubiblock_mq_ops = { +static const struct blk_mq_ops ubiblock_mq_ops = { .queue_rq = ubiblock_queue_rq, .init_request = ubiblock_init_request, }; diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c index 9690beb15e69..5939a0a4f7a5 100644 --- a/drivers/nvme/host/fc.c +++ b/drivers/nvme/host/fc.c @@ -1948,7 +1948,7 @@ nvme_fc_complete_rq(struct request *rq) blk_mq_end_request(rq, error); } -static struct blk_mq_ops nvme_fc_mq_ops = { +static const struct blk_mq_ops nvme_fc_mq_ops = { .queue_rq = nvme_fc_queue_rq, .complete = nvme_fc_complete_rq, .init_request = nvme_fc_init_request, @@ -1959,7 +1959,7 @@ static struct blk_mq_ops nvme_fc_mq_ops = { .timeout = nvme_fc_timeout, }; -static struct blk_mq_ops nvme_fc_admin_mq_ops = { +static const struct blk_mq_ops nvme_fc_admin_mq_ops = { .queue_rq = nvme_fc_queue_rq, .complete = nvme_fc_complete_rq, .init_request = nvme_fc_init_admin_request, diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index 26a5fd05fe88..095b061416bf 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c @@ -1129,7 +1129,7 @@ static int nvme_create_queue(struct nvme_queue *nvmeq, int qid) return result; } -static struct blk_mq_ops nvme_mq_admin_ops = { +static const struct blk_mq_ops nvme_mq_admin_ops = { .queue_rq = nvme_queue_rq, .complete = nvme_complete_rq, .init_hctx = nvme_admin_init_hctx, @@ -1138,7 +1138,7 @@ static struct 
blk_mq_ops nvme_mq_admin_ops = { .timeout = nvme_timeout, }; -static struct blk_mq_ops nvme_mq_ops = { +static const struct blk_mq_ops nvme_mq_ops = { .queue_rq = nvme_queue_rq, .complete = nvme_complete_rq, .init_hctx = nvme_init_hctx, diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c index 779f516e7a4e..3a677a43450f 100644 --- a/drivers/nvme/host/rdma.c +++ b/drivers/nvme/host/rdma.c @@ -1517,7 +1517,7 @@ static void nvme_rdma_complete_rq(struct request *rq) blk_mq_end_request(rq, error); } -static struct blk_mq_ops nvme_rdma_mq_ops = { +static const struct blk_mq_ops nvme_rdma_mq_ops = { .queue_rq = nvme_rdma_queue_rq, .complete = nvme_rdma_complete_rq, .init_request = nvme_rdma_init_request, @@ -1528,7 +1528,7 @@ static struct blk_mq_ops nvme_rdma_mq_ops = { .timeout = nvme_rdma_timeout, }; -static struct blk_mq_ops nvme_rdma_admin_mq_ops = { +static const struct blk_mq_ops nvme_rdma_admin_mq_ops = { .queue_rq = nvme_rdma_queue_rq, .complete = nvme_rdma_complete_rq, .init_request = nvme_rdma_init_admin_request, diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c index d1f06e7768ff..4bfb285c32e8 100644 --- a/drivers/nvme/target/loop.c +++ b/drivers/nvme/target/loop.c @@ -270,7 +270,7 @@ static int nvme_loop_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data, return 0; } -static struct blk_mq_ops nvme_loop_mq_ops = { +static const struct blk_mq_ops nvme_loop_mq_ops = { .queue_rq = nvme_loop_queue_rq, .complete = nvme_loop_complete_rq, .init_request = nvme_loop_init_request, @@ -278,7 +278,7 @@ static struct blk_mq_ops nvme_loop_mq_ops = { .timeout = nvme_loop_timeout, }; -static struct blk_mq_ops nvme_loop_admin_mq_ops = { +static const struct blk_mq_ops nvme_loop_admin_mq_ops = { .queue_rq = nvme_loop_queue_rq, .complete = nvme_loop_complete_rq, .init_request = nvme_loop_init_admin_request, diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index 19125d72f322..c1519660824b 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c @@ -2154,7 +2154,7 @@ struct request_queue *scsi_alloc_queue(struct scsi_device *sdev) return q; } -static struct blk_mq_ops scsi_mq_ops = { +static const struct blk_mq_ops scsi_mq_ops = { .queue_rq = scsi_queue_rq, .complete = scsi_softirq_done, .timeout = scsi_timeout, -- cgit v1.2.3 From 48920ff2a5a940cd07d12cc79e4a2c75f1185aee Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 5 Apr 2017 19:21:23 +0200 Subject: block: remove the discard_zeroes_data flag Now that we use the proper REQ_OP_WRITE_ZEROES operation everywhere we can kill this hack. Signed-off-by: Christoph Hellwig Reviewed-by: Martin K. 
Petersen Reviewed-by: Hannes Reinecke Signed-off-by: Jens Axboe --- Documentation/ABI/testing/sysfs-block | 10 ++----- Documentation/block/queue-sysfs.txt | 5 ---- block/blk-lib.c | 7 +---- block/blk-settings.c | 3 --- block/blk-sysfs.c | 2 +- block/compat_ioctl.c | 2 +- block/ioctl.c | 2 +- drivers/block/drbd/drbd_main.c | 2 -- drivers/block/drbd/drbd_nl.c | 7 +---- drivers/block/loop.c | 2 -- drivers/block/mtip32xx/mtip32xx.c | 1 - drivers/block/nbd.c | 1 - drivers/md/dm-cache-target.c | 1 - drivers/md/dm-crypt.c | 1 - drivers/md/dm-raid.c | 6 ++--- drivers/md/dm-raid1.c | 1 - drivers/md/dm-table.c | 19 ------------- drivers/md/dm-thin.c | 2 -- drivers/md/raid5.c | 50 +++++++++++------------------------ drivers/scsi/sd.c | 5 ---- drivers/target/target_core_device.c | 2 +- include/linux/blkdev.h | 15 ----------- include/linux/device-mapper.h | 5 ---- 23 files changed, 27 insertions(+), 124 deletions(-) (limited to 'drivers/block/mtip32xx') diff --git a/Documentation/ABI/testing/sysfs-block b/Documentation/ABI/testing/sysfs-block index 2da04ce6aeef..dea212db9df3 100644 --- a/Documentation/ABI/testing/sysfs-block +++ b/Documentation/ABI/testing/sysfs-block @@ -213,14 +213,8 @@ What: /sys/block//queue/discard_zeroes_data Date: May 2011 Contact: Martin K. Petersen Description: - Devices that support discard functionality may return - stale or random data when a previously discarded block - is read back. This can cause problems if the filesystem - expects discarded blocks to be explicitly cleared. If a - device reports that it deterministically returns zeroes - when a discarded area is read the discard_zeroes_data - parameter will be set to one. Otherwise it will be 0 and - the result of reading a discarded area is undefined. + Will always return 0. Don't rely on any specific behavior + for discards, and don't read this file. What: /sys/block//queue/write_same_max_bytes Date: January 2012 diff --git a/Documentation/block/queue-sysfs.txt b/Documentation/block/queue-sysfs.txt index b7f6bdc96d73..2c1e67058fd3 100644 --- a/Documentation/block/queue-sysfs.txt +++ b/Documentation/block/queue-sysfs.txt @@ -43,11 +43,6 @@ large discards are issued, setting this value lower will make Linux issue smaller discards and potentially help reduce latencies induced by large discard operations. -discard_zeroes_data (RO) ------------------------- -When read, this file will show if the discarded block are zeroed by the -device or not. If its value is '1' the blocks are zeroed otherwise not. - hw_sector_size (RO) ------------------- This is the hardware sector size of the device, in bytes. 
diff --git a/block/blk-lib.c b/block/blk-lib.c index b0c6c4bcf441..e8caecd71688 100644 --- a/block/blk-lib.c +++ b/block/blk-lib.c @@ -37,17 +37,12 @@ int __blkdev_issue_discard(struct block_device *bdev, sector_t sector, return -ENXIO; if (flags & BLKDEV_DISCARD_SECURE) { - if (flags & BLKDEV_DISCARD_ZERO) - return -EOPNOTSUPP; if (!blk_queue_secure_erase(q)) return -EOPNOTSUPP; op = REQ_OP_SECURE_ERASE; } else { if (!blk_queue_discard(q)) return -EOPNOTSUPP; - if ((flags & BLKDEV_DISCARD_ZERO) && - !q->limits.discard_zeroes_data) - return -EOPNOTSUPP; op = REQ_OP_DISCARD; } @@ -126,7 +121,7 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector, &bio); if (!ret && bio) { ret = submit_bio_wait(bio); - if (ret == -EOPNOTSUPP && !(flags & BLKDEV_DISCARD_ZERO)) + if (ret == -EOPNOTSUPP) ret = 0; bio_put(bio); } diff --git a/block/blk-settings.c b/block/blk-settings.c index 1e7174ffc9d4..4fa81ed383ca 100644 --- a/block/blk-settings.c +++ b/block/blk-settings.c @@ -103,7 +103,6 @@ void blk_set_default_limits(struct queue_limits *lim) lim->discard_granularity = 0; lim->discard_alignment = 0; lim->discard_misaligned = 0; - lim->discard_zeroes_data = 0; lim->logical_block_size = lim->physical_block_size = lim->io_min = 512; lim->bounce_pfn = (unsigned long)(BLK_BOUNCE_ANY >> PAGE_SHIFT); lim->alignment_offset = 0; @@ -127,7 +126,6 @@ void blk_set_stacking_limits(struct queue_limits *lim) blk_set_default_limits(lim); /* Inherit limits from component devices */ - lim->discard_zeroes_data = 1; lim->max_segments = USHRT_MAX; lim->max_discard_segments = 1; lim->max_hw_sectors = UINT_MAX; @@ -609,7 +607,6 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b, t->io_opt = lcm_not_zero(t->io_opt, b->io_opt); t->cluster &= b->cluster; - t->discard_zeroes_data &= b->discard_zeroes_data; /* Physical block size a multiple of the logical block size? 
*/ if (t->physical_block_size & (t->logical_block_size - 1)) { diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c index c47db43a40cc..fc20489f0d2b 100644 --- a/block/blk-sysfs.c +++ b/block/blk-sysfs.c @@ -208,7 +208,7 @@ static ssize_t queue_discard_max_store(struct request_queue *q, static ssize_t queue_discard_zeroes_data_show(struct request_queue *q, char *page) { - return queue_var_show(queue_discard_zeroes_data(q), page); + return queue_var_show(0, page); } static ssize_t queue_write_same_max_show(struct request_queue *q, char *page) diff --git a/block/compat_ioctl.c b/block/compat_ioctl.c index 570021a0dc1c..04325b81c2b4 100644 --- a/block/compat_ioctl.c +++ b/block/compat_ioctl.c @@ -685,7 +685,7 @@ long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg) case BLKALIGNOFF: return compat_put_int(arg, bdev_alignment_offset(bdev)); case BLKDISCARDZEROES: - return compat_put_uint(arg, bdev_discard_zeroes_data(bdev)); + return compat_put_uint(arg, 0); case BLKFLSBUF: case BLKROSET: case BLKDISCARD: diff --git a/block/ioctl.c b/block/ioctl.c index 8ea00a41be01..0de02ee67eed 100644 --- a/block/ioctl.c +++ b/block/ioctl.c @@ -547,7 +547,7 @@ int blkdev_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd, case BLKALIGNOFF: return put_int(arg, bdev_alignment_offset(bdev)); case BLKDISCARDZEROES: - return put_uint(arg, bdev_discard_zeroes_data(bdev)); + return put_uint(arg, 0); case BLKSECTGET: max_sectors = min_t(unsigned int, USHRT_MAX, queue_max_sectors(bdev_get_queue(bdev))); diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c index 8e62d9f65510..84455c365f57 100644 --- a/drivers/block/drbd/drbd_main.c +++ b/drivers/block/drbd/drbd_main.c @@ -931,7 +931,6 @@ void assign_p_sizes_qlim(struct drbd_device *device, struct p_sizes *p, struct r p->qlim->io_min = cpu_to_be32(queue_io_min(q)); p->qlim->io_opt = cpu_to_be32(queue_io_opt(q)); p->qlim->discard_enabled = blk_queue_discard(q); - p->qlim->discard_zeroes_data = queue_discard_zeroes_data(q); p->qlim->write_same_capable = !!q->limits.max_write_same_sectors; } else { q = device->rq_queue; @@ -941,7 +940,6 @@ void assign_p_sizes_qlim(struct drbd_device *device, struct p_sizes *p, struct r p->qlim->io_min = cpu_to_be32(queue_io_min(q)); p->qlim->io_opt = cpu_to_be32(queue_io_opt(q)); p->qlim->discard_enabled = 0; - p->qlim->discard_zeroes_data = 0; p->qlim->write_same_capable = 0; } } diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c index e4516d3b971d..02255a0d68b9 100644 --- a/drivers/block/drbd/drbd_nl.c +++ b/drivers/block/drbd/drbd_nl.c @@ -1199,10 +1199,6 @@ static void decide_on_discard_support(struct drbd_device *device, struct drbd_connection *connection = first_peer_device(device)->connection; bool can_do = b ? 
blk_queue_discard(b) : true; - if (can_do && b && !b->limits.discard_zeroes_data && !discard_zeroes_if_aligned) { - can_do = false; - drbd_info(device, "discard_zeroes_data=0 and discard_zeroes_if_aligned=no: disabling discards\n"); - } if (can_do && connection->cstate >= C_CONNECTED && !(connection->agreed_features & DRBD_FF_TRIM)) { can_do = false; drbd_info(connection, "peer DRBD too old, does not support TRIM: disabling discards\n"); @@ -1484,8 +1480,7 @@ static void sanitize_disk_conf(struct drbd_device *device, struct disk_conf *dis if (disk_conf->al_extents > drbd_al_extents_max(nbc)) disk_conf->al_extents = drbd_al_extents_max(nbc); - if (!blk_queue_discard(q) - || (!q->limits.discard_zeroes_data && !disk_conf->discard_zeroes_if_aligned)) { + if (!blk_queue_discard(q)) { if (disk_conf->rs_discard_granularity) { disk_conf->rs_discard_granularity = 0; /* disable feature */ drbd_info(device, "rs_discard_granularity feature disabled\n"); diff --git a/drivers/block/loop.c b/drivers/block/loop.c index 3bb04c1a4ba1..3081d83d2ea3 100644 --- a/drivers/block/loop.c +++ b/drivers/block/loop.c @@ -828,7 +828,6 @@ static void loop_config_discard(struct loop_device *lo) q->limits.discard_alignment = 0; blk_queue_max_discard_sectors(q, 0); blk_queue_max_write_zeroes_sectors(q, 0); - q->limits.discard_zeroes_data = 0; queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, q); return; } @@ -837,7 +836,6 @@ static void loop_config_discard(struct loop_device *lo) q->limits.discard_alignment = 0; blk_queue_max_discard_sectors(q, UINT_MAX >> 9); blk_queue_max_write_zeroes_sectors(q, UINT_MAX >> 9); - q->limits.discard_zeroes_data = 1; queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q); } diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c index 30076e7753bc..05e3e664ea1b 100644 --- a/drivers/block/mtip32xx/mtip32xx.c +++ b/drivers/block/mtip32xx/mtip32xx.c @@ -4025,7 +4025,6 @@ skip_create_disk: dd->queue->limits.discard_granularity = 4096; blk_queue_max_discard_sectors(dd->queue, MTIP_MAX_TRIM_ENTRY_LEN * MTIP_MAX_TRIM_ENTRIES); - dd->queue->limits.discard_zeroes_data = 0; } /* Set the capacity of the device in 512 byte sectors. 
*/ diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c index 03ae72985c79..b02f2362fdf7 100644 --- a/drivers/block/nbd.c +++ b/drivers/block/nbd.c @@ -1110,7 +1110,6 @@ static int nbd_dev_add(int index) queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, disk->queue); disk->queue->limits.discard_granularity = 512; blk_queue_max_discard_sectors(disk->queue, UINT_MAX); - disk->queue->limits.discard_zeroes_data = 0; blk_queue_max_hw_sectors(disk->queue, 65536); disk->queue->limits.max_sectors = 256; diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c index 9c689b34e6e7..975922c8f231 100644 --- a/drivers/md/dm-cache-target.c +++ b/drivers/md/dm-cache-target.c @@ -2773,7 +2773,6 @@ static int cache_create(struct cache_args *ca, struct cache **result) ti->num_discard_bios = 1; ti->discards_supported = true; - ti->discard_zeroes_data_unsupported = true; ti->split_discard_bios = false; cache->features = ca->features; diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c index 389a3637ffcc..ef1d836bd81b 100644 --- a/drivers/md/dm-crypt.c +++ b/drivers/md/dm-crypt.c @@ -2030,7 +2030,6 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv) wake_up_process(cc->write_thread); ti->num_flush_bios = 1; - ti->discard_zeroes_data_unsupported = true; return 0; diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c index f8564d63982f..468f1380de1d 100644 --- a/drivers/md/dm-raid.c +++ b/drivers/md/dm-raid.c @@ -2813,7 +2813,9 @@ static void configure_discard_support(struct raid_set *rs) /* Assume discards not supported until after checks below. */ ti->discards_supported = false; - /* RAID level 4,5,6 require discard_zeroes_data for data integrity! */ + /* + * XXX: RAID level 4,5,6 require zeroing for safety. + */ raid456 = (rs->md.level == 4 || rs->md.level == 5 || rs->md.level == 6); for (i = 0; i < rs->raid_disks; i++) { @@ -2827,8 +2829,6 @@ static void configure_discard_support(struct raid_set *rs) return; if (raid456) { - if (!q->limits.discard_zeroes_data) - return; if (!devices_handle_discard_safely) { DMERR("raid456 discard support disabled due to discard_zeroes_data uncertainty."); DMERR("Set dm-raid.devices_handle_discard_safely=Y to override."); diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c index 2ddc2d20e62d..a95cbb80fb34 100644 --- a/drivers/md/dm-raid1.c +++ b/drivers/md/dm-raid1.c @@ -1124,7 +1124,6 @@ static int mirror_ctr(struct dm_target *ti, unsigned int argc, char **argv) ti->num_flush_bios = 1; ti->num_discard_bios = 1; ti->per_io_data_size = sizeof(struct dm_raid1_bio_record); - ti->discard_zeroes_data_unsupported = true; ms->kmirrord_wq = alloc_workqueue("kmirrord", WQ_MEM_RECLAIM, 0); if (!ms->kmirrord_wq) { diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c index 5cd665c91ead..958275aca008 100644 --- a/drivers/md/dm-table.c +++ b/drivers/md/dm-table.c @@ -1449,22 +1449,6 @@ static bool dm_table_supports_flush(struct dm_table *t, unsigned long flush) return false; } -static bool dm_table_discard_zeroes_data(struct dm_table *t) -{ - struct dm_target *ti; - unsigned i = 0; - - /* Ensure that all targets supports discard_zeroes_data. 
*/ - while (i < dm_table_get_num_targets(t)) { - ti = dm_table_get_target(t, i++); - - if (ti->discard_zeroes_data_unsupported) - return false; - } - - return true; -} - static int device_is_nonrot(struct dm_target *ti, struct dm_dev *dev, sector_t start, sector_t len, void *data) { @@ -1620,9 +1604,6 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q, } blk_queue_write_cache(q, wc, fua); - if (!dm_table_discard_zeroes_data(t)) - q->limits.discard_zeroes_data = 0; - /* Ensure that all underlying devices are non-rotational. */ if (dm_table_all_devices_attribute(t, device_is_nonrot)) queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q); diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c index 2b266a2b5035..a5f1916f621a 100644 --- a/drivers/md/dm-thin.c +++ b/drivers/md/dm-thin.c @@ -3263,7 +3263,6 @@ static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv) * them down to the data device. The thin device's discard * processing will cause mappings to be removed from the btree. */ - ti->discard_zeroes_data_unsupported = true; if (pf.discard_enabled && pf.discard_passdown) { ti->num_discard_bios = 1; @@ -4119,7 +4118,6 @@ static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv) ti->per_io_data_size = sizeof(struct dm_thin_endio_hook); /* In case the pool supports discards, pass them on. */ - ti->discard_zeroes_data_unsupported = true; if (tc->pool->pf.discard_enabled) { ti->discards_supported = true; ti->num_discard_bios = 1; diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 1725a54042bb..2efdb0d67460 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -7227,7 +7227,6 @@ static int raid5_run(struct mddev *mddev) if (mddev->queue) { int chunk_size; - bool discard_supported = true; /* read-ahead size must cover two whole stripes, which * is 2 * (datadisks) * chunksize where 'n' is the * number of raid devices @@ -7263,12 +7262,6 @@ static int raid5_run(struct mddev *mddev) blk_queue_max_discard_sectors(mddev->queue, 0xfffe * STRIPE_SECTORS); - /* - * unaligned part of discard request will be ignored, so can't - * guarantee discard_zeroes_data - */ - mddev->queue->limits.discard_zeroes_data = 0; - blk_queue_max_write_same_sectors(mddev->queue, 0); blk_queue_max_write_zeroes_sectors(mddev->queue, 0); @@ -7277,35 +7270,24 @@ static int raid5_run(struct mddev *mddev) rdev->data_offset << 9); disk_stack_limits(mddev->gendisk, rdev->bdev, rdev->new_data_offset << 9); - /* - * discard_zeroes_data is required, otherwise data - * could be lost. Consider a scenario: discard a stripe - * (the stripe could be inconsistent if - * discard_zeroes_data is 0); write one disk of the - * stripe (the stripe could be inconsistent again - * depending on which disks are used to calculate - * parity); the disk is broken; The stripe data of this - * disk is lost. - */ - if (!blk_queue_discard(bdev_get_queue(rdev->bdev)) || - !bdev_get_queue(rdev->bdev)-> - limits.discard_zeroes_data) - discard_supported = false; - /* Unfortunately, discard_zeroes_data is not currently - * a guarantee - just a hint. So we only allow DISCARD - * if the sysadmin has confirmed that only safe devices - * are in use by setting a module parameter. 
- */ - if (!devices_handle_discard_safely) { - if (discard_supported) { - pr_info("md/raid456: discard support disabled due to uncertainty.\n"); - pr_info("Set raid456.devices_handle_discard_safely=Y to override.\n"); - } - discard_supported = false; - } } - if (discard_supported && + /* + * zeroing is required, otherwise data + * could be lost. Consider a scenario: discard a stripe + * (the stripe could be inconsistent if + * discard_zeroes_data is 0); write one disk of the + * stripe (the stripe could be inconsistent again + * depending on which disks are used to calculate + * parity); the disk is broken; The stripe data of this + * disk is lost. + * + * We only allow DISCARD if the sysadmin has confirmed that + * only safe devices are in use by setting a module parameter. + * A better idea might be to turn DISCARD into WRITE_ZEROES + * requests, as that is required to be safe. + */ + if (devices_handle_discard_safely && mddev->queue->limits.max_discard_sectors >= (stripe >> 9) && mddev->queue->limits.discard_granularity >= stripe) queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index 001593ed0444..bcb0cb020fd2 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c @@ -644,8 +644,6 @@ static void sd_config_discard(struct scsi_disk *sdkp, unsigned int mode) unsigned int logical_block_size = sdkp->device->sector_size; unsigned int max_blocks = 0; - q->limits.discard_zeroes_data = 0; - /* * When LBPRZ is reported, discard alignment and granularity * must be fixed to the logical block size. Otherwise the block @@ -681,19 +679,16 @@ static void sd_config_discard(struct scsi_disk *sdkp, unsigned int mode) case SD_LBP_WS16: max_blocks = min_not_zero(sdkp->max_ws_blocks, (u32)SD_MAX_WS16_BLOCKS); - q->limits.discard_zeroes_data = sdkp->lbprz; break; case SD_LBP_WS10: max_blocks = min_not_zero(sdkp->max_ws_blocks, (u32)SD_MAX_WS10_BLOCKS); - q->limits.discard_zeroes_data = sdkp->lbprz; break; case SD_LBP_ZERO: max_blocks = min_not_zero(sdkp->max_ws_blocks, (u32)SD_MAX_WS10_BLOCKS); - q->limits.discard_zeroes_data = 1; break; } diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c index c754ae33bf7b..d2f089cfa9ae 100644 --- a/drivers/target/target_core_device.c +++ b/drivers/target/target_core_device.c @@ -851,7 +851,7 @@ bool target_configure_unmap_from_queue(struct se_dev_attrib *attrib, attrib->unmap_granularity = q->limits.discard_granularity / block_size; attrib->unmap_granularity_alignment = q->limits.discard_alignment / block_size; - attrib->unmap_zeroes_data = q->limits.discard_zeroes_data; + attrib->unmap_zeroes_data = 0; return true; } EXPORT_SYMBOL(target_configure_unmap_from_queue); diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 21a30f011674..ec993573e0a8 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -339,7 +339,6 @@ struct queue_limits { unsigned char misaligned; unsigned char discard_misaligned; unsigned char cluster; - unsigned char discard_zeroes_data; unsigned char raid_partial_stripes_expensive; enum blk_zoned_model zoned; }; @@ -1341,7 +1340,6 @@ extern int blkdev_issue_write_same(struct block_device *bdev, sector_t sector, sector_t nr_sects, gfp_t gfp_mask, struct page *page); #define BLKDEV_DISCARD_SECURE (1 << 0) /* issue a secure erase */ -#define BLKDEV_DISCARD_ZERO (1 << 1) /* must reliably zero data */ extern int blkdev_issue_discard(struct block_device *bdev, sector_t sector, sector_t nr_sects, gfp_t gfp_mask, unsigned long flags); @@ -1541,19 +1539,6 
@@ static inline int bdev_discard_alignment(struct block_device *bdev) return q->limits.discard_alignment; } -static inline unsigned int queue_discard_zeroes_data(struct request_queue *q) -{ - if (q->limits.max_discard_sectors && q->limits.discard_zeroes_data == 1) - return 1; - - return 0; -} - -static inline unsigned int bdev_discard_zeroes_data(struct block_device *bdev) -{ - return queue_discard_zeroes_data(bdev_get_queue(bdev)); -} - static inline unsigned int bdev_write_same(struct block_device *bdev) { struct request_queue *q = bdev_get_queue(bdev); diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h index 3829bee2302a..c7ea33e38fb9 100644 --- a/include/linux/device-mapper.h +++ b/include/linux/device-mapper.h @@ -296,11 +296,6 @@ struct dm_target { * on max_io_len boundary. */ bool split_discard_bios:1; - - /* - * Set if this target does not return zeroes on discarded blocks. - */ - bool discard_zeroes_data_unsupported:1; }; /* Each target can link one of these into the table */ -- cgit v1.2.3 From 4dda4735c581db6ee8e8c0dcc8b94563dfafff2f Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Thu, 20 Apr 2017 16:03:07 +0200 Subject: mtip32xx: add a status field to struct mtip_cmd Instead of using req->errors, which will go away. Signed-off-by: Christoph Hellwig Reviewed-by: Johannes Thumshirn Signed-off-by: Jens Axboe --- drivers/block/mtip32xx/mtip32xx.c | 16 +++++++++------- drivers/block/mtip32xx/mtip32xx.h | 1 + 2 files changed, 10 insertions(+), 7 deletions(-) (limited to 'drivers/block/mtip32xx') diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c index 05e3e664ea1b..7406de29db58 100644 --- a/drivers/block/mtip32xx/mtip32xx.c +++ b/drivers/block/mtip32xx/mtip32xx.c @@ -241,7 +241,8 @@ static void mtip_async_complete(struct mtip_port *port, rq = mtip_rq_from_tag(dd, tag); - blk_mq_complete_request(rq, status); + cmd->status = status; + blk_mq_complete_request(rq, 0); } /* @@ -2910,18 +2911,19 @@ static void mtip_softirq_done_fn(struct request *rq) if (unlikely(cmd->unaligned)) up(&dd->port->cmd_slot_unal); - blk_mq_end_request(rq, rq->errors); + blk_mq_end_request(rq, cmd->status); } static void mtip_abort_cmd(struct request *req, void *data, bool reserved) { + struct mtip_cmd *cmd = blk_mq_rq_to_pdu(req); struct driver_data *dd = data; dbg_printk(MTIP_DRV_NAME " Aborting request, tag = %d\n", req->tag); clear_bit(req->tag, dd->port->cmds_to_issue); - req->errors = -EIO; + cmd->status = -EIO; mtip_softirq_done_fn(req); } @@ -3816,7 +3818,6 @@ static int mtip_queue_rq(struct blk_mq_hw_ctx *hctx, if (likely(!ret)) return BLK_MQ_RQ_QUEUE_OK; - rq->errors = ret; return BLK_MQ_RQ_QUEUE_ERROR; } @@ -4106,9 +4107,10 @@ static void mtip_no_dev_cleanup(struct request *rq, void *data, bool reserv) struct driver_data *dd = (struct driver_data *)data; struct mtip_cmd *cmd; - if (likely(!reserv)) - blk_mq_complete_request(rq, -ENODEV); - else if (test_bit(MTIP_PF_IC_ACTIVE_BIT, &dd->port->flags)) { + if (likely(!reserv)) { + cmd->status = -ENODEV; + blk_mq_complete_request(rq, 0); + } else if (test_bit(MTIP_PF_IC_ACTIVE_BIT, &dd->port->flags)) { cmd = mtip_cmd_from_tag(dd, MTIP_TAG_INTERNAL); if (cmd->comp_func) diff --git a/drivers/block/mtip32xx/mtip32xx.h b/drivers/block/mtip32xx/mtip32xx.h index 7617888f7944..57b41528a824 100644 --- a/drivers/block/mtip32xx/mtip32xx.h +++ b/drivers/block/mtip32xx/mtip32xx.h @@ -352,6 +352,7 @@ struct mtip_cmd { int retries; /* The number of retries left for this command. 
*/ int direction; /* Data transfer direction */ + int status; }; /* Structure used to describe a port. */ -- cgit v1.2.3 From 08e0029aa2a4acdd365613ce88a1184e5351a8a1 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Thu, 20 Apr 2017 16:03:09 +0200 Subject: blk-mq: remove the error argument to blk_mq_complete_request Now that all drivers that call blk_mq_complete_requests have a ->complete callback we can remove the direct call to blk_mq_end_request, as well as the error argument to blk_mq_complete_request. Signed-off-by: Christoph Hellwig Reviewed-by: Johannes Thumshirn Reviewed-by: Bart Van Assche Signed-off-by: Jens Axboe --- block/blk-mq.c | 15 +++------------ drivers/block/loop.c | 4 ++-- drivers/block/mtip32xx/mtip32xx.c | 4 ++-- drivers/block/nbd.c | 4 ++-- drivers/block/null_blk.c | 2 +- drivers/block/virtio_blk.c | 2 +- drivers/block/xen-blkfront.c | 2 +- drivers/md/dm-rq.c | 2 +- drivers/nvme/host/core.c | 2 +- drivers/nvme/host/nvme.h | 2 +- drivers/scsi/scsi_lib.c | 2 +- include/linux/blk-mq.h | 2 +- 12 files changed, 17 insertions(+), 26 deletions(-) (limited to 'drivers/block/mtip32xx') diff --git a/block/blk-mq.c b/block/blk-mq.c index c496692ecc5b..3a2d179d49d6 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -442,17 +442,10 @@ static void blk_mq_stat_add(struct request *rq) static void __blk_mq_complete_request(struct request *rq) { - struct request_queue *q = rq->q; - if (rq->internal_tag != -1) blk_mq_sched_completed_request(rq); - blk_mq_stat_add(rq); - - if (!q->softirq_done_fn) - blk_mq_end_request(rq, rq->errors); - else - blk_mq_ipi_complete_request(rq); + blk_mq_ipi_complete_request(rq); } /** @@ -463,16 +456,14 @@ static void __blk_mq_complete_request(struct request *rq) * Ends all I/O on a request. It does not handle partial completions. * The actual completion happens out-of-order, through a IPI handler. **/ -void blk_mq_complete_request(struct request *rq, int error) +void blk_mq_complete_request(struct request *rq) { struct request_queue *q = rq->q; if (unlikely(blk_should_fake_timeout(q))) return; - if (!blk_mark_rq_complete(rq)) { - rq->errors = error; + if (!blk_mark_rq_complete(rq)) __blk_mq_complete_request(rq); - } } EXPORT_SYMBOL(blk_mq_complete_request); diff --git a/drivers/block/loop.c b/drivers/block/loop.c index 86351b3f7350..994403efee19 100644 --- a/drivers/block/loop.c +++ b/drivers/block/loop.c @@ -465,7 +465,7 @@ static void lo_rw_aio_complete(struct kiocb *iocb, long ret, long ret2) struct loop_cmd *cmd = container_of(iocb, struct loop_cmd, iocb); cmd->ret = ret; - blk_mq_complete_request(cmd->rq, 0); + blk_mq_complete_request(cmd->rq); } static int lo_rw_aio(struct loop_device *lo, struct loop_cmd *cmd, @@ -1685,7 +1685,7 @@ static void loop_handle_cmd(struct loop_cmd *cmd) /* complete non-aio request */ if (!cmd->use_aio || ret) { cmd->ret = ret ? 
-EIO : 0; - blk_mq_complete_request(cmd->rq, 0); + blk_mq_complete_request(cmd->rq); } } diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c index 7406de29db58..66a6bd83faae 100644 --- a/drivers/block/mtip32xx/mtip32xx.c +++ b/drivers/block/mtip32xx/mtip32xx.c @@ -242,7 +242,7 @@ static void mtip_async_complete(struct mtip_port *port, rq = mtip_rq_from_tag(dd, tag); cmd->status = status; - blk_mq_complete_request(rq, 0); + blk_mq_complete_request(rq); } /* @@ -4109,7 +4109,7 @@ static void mtip_no_dev_cleanup(struct request *rq, void *data, bool reserv) if (likely(!reserv)) { cmd->status = -ENODEV; - blk_mq_complete_request(rq, 0); + blk_mq_complete_request(rq); } else if (test_bit(MTIP_PF_IC_ACTIVE_BIT, &dd->port->flags)) { cmd = mtip_cmd_from_tag(dd, MTIP_TAG_INTERNAL); diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c index 09a74a66beb1..d387bef07fcc 100644 --- a/drivers/block/nbd.c +++ b/drivers/block/nbd.c @@ -635,7 +635,7 @@ static void recv_work(struct work_struct *work) break; } - blk_mq_complete_request(blk_mq_rq_from_pdu(cmd), 0); + blk_mq_complete_request(blk_mq_rq_from_pdu(cmd)); } atomic_dec(&config->recv_threads); wake_up(&config->recv_wq); @@ -651,7 +651,7 @@ static void nbd_clear_req(struct request *req, void *data, bool reserved) return; cmd = blk_mq_rq_to_pdu(req); cmd->status = -EIO; - blk_mq_complete_request(req, 0); + blk_mq_complete_request(req); } static void nbd_clear_que(struct nbd_device *nbd) diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c index 0ca4aa34edb9..d946e1eeac8e 100644 --- a/drivers/block/null_blk.c +++ b/drivers/block/null_blk.c @@ -281,7 +281,7 @@ static inline void null_handle_cmd(struct nullb_cmd *cmd) case NULL_IRQ_SOFTIRQ: switch (queue_mode) { case NULL_Q_MQ: - blk_mq_complete_request(cmd->rq, 0); + blk_mq_complete_request(cmd->rq); break; case NULL_Q_RQ: blk_complete_request(cmd->rq); diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c index dea2a58d6734..f94614257462 100644 --- a/drivers/block/virtio_blk.c +++ b/drivers/block/virtio_blk.c @@ -201,7 +201,7 @@ static void virtblk_done(struct virtqueue *vq) while ((vbr = virtqueue_get_buf(vblk->vqs[qid].vq, &len)) != NULL) { struct request *req = blk_mq_rq_from_pdu(vbr); - blk_mq_complete_request(req, 0); + blk_mq_complete_request(req); req_done = true; } if (unlikely(virtqueue_is_broken(vq))) diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c index 57866355c060..39459631667c 100644 --- a/drivers/block/xen-blkfront.c +++ b/drivers/block/xen-blkfront.c @@ -1647,7 +1647,7 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id) BUG(); } - blk_mq_complete_request(req, 0); + blk_mq_complete_request(req); } rinfo->ring.rsp_cons = i; diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c index 1173be21f6f6..bff7e3bdb4ed 100644 --- a/drivers/md/dm-rq.c +++ b/drivers/md/dm-rq.c @@ -363,7 +363,7 @@ static void dm_complete_request(struct request *rq, int error) if (!rq->q->mq_ops) blk_complete_request(rq); else - blk_mq_complete_request(rq, 0); + blk_mq_complete_request(rq); } /* diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index 805f250315ec..8dc664798293 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -117,7 +117,7 @@ void nvme_cancel_request(struct request *req, void *data, bool reserved) if (blk_queue_dying(req->q)) status |= NVME_SC_DNR; nvme_req(req)->status = status; - blk_mq_complete_request(req, 0); + blk_mq_complete_request(req); } 
EXPORT_SYMBOL_GPL(nvme_cancel_request); diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h index 550037f5efea..c6ef6c30e2f0 100644 --- a/drivers/nvme/host/nvme.h +++ b/drivers/nvme/host/nvme.h @@ -251,7 +251,7 @@ static inline void nvme_end_request(struct request *req, __le16 status, rq->status = le16_to_cpu(status) >> 1; rq->result = result; - blk_mq_complete_request(req, 0); + blk_mq_complete_request(req); } void nvme_complete_rq(struct request *req); diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index b9298a499e19..4a20e6098f7c 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c @@ -1904,7 +1904,7 @@ static int scsi_mq_prep_fn(struct request *req) static void scsi_mq_done(struct scsi_cmnd *cmd) { trace_scsi_dispatch_cmd_done(cmd); - blk_mq_complete_request(cmd->request, 0); + blk_mq_complete_request(cmd->request); } static int scsi_queue_rq(struct blk_mq_hw_ctx *hctx, diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h index d75de612845d..0c4dadb85f62 100644 --- a/include/linux/blk-mq.h +++ b/include/linux/blk-mq.h @@ -228,7 +228,7 @@ void blk_mq_add_to_requeue_list(struct request *rq, bool at_head, void blk_mq_kick_requeue_list(struct request_queue *q); void blk_mq_delay_kick_requeue_list(struct request_queue *q, unsigned long msecs); void blk_mq_abort_requeue_list(struct request_queue *q); -void blk_mq_complete_request(struct request *rq, int error); +void blk_mq_complete_request(struct request *rq); bool blk_mq_queue_stopped(struct request_queue *q); void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx); -- cgit v1.2.3 From 95c55ff425faa190fd8ba55090fbdf33d8b2f0ae Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Fri, 21 Apr 2017 08:46:44 -0600 Subject: mtip32xx: fix dereference of stack garbage We need to get the command payload from the request before we attempt to dereference it. Fixes: 4dda4735c581 ("mtip32xx: add a status field to struct mtip_cmd") Reviewed-by: Christoph Hellwig Signed-off-by: Jens Axboe --- drivers/block/mtip32xx/mtip32xx.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers/block/mtip32xx') diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c index 66a6bd83faae..54c8736038de 100644 --- a/drivers/block/mtip32xx/mtip32xx.c +++ b/drivers/block/mtip32xx/mtip32xx.c @@ -4108,6 +4108,7 @@ static void mtip_no_dev_cleanup(struct request *rq, void *data, bool reserv) struct mtip_cmd *cmd; if (likely(!reserv)) { + cmd = blk_mq_rq_to_pdu(rq); cmd->status = -ENODEV; blk_mq_complete_request(rq); } else if (test_bit(MTIP_PF_IC_ACTIVE_BIT, &dd->port->flags)) { -- cgit v1.2.3 From a4e84aae8139aca9fbfbced1f45c51ca81b57488 Mon Sep 17 00:00:00 2001 From: Ming Lei Date: Thu, 27 Apr 2017 07:45:18 -0600 Subject: mtip32xx: use runtime tag to initialize command header mtip32xx supposes that 'request_idx' passed to .init_request() is tag of the request, and use that as request's tag to initialize command header. After MQ IO scheduler is in, request tag assigned isn't same with the request index anymore, so cause strange hardware failure on mtip32xx, even whole system panic is triggered. This patch fixes the issue by initializing command header via request's real tag. 
Signed-off-by: Ming Lei Signed-off-by: Jens Axboe --- drivers/block/mtip32xx/mtip32xx.c | 36 ++++++++++++++++++++++++------------ 1 file changed, 24 insertions(+), 12 deletions(-) (limited to 'drivers/block/mtip32xx') diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c index 54c8736038de..fa3dd4a55371 100644 --- a/drivers/block/mtip32xx/mtip32xx.c +++ b/drivers/block/mtip32xx/mtip32xx.c @@ -169,6 +169,25 @@ static bool mtip_check_surprise_removal(struct pci_dev *pdev) return false; /* device present */ } +/* we have to use runtime tag to setup command header */ +static void mtip_init_cmd_header(struct request *rq) +{ + struct driver_data *dd = rq->q->queuedata; + struct mtip_cmd *cmd = blk_mq_rq_to_pdu(rq); + u32 host_cap_64 = readl(dd->mmio + HOST_CAP) & HOST_CAP_64; + + /* Point the command headers at the command tables. */ + cmd->command_header = dd->port->command_list + + (sizeof(struct mtip_cmd_hdr) * rq->tag); + cmd->command_header_dma = dd->port->command_list_dma + + (sizeof(struct mtip_cmd_hdr) * rq->tag); + + if (host_cap_64) + cmd->command_header->ctbau = __force_bit2int cpu_to_le32((cmd->command_dma >> 16) >> 16); + + cmd->command_header->ctba = __force_bit2int cpu_to_le32(cmd->command_dma & 0xFFFFFFFF); +} + static struct mtip_cmd *mtip_get_int_command(struct driver_data *dd) { struct request *rq; @@ -180,6 +199,9 @@ static struct mtip_cmd *mtip_get_int_command(struct driver_data *dd) if (IS_ERR(rq)) return NULL; + /* Internal cmd isn't submitted via .queue_rq */ + mtip_init_cmd_header(rq); + return blk_mq_rq_to_pdu(rq); } @@ -3809,6 +3831,8 @@ static int mtip_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *rq = bd->rq; int ret; + mtip_init_cmd_header(rq); + if (unlikely(mtip_check_unal_depth(hctx, rq))) return BLK_MQ_RQ_QUEUE_BUSY; @@ -3839,7 +3863,6 @@ static int mtip_init_cmd(void *data, struct request *rq, unsigned int hctx_idx, { struct driver_data *dd = data; struct mtip_cmd *cmd = blk_mq_rq_to_pdu(rq); - u32 host_cap_64 = readl(dd->mmio + HOST_CAP) & HOST_CAP_64; /* * For flush requests, request_idx starts at the end of the @@ -3856,17 +3879,6 @@ static int mtip_init_cmd(void *data, struct request *rq, unsigned int hctx_idx, memset(cmd->command, 0, CMD_DMA_ALLOC_SZ); - /* Point the command headers at the command tables. */ - cmd->command_header = dd->port->command_list + - (sizeof(struct mtip_cmd_hdr) * request_idx); - cmd->command_header_dma = dd->port->command_list_dma + - (sizeof(struct mtip_cmd_hdr) * request_idx); - - if (host_cap_64) - cmd->command_header->ctbau = __force_bit2int cpu_to_le32((cmd->command_dma >> 16) >> 16); - - cmd->command_header->ctba = __force_bit2int cpu_to_le32(cmd->command_dma & 0xFFFFFFFF); - sg_init_table(cmd->sg, MTIP_MAX_SG); return 0; } -- cgit v1.2.3
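
Taken together, the blk-mq and mtip32xx changes above converge on one driver-side completion pattern: the per-command error lives in the driver's own per-request data (pdu) instead of rq->errors, blk_mq_complete_request() is called with no error argument, the ->complete (softirq-done) handler ends the request with the stored status, and anything derived from a tag is computed at ->queue_rq() time from rq->tag rather than from the request index passed to ->init_request(). The fragment below is a minimal sketch of that pattern against the 4.12-era blk-mq API shown in these diffs (int status codes, BLK_MQ_RQ_QUEUE_* return values); it is not code from any of the patches, and all mydrv_* names are invented for illustration.

#include <linux/blk-mq.h>

/* Per-request driver data; replaces the old reliance on rq->errors. */
struct mydrv_cmd {
	int status;
};

/* Hardware/IRQ path: record the outcome, then hand off to blk-mq. */
static void mydrv_hw_done(struct request *rq, int error)
{
	struct mydrv_cmd *cmd = blk_mq_rq_to_pdu(rq);

	cmd->status = error;
	blk_mq_complete_request(rq);	/* no error argument anymore */
}

/* ->complete callback: runs via IPI/softirq and ends the request. */
static void mydrv_softirq_done(struct request *rq)
{
	struct mydrv_cmd *cmd = blk_mq_rq_to_pdu(rq);

	blk_mq_end_request(rq, cmd->status);
}

static int mydrv_queue_rq(struct blk_mq_hw_ctx *hctx,
			  const struct blk_mq_queue_data *bd)
{
	struct request *rq = bd->rq;

	blk_mq_start_request(rq);
	/*
	 * Per the "runtime tag" fix above, anything that indexes per-tag
	 * hardware structures (e.g. a command header slot) must use
	 * rq->tag here, not the request_idx seen in ->init_request().
	 */
	mydrv_hw_done(rq, 0);		/* pretend the hardware finished OK */
	return BLK_MQ_RQ_QUEUE_OK;
}

/* Per the constify patch, the ops table is declared const. */
static const struct blk_mq_ops mydrv_mq_ops = {
	.queue_rq	= mydrv_queue_rq,
	.complete	= mydrv_softirq_done,
};

In a real driver mydrv_hw_done() would be called from the interrupt/completion path rather than directly from ->queue_rq(); completing inline here (as null_blk does) only keeps the sketch self-contained.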