author	Linus Torvalds <torvalds@linux-foundation.org>	2021-02-28 22:23:38 +0300
committer	Linus Torvalds <torvalds@linux-foundation.org>	2021-02-28 22:23:38 +0300
commit	3ab6608e66b16159c3a3c2d7015b9c11cd3396c1 (patch)
tree	a56fa037b1e82f100589760968d43145a521fd11 /block
parent	5695e51619745d4fe3ec2506a2f0cd982c5e27a4 (diff)
parent	5f7136db82996089cdfb2939c7664b29e9da141d (diff)
download	linux-3ab6608e66b16159c3a3c2d7015b9c11cd3396c1.tar.xz
Merge tag 'block-5.12-2021-02-27' of git://git.kernel.dk/linux-block
Pull more block updates from Jens Axboe:
 "A few stragglers (and one due to me missing it originally), and fixes
  for changes in this merge window mostly. In particular:

   - blktrace cleanups (Chaitanya, Greg)

   - Kill dead blk_pm_* functions (Bart)

   - Fixes for the bio alloc changes (Christoph)

   - Fix for the partition changes (Christoph, Ming)

   - Fix for turning off iopoll with polled IO inflight (Jeffle)

   - nbd disconnect fix (Josef)

   - loop fsync error fix (Mauricio)

   - kyber update depth fix (Yang)

   - max_sectors alignment fix (Mikulas)

   - Add bio_max_segs helper (Matthew)"

* tag 'block-5.12-2021-02-27' of git://git.kernel.dk/linux-block: (21 commits)
  block: Add bio_max_segs
  blktrace: fix documentation for blk_fill_rw()
  block: memory allocations in bounce_clone_bio must not fail
  block: remove the gfp_mask argument to bounce_clone_bio
  block: fix bounce_clone_bio for passthrough bios
  block-crypto-fallback: use a bio_set for splitting bios
  block: fix logging on capacity change
  blk-settings: align max_sectors on "logical_block_size" boundary
  block: reopen the device in blkdev_reread_part
  block: don't skip empty device in in disk_uevent
  blktrace: remove debugfs file dentries from struct blk_trace
  nbd: handle device refs for DESTROY_ON_DISCONNECT properly
  kyber: introduce kyber_depth_updated()
  loop: fix I/O error on fsync() in detached loop devices
  block: fix potential IO hang when turning off io_poll
  block: get rid of the trace rq insert wrapper
  blktrace: fix blk_rq_merge documentation
  blktrace: fix blk_rq_issue documentation
  blktrace: add blk_fill_rwbs documentation comment
  block: remove superfluous param in blk_fill_rwbs()
  ...
Diffstat (limited to 'block')
-rw-r--r--	block/bfq-iosched.c	4
-rw-r--r--	block/blk-core.c	1
-rw-r--r--	block/blk-crypto-fallback.c	12
-rw-r--r--	block/blk-map.c	4
-rw-r--r--	block/blk-mq-sched.c	6
-rw-r--r--	block/blk-mq-sched.h	1
-rw-r--r--	block/blk-pm.h	38
-rw-r--r--	block/blk-settings.c	12
-rw-r--r--	block/blk-sysfs.c	7
-rw-r--r--	block/bounce.c	24
-rw-r--r--	block/genhd.c	4
-rw-r--r--	block/ioctl.c	21
-rw-r--r--	block/kyber-iosched.c	33
-rw-r--r--	block/mq-deadline.c	4
14 files changed, 79 insertions, 92 deletions
diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
index b398dde53af9..ec482e6641ff 100644
--- a/block/bfq-iosched.c
+++ b/block/bfq-iosched.c
@@ -125,6 +125,8 @@
#include <linux/delay.h>
#include <linux/backing-dev.h>
+#include <trace/events/block.h>
+
#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-tag.h"
@@ -5621,7 +5623,7 @@ static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
spin_unlock_irq(&bfqd->lock);
- blk_mq_sched_request_inserted(rq);
+ trace_block_rq_insert(rq);
spin_lock_irq(&bfqd->lock);
bfqq = bfq_init_rq(rq);
diff --git a/block/blk-core.c b/block/blk-core.c
index 5e752840b41a..fc60ff208497 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -59,6 +59,7 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_remap);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_complete);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_split);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_unplug);
+EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_insert);
DEFINE_IDA(blk_queue_ida);
diff --git a/block/blk-crypto-fallback.c b/block/blk-crypto-fallback.c
index e8327c50d7c9..c176b7af56a7 100644
--- a/block/blk-crypto-fallback.c
+++ b/block/blk-crypto-fallback.c
@@ -80,6 +80,7 @@ static struct blk_crypto_keyslot {
static struct blk_keyslot_manager blk_crypto_ksm;
static struct workqueue_struct *blk_crypto_wq;
static mempool_t *blk_crypto_bounce_page_pool;
+static struct bio_set crypto_bio_split;
/*
* This is the key we set when evicting a keyslot. This *should* be the all 0's
@@ -224,7 +225,8 @@ static bool blk_crypto_split_bio_if_needed(struct bio **bio_ptr)
if (num_sectors < bio_sectors(bio)) {
struct bio *split_bio;
- split_bio = bio_split(bio, num_sectors, GFP_NOIO, NULL);
+ split_bio = bio_split(bio, num_sectors, GFP_NOIO,
+ &crypto_bio_split);
if (!split_bio) {
bio->bi_status = BLK_STS_RESOURCE;
return false;
@@ -538,9 +540,13 @@ static int blk_crypto_fallback_init(void)
prandom_bytes(blank_key, BLK_CRYPTO_MAX_KEY_SIZE);
- err = blk_ksm_init(&blk_crypto_ksm, blk_crypto_num_keyslots);
+ err = bioset_init(&crypto_bio_split, 64, 0, 0);
if (err)
goto out;
+
+ err = blk_ksm_init(&blk_crypto_ksm, blk_crypto_num_keyslots);
+ if (err)
+ goto fail_free_bioset;
err = -ENOMEM;
blk_crypto_ksm.ksm_ll_ops = blk_crypto_ksm_ll_ops;
@@ -591,6 +597,8 @@ fail_free_wq:
destroy_workqueue(blk_crypto_wq);
fail_free_ksm:
blk_ksm_destroy(&blk_crypto_ksm);
+fail_free_bioset:
+ bioset_exit(&crypto_bio_split);
out:
return err;
}
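The new crypto_bio_split set is sized with bioset_init(&crypto_bio_split, 64, 0, 0): a 64-entry bio mempool, no front padding, no flags. A hedged sketch of the same init/teardown pattern for a hypothetical driver (my_split_set and the function names are illustrative, not from this patch):

#include <linux/bio.h>

static struct bio_set my_split_set;	/* hypothetical driver-private bio_set */

static int __init my_driver_init(void)
{
	/* Reserve a mempool of 64 bios so bio_split() can keep making
	 * forward progress under memory pressure. */
	return bioset_init(&my_split_set, 64, 0, 0);
}

static void __exit my_driver_exit(void)
{
	bioset_exit(&my_split_set);	/* mirror every successful bioset_init() */
}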
diff --git a/block/blk-map.c b/block/blk-map.c
index 21630dccac62..369e204d14d0 100644
--- a/block/blk-map.c
+++ b/block/blk-map.c
@@ -150,9 +150,7 @@ static int bio_copy_user_iov(struct request *rq, struct rq_map_data *map_data,
bmd->is_our_pages = !map_data;
bmd->is_null_mapped = (map_data && map_data->null_mapped);
- nr_pages = DIV_ROUND_UP(offset + len, PAGE_SIZE);
- if (nr_pages > BIO_MAX_PAGES)
- nr_pages = BIO_MAX_PAGES;
+ nr_pages = bio_max_segs(DIV_ROUND_UP(offset + len, PAGE_SIZE));
ret = -ENOMEM;
bio = bio_kmalloc(gfp_mask, nr_pages);
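bio_max_segs() replaces the open-coded clamp against BIO_MAX_PAGES. Its definition, added to include/linux/bio.h by the first commit in this pull, is essentially the following (a sketch; consult the header for the exact types):

static inline unsigned short bio_max_segs(unsigned int nr_segs)
{
	/* Never ask for more segments than a single bio can carry. */
	return min(nr_segs, BIO_MAX_PAGES);
}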
diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c
index deff4e826e23..ddb65e9e6fd9 100644
--- a/block/blk-mq-sched.c
+++ b/block/blk-mq-sched.c
@@ -384,12 +384,6 @@ bool blk_mq_sched_try_insert_merge(struct request_queue *q, struct request *rq)
}
EXPORT_SYMBOL_GPL(blk_mq_sched_try_insert_merge);
-void blk_mq_sched_request_inserted(struct request *rq)
-{
- trace_block_rq_insert(rq);
-}
-EXPORT_SYMBOL_GPL(blk_mq_sched_request_inserted);
-
static bool blk_mq_sched_bypass_insert(struct blk_mq_hw_ctx *hctx,
bool has_sched,
struct request *rq)
diff --git a/block/blk-mq-sched.h b/block/blk-mq-sched.h
index 0476360f05f1..5b18ab915c65 100644
--- a/block/blk-mq-sched.h
+++ b/block/blk-mq-sched.h
@@ -7,7 +7,6 @@
void blk_mq_sched_assign_ioc(struct request *rq);
-void blk_mq_sched_request_inserted(struct request *rq);
bool blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio,
unsigned int nr_segs, struct request **merged_request);
bool __blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio,
diff --git a/block/blk-pm.h b/block/blk-pm.h
index a2283cc9f716..8a5a0d4b357f 100644
--- a/block/blk-pm.h
+++ b/block/blk-pm.h
@@ -21,31 +21,6 @@ static inline void blk_pm_mark_last_busy(struct request *rq)
if (rq->q->dev && !(rq->rq_flags & RQF_PM))
pm_runtime_mark_last_busy(rq->q->dev);
}
-
-static inline void blk_pm_requeue_request(struct request *rq)
-{
- lockdep_assert_held(&rq->q->queue_lock);
-
- if (rq->q->dev && !(rq->rq_flags & RQF_PM))
- rq->q->nr_pending--;
-}
-
-static inline void blk_pm_add_request(struct request_queue *q,
- struct request *rq)
-{
- lockdep_assert_held(&q->queue_lock);
-
- if (q->dev && !(rq->rq_flags & RQF_PM))
- q->nr_pending++;
-}
-
-static inline void blk_pm_put_request(struct request *rq)
-{
- lockdep_assert_held(&rq->q->queue_lock);
-
- if (rq->q->dev && !(rq->rq_flags & RQF_PM))
- --rq->q->nr_pending;
-}
#else
static inline int blk_pm_resume_queue(const bool pm, struct request_queue *q)
{
@@ -55,19 +30,6 @@ static inline int blk_pm_resume_queue(const bool pm, struct request_queue *q)
static inline void blk_pm_mark_last_busy(struct request *rq)
{
}
-
-static inline void blk_pm_requeue_request(struct request *rq)
-{
-}
-
-static inline void blk_pm_add_request(struct request_queue *q,
- struct request *rq)
-{
-}
-
-static inline void blk_pm_put_request(struct request *rq)
-{
-}
#endif
#endif /* _BLOCK_BLK_PM_H_ */
diff --git a/block/blk-settings.c b/block/blk-settings.c
index 7dd8be314ac6..b4aa2f37fab6 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -504,6 +504,14 @@ void blk_queue_io_opt(struct request_queue *q, unsigned int opt)
}
EXPORT_SYMBOL(blk_queue_io_opt);
+static unsigned int blk_round_down_sectors(unsigned int sectors, unsigned int lbs)
+{
+ sectors = round_down(sectors, lbs >> SECTOR_SHIFT);
+ if (sectors < PAGE_SIZE >> SECTOR_SHIFT)
+ sectors = PAGE_SIZE >> SECTOR_SHIFT;
+ return sectors;
+}
+
/**
* blk_stack_limits - adjust queue_limits for stacked devices
* @t: the stacking driver limits (top device)
@@ -630,6 +638,10 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
ret = -1;
}
+ t->max_sectors = blk_round_down_sectors(t->max_sectors, t->logical_block_size);
+ t->max_hw_sectors = blk_round_down_sectors(t->max_hw_sectors, t->logical_block_size);
+ t->max_dev_sectors = blk_round_down_sectors(t->max_dev_sectors, t->logical_block_size);
+
/* Discard alignment and granularity */
if (b->discard_granularity) {
alignment = queue_limit_discard_alignment(b, start);
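To make the new rounding concrete, a worked example with illustrative numbers:

/*
 * 4096-byte logical blocks  =>  lbs >> SECTOR_SHIFT == 8 sectors.
 *
 *   blk_round_down_sectors(2047, 4096)
 *     = round_down(2047, 8) = 2040    (logical-block aligned)
 *
 * Floor: with 4K pages, PAGE_SIZE >> SECTOR_SHIFT == 8, so a result
 * below 8 sectors is clamped up to 8 and can never reach zero.
 */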
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index ae39c7f3d83d..0f4f0c8a7825 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -434,10 +434,13 @@ static ssize_t queue_poll_store(struct request_queue *q, const char *page,
if (ret < 0)
return ret;
- if (poll_on)
+ if (poll_on) {
blk_queue_flag_set(QUEUE_FLAG_POLL, q);
- else
+ } else {
+ blk_mq_freeze_queue(q);
blk_queue_flag_clear(QUEUE_FLAG_POLL, q);
+ blk_mq_unfreeze_queue(q);
+ }
return ret;
}
diff --git a/block/bounce.c b/block/bounce.c
index fc55314aa426..87983a35079c 100644
--- a/block/bounce.c
+++ b/block/bounce.c
@@ -214,8 +214,7 @@ static void bounce_end_io_read_isa(struct bio *bio)
__bounce_end_io_read(bio, &isa_page_pool);
}
-static struct bio *bounce_clone_bio(struct bio *bio_src, gfp_t gfp_mask,
- struct bio_set *bs)
+static struct bio *bounce_clone_bio(struct bio *bio_src)
{
struct bvec_iter iter;
struct bio_vec bv;
@@ -242,10 +241,12 @@ static struct bio *bounce_clone_bio(struct bio *bio_src, gfp_t gfp_mask,
* asking for trouble and would force extra work on
* __bio_clone_fast() anyways.
*/
-
- bio = bio_alloc_bioset(gfp_mask, bio_segments(bio_src), bs);
- if (!bio)
- return NULL;
+ if (bio_is_passthrough(bio_src))
+ bio = bio_kmalloc(GFP_NOIO | __GFP_NOFAIL,
+ bio_segments(bio_src));
+ else
+ bio = bio_alloc_bioset(GFP_NOIO, bio_segments(bio_src),
+ &bounce_bio_set);
bio->bi_bdev = bio_src->bi_bdev;
if (bio_flagged(bio_src, BIO_REMAPPED))
bio_set_flag(bio, BIO_REMAPPED);
@@ -269,11 +270,11 @@ static struct bio *bounce_clone_bio(struct bio *bio_src, gfp_t gfp_mask,
break;
}
- if (bio_crypt_clone(bio, bio_src, gfp_mask) < 0)
+ if (bio_crypt_clone(bio, bio_src, GFP_NOIO) < 0)
goto err_put;
if (bio_integrity(bio_src) &&
- bio_integrity_clone(bio, bio_src, gfp_mask) < 0)
+ bio_integrity_clone(bio, bio_src, GFP_NOIO) < 0)
goto err_put;
bio_clone_blkg_association(bio, bio_src);
@@ -296,7 +297,6 @@ static void __blk_queue_bounce(struct request_queue *q, struct bio **bio_orig,
unsigned i = 0;
bool bounce = false;
int sectors = 0;
- bool passthrough = bio_is_passthrough(*bio_orig);
bio_for_each_segment(from, *bio_orig, iter) {
if (i++ < BIO_MAX_PAGES)
@@ -307,14 +307,14 @@ static void __blk_queue_bounce(struct request_queue *q, struct bio **bio_orig,
if (!bounce)
return;
- if (!passthrough && sectors < bio_sectors(*bio_orig)) {
+ if (!bio_is_passthrough(*bio_orig) &&
+ sectors < bio_sectors(*bio_orig)) {
bio = bio_split(*bio_orig, sectors, GFP_NOIO, &bounce_bio_split);
bio_chain(bio, *bio_orig);
submit_bio_noacct(*bio_orig);
*bio_orig = bio;
}
- bio = bounce_clone_bio(*bio_orig, GFP_NOIO, passthrough ? NULL :
- &bounce_bio_set);
+ bio = bounce_clone_bio(*bio_orig);
/*
* Bvec table can't be updated by bio_for_each_segment_all(),
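Why the old NULL check on the clone could go away, and why passthrough bios differ (a reading of the two bounce commits in this pull):

/*
 * bio_alloc_bioset() with a reclaim-capable mask such as GFP_NOIO is
 * mempool-backed and documented not to fail, and the bio_kmalloc()
 * path is additionally marked __GFP_NOFAIL.  Passthrough bios keep a
 * kmalloc allocation, matching the old "passthrough ? NULL :
 * &bounce_bio_set" call removed above.
 */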
diff --git a/block/genhd.c b/block/genhd.c
index 36ff45bbaaaf..fcc530164b5a 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -74,7 +74,7 @@ bool set_capacity_and_notify(struct gendisk *disk, sector_t size)
return false;
pr_info("%s: detected capacity change from %lld to %lld\n",
- disk->disk_name, size, capacity);
+ disk->disk_name, capacity, size);
/*
* Historically we did not send a uevent for changes to/from an empty
@@ -476,7 +476,7 @@ void disk_uevent(struct gendisk *disk, enum kobject_action action)
struct disk_part_iter piter;
struct block_device *part;
- disk_part_iter_init(&piter, disk, DISK_PITER_INCL_PART0);
+ disk_part_iter_init(&piter, disk, DISK_PITER_INCL_EMPTY_PART0);
while ((part = disk_part_iter_next(&piter)))
kobject_uevent(bdev_kobj(part), action);
disk_part_iter_exit(&piter);
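For the logging fix: capacity holds the old size and size the new one, so the old argument order printed the transition backwards. Illustrative output for a disk growing from 1000 to 2000 sectors:

/* before: "sda: detected capacity change from 2000 to 1000"  (swapped)
 * after:  "sda: detected capacity change from 1000 to 2000"           */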
diff --git a/block/ioctl.c b/block/ioctl.c
index d61d652078f4..ff241e663c01 100644
--- a/block/ioctl.c
+++ b/block/ioctl.c
@@ -81,20 +81,27 @@ static int compat_blkpg_ioctl(struct block_device *bdev,
}
#endif
-static int blkdev_reread_part(struct block_device *bdev)
+static int blkdev_reread_part(struct block_device *bdev, fmode_t mode)
{
- int ret;
+ struct block_device *tmp;
if (!disk_part_scan_enabled(bdev->bd_disk) || bdev_is_partition(bdev))
return -EINVAL;
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
- mutex_lock(&bdev->bd_mutex);
- ret = bdev_disk_changed(bdev, false);
- mutex_unlock(&bdev->bd_mutex);
+ /*
+ * Reopen the device to revalidate the driver state and force a
+ * partition rescan.
+ */
+ mode &= ~FMODE_EXCL;
+ set_bit(GD_NEED_PART_SCAN, &bdev->bd_disk->state);
- return ret;
+ tmp = blkdev_get_by_dev(bdev->bd_dev, mode, NULL);
+ if (IS_ERR(tmp))
+ return PTR_ERR(tmp);
+ blkdev_put(tmp, mode);
+ return 0;
}
static int blk_ioctl_discard(struct block_device *bdev, fmode_t mode,
@@ -498,7 +505,7 @@ static int blkdev_common_ioctl(struct block_device *bdev, fmode_t mode,
bdev->bd_bdi->ra_pages = (arg * 512) / PAGE_SIZE;
return 0;
case BLKRRPART:
- return blkdev_reread_part(bdev);
+ return blkdev_reread_part(bdev, mode);
case BLKTRACESTART:
case BLKTRACESTOP:
case BLKTRACETEARDOWN:
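From userspace nothing changes: BLKRRPART is issued as before, and the reopen now happens inside the kernel. A minimal, hypothetical caller (not part of this patch):

#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fs.h>		/* BLKRRPART */

/* Ask the kernel to re-read a whole-disk device's partition table. */
static int rescan_partitions(const char *dev)	/* e.g. "/dev/sda" */
{
	int fd = open(dev, O_RDONLY);
	if (fd < 0)
		return -1;
	int ret = ioctl(fd, BLKRRPART);
	close(fd);
	return ret;
}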
diff --git a/block/kyber-iosched.c b/block/kyber-iosched.c
index c25c41d0d061..33d34d69cade 100644
--- a/block/kyber-iosched.c
+++ b/block/kyber-iosched.c
@@ -13,6 +13,8 @@
#include <linux/module.h>
#include <linux/sbitmap.h>
+#include <trace/events/block.h>
+
#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
@@ -353,19 +355,9 @@ static void kyber_timer_fn(struct timer_list *t)
}
}
-static unsigned int kyber_sched_tags_shift(struct request_queue *q)
-{
- /*
- * All of the hardware queues have the same depth, so we can just grab
- * the shift of the first one.
- */
- return q->queue_hw_ctx[0]->sched_tags->bitmap_tags->sb.shift;
-}
-
static struct kyber_queue_data *kyber_queue_data_alloc(struct request_queue *q)
{
struct kyber_queue_data *kqd;
- unsigned int shift;
int ret = -ENOMEM;
int i;
@@ -400,9 +392,6 @@ static struct kyber_queue_data *kyber_queue_data_alloc(struct request_queue *q)
kqd->latency_targets[i] = kyber_latency_targets[i];
}
- shift = kyber_sched_tags_shift(q);
- kqd->async_depth = (1U << shift) * KYBER_ASYNC_PERCENT / 100U;
-
return kqd;
err_buckets:
@@ -458,9 +447,19 @@ static void kyber_ctx_queue_init(struct kyber_ctx_queue *kcq)
INIT_LIST_HEAD(&kcq->rq_list[i]);
}
-static int kyber_init_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
+static void kyber_depth_updated(struct blk_mq_hw_ctx *hctx)
{
struct kyber_queue_data *kqd = hctx->queue->elevator->elevator_data;
+ struct blk_mq_tags *tags = hctx->sched_tags;
+ unsigned int shift = tags->bitmap_tags->sb.shift;
+
+ kqd->async_depth = (1U << shift) * KYBER_ASYNC_PERCENT / 100U;
+
+ sbitmap_queue_min_shallow_depth(tags->bitmap_tags, kqd->async_depth);
+}
+
+static int kyber_init_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
+{
struct kyber_hctx_data *khd;
int i;
@@ -502,8 +501,7 @@ static int kyber_init_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
khd->batching = 0;
hctx->sched_data = khd;
- sbitmap_queue_min_shallow_depth(hctx->sched_tags->bitmap_tags,
- kqd->async_depth);
+ kyber_depth_updated(hctx);
return 0;
@@ -602,7 +600,7 @@ static void kyber_insert_requests(struct blk_mq_hw_ctx *hctx,
list_move_tail(&rq->queuelist, head);
sbitmap_set_bit(&khd->kcq_map[sched_domain],
rq->mq_ctx->index_hw[hctx->type]);
- blk_mq_sched_request_inserted(rq);
+ trace_block_rq_insert(rq);
spin_unlock(&kcq->lock);
}
}
@@ -1022,6 +1020,7 @@ static struct elevator_type kyber_sched = {
.completed_request = kyber_completed_request,
.dispatch_request = kyber_dispatch_request,
.has_work = kyber_has_work,
+ .depth_updated = kyber_depth_updated,
},
#ifdef CONFIG_BLK_DEBUG_FS
.queue_debugfs_attrs = kyber_queue_debugfs_attrs,
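Because kyber_depth_updated() is also installed as the elevator's ->depth_updated callback (last hunk above), async_depth now tracks runtime changes to the queue depth instead of being computed once at init. A worked example, assuming the mainline KYBER_ASYNC_PERCENT value of 75:

/*
 * sched_tags depth 64  =>  sb.shift == 6
 *
 *   async_depth = (1U << 6) * 75 / 100 = 48
 *
 * so asynchronous requests may hold at most 48 of the 64 tags, and
 * sbitmap_queue_min_shallow_depth() is told about that floor so the
 * sbitmap wake batching stays correct.
 */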
diff --git a/block/mq-deadline.c b/block/mq-deadline.c
index b57470e154c8..f3631a287466 100644
--- a/block/mq-deadline.c
+++ b/block/mq-deadline.c
@@ -18,6 +18,8 @@
#include <linux/rbtree.h>
#include <linux/sbitmap.h>
+#include <trace/events/block.h>
+
#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
@@ -496,7 +498,7 @@ static void dd_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
if (blk_mq_sched_try_insert_merge(q, rq))
return;
- blk_mq_sched_request_inserted(rq);
+ trace_block_rq_insert(rq);
if (at_head || blk_rq_is_passthrough(rq)) {
if (at_head)