author		Linus Torvalds <torvalds@linux-foundation.org>	2023-08-19 18:31:46 +0300
committer	Linus Torvalds <torvalds@linux-foundation.org>	2023-08-19 18:31:46 +0300
commit		2383ffc41a7c701c058f6bb18030cd569aecd541 (patch)
tree		9e19b98728bc9f6be09d35b0b3b18da651f9150b /block
parent		aa9ea98cca3a56869df1dc6347f3e3bfe0c01f44 (diff)
parent		e5c0ca13659e9d18f53368d651ed7e6e433ec1cf (diff)
download	linux-2383ffc41a7c701c058f6bb18030cd569aecd541.tar.xz
Merge tag 'block-6.5-2023-08-19' of git://git.kernel.dk/linux

Pull block fixes from Jens Axboe:
 "Main thing here is the fix for the regression in flush handling which
  caused IO hangs/stalls for a few reporters. Hopefully that should all
  be sorted out now. Outside of that, just a few minor fixes for issues
  that were introduced in this cycle"

* tag 'block-6.5-2023-08-19' of git://git.kernel.dk/linux:
  blk-mq: release scheduler resource when request completes
  blk-crypto: dynamically allocate fallback profile
  blk-cgroup: hold queue_lock when removing blkg->q_node
  drivers/rnbd: restore sysfs interface to rnbd-client
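The headline fix is easier to follow with a tiny model of the hazard: a postflush request can be completed twice, so the scheduler's finish hook must only run on the first completion. Below is a minimal userspace C sketch of that guard pattern; the struct, flag name, and callback are illustrative stand-ins, not the kernel's request/RQF_USE_SCHED/elevator API.

#include <stdio.h>

#define FLAG_USE_SCHED (1u << 0)	/* stand-in for RQF_USE_SCHED */

struct request {
	unsigned int rq_flags;
};

static void finish_request(struct request *rq)
{
	printf("scheduler finish hook ran\n");
}

/* Clear the flag on the first finish so a second completion of the same
 * request cannot invoke the hook again. */
static void finish_once(struct request *rq)
{
	if (rq->rq_flags & FLAG_USE_SCHED) {
		finish_request(rq);
		rq->rq_flags &= ~FLAG_USE_SCHED;
	}
}

int main(void)
{
	struct request rq = { .rq_flags = FLAG_USE_SCHED };

	finish_once(&rq);	/* first completion: hook runs */
	finish_once(&rq);	/* second completion: hook is skipped */
	return 0;
}
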
Diffstat (limited to 'block')

-rw-r--r--	block/blk-cgroup.c		 2
-rw-r--r--	block/blk-crypto-fallback.c	36
-rw-r--r--	block/blk-mq.c			23
-rw-r--r--	block/elevator.c		 3

4 files changed, 48 insertions, 16 deletions
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index fc49be622e05..9faafcd10e17 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -136,7 +136,9 @@ static void blkg_free_workfn(struct work_struct *work)
 			blkcg_policy[i]->pd_free_fn(blkg->pd[i]);
 	if (blkg->parent)
 		blkg_put(blkg->parent);
+	spin_lock_irq(&q->queue_lock);
 	list_del_init(&blkg->q_node);
+	spin_unlock_irq(&q->queue_lock);
 	mutex_unlock(&q->blkcg_mutex);
 
 	blk_put_queue(q);
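The hunk above wraps the list_del_init() of blkg->q_node in q->queue_lock so the blkg cannot be unlinked while another context is walking the same list. A hedged userspace analog of that rule follows, using a pthread mutex in place of the queue spinlock; all names here are illustrative.

#include <pthread.h>
#include <stdio.h>

/* Minimal doubly linked list node, in the spirit of the kernel's list_head. */
struct list_head {
	struct list_head *next, *prev;
};

static void list_del_init(struct list_head *entry)
{
	entry->prev->next = entry->next;
	entry->next->prev = entry->prev;
	entry->next = entry;
	entry->prev = entry;
}

static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;

/* The rule the hunk enforces: unlink under the same lock that list walkers
 * take, so no walker can observe a half-removed node. */
static void remove_node(struct list_head *node)
{
	pthread_mutex_lock(&queue_lock);
	list_del_init(node);
	pthread_mutex_unlock(&queue_lock);
}

int main(void)
{
	struct list_head head = { &head, &head };
	struct list_head node = { &head, &head };

	head.next = &node;
	head.prev = &node;
	remove_node(&node);
	printf("list empty: %d\n", head.next == &head);
	return 0;
}
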
diff --git a/block/blk-crypto-fallback.c b/block/blk-crypto-fallback.c
index ad9844c5b40c..e6468eab2681 100644
--- a/block/blk-crypto-fallback.c
+++ b/block/blk-crypto-fallback.c
@@ -78,7 +78,7 @@ static struct blk_crypto_fallback_keyslot {
 	struct crypto_skcipher *tfms[BLK_ENCRYPTION_MODE_MAX];
 } *blk_crypto_keyslots;
 
-static struct blk_crypto_profile blk_crypto_fallback_profile;
+static struct blk_crypto_profile *blk_crypto_fallback_profile;
 static struct workqueue_struct *blk_crypto_wq;
 static mempool_t *blk_crypto_bounce_page_pool;
 static struct bio_set crypto_bio_split;
@@ -292,7 +292,7 @@ static bool blk_crypto_fallback_encrypt_bio(struct bio **bio_ptr)
 	 * Get a blk-crypto-fallback keyslot that contains a crypto_skcipher for
 	 * this bio's algorithm and key.
 	 */
-	blk_st = blk_crypto_get_keyslot(&blk_crypto_fallback_profile,
+	blk_st = blk_crypto_get_keyslot(blk_crypto_fallback_profile,
 					bc->bc_key, &slot);
 	if (blk_st != BLK_STS_OK) {
 		src_bio->bi_status = blk_st;
@@ -395,7 +395,7 @@ static void blk_crypto_fallback_decrypt_bio(struct work_struct *work)
 	 * Get a blk-crypto-fallback keyslot that contains a crypto_skcipher for
 	 * this bio's algorithm and key.
 	 */
-	blk_st = blk_crypto_get_keyslot(&blk_crypto_fallback_profile,
+	blk_st = blk_crypto_get_keyslot(blk_crypto_fallback_profile,
 					bc->bc_key, &slot);
 	if (blk_st != BLK_STS_OK) {
 		bio->bi_status = blk_st;
@@ -499,7 +499,7 @@ bool blk_crypto_fallback_bio_prep(struct bio **bio_ptr)
 		return false;
 	}
 
-	if (!__blk_crypto_cfg_supported(&blk_crypto_fallback_profile,
+	if (!__blk_crypto_cfg_supported(blk_crypto_fallback_profile,
 					&bc->bc_key->crypto_cfg)) {
 		bio->bi_status = BLK_STS_NOTSUPP;
 		return false;
@@ -526,7 +526,7 @@ bool blk_crypto_fallback_bio_prep(struct bio **bio_ptr)
 
 int blk_crypto_fallback_evict_key(const struct blk_crypto_key *key)
 {
-	return __blk_crypto_evict_key(&blk_crypto_fallback_profile, key);
+	return __blk_crypto_evict_key(blk_crypto_fallback_profile, key);
 }
 
 static bool blk_crypto_fallback_inited;
@@ -534,7 +534,6 @@ static int blk_crypto_fallback_init(void)
 {
 	int i;
 	int err;
-	struct blk_crypto_profile *profile = &blk_crypto_fallback_profile;
 
 	if (blk_crypto_fallback_inited)
 		return 0;
@@ -545,18 +544,27 @@ static int blk_crypto_fallback_init(void)
 	if (err)
 		goto out;
 
-	err = blk_crypto_profile_init(profile, blk_crypto_num_keyslots);
-	if (err)
+	/* Dynamic allocation is needed because of lockdep_register_key(). */
+	blk_crypto_fallback_profile =
+		kzalloc(sizeof(*blk_crypto_fallback_profile), GFP_KERNEL);
+	if (!blk_crypto_fallback_profile) {
+		err = -ENOMEM;
 		goto fail_free_bioset;
+	}
+
+	err = blk_crypto_profile_init(blk_crypto_fallback_profile,
+				      blk_crypto_num_keyslots);
+	if (err)
+		goto fail_free_profile;
 	err = -ENOMEM;
 
-	profile->ll_ops = blk_crypto_fallback_ll_ops;
-	profile->max_dun_bytes_supported = BLK_CRYPTO_MAX_IV_SIZE;
+	blk_crypto_fallback_profile->ll_ops = blk_crypto_fallback_ll_ops;
+	blk_crypto_fallback_profile->max_dun_bytes_supported = BLK_CRYPTO_MAX_IV_SIZE;
 
 	/* All blk-crypto modes have a crypto API fallback. */
 	for (i = 0; i < BLK_ENCRYPTION_MODE_MAX; i++)
-		profile->modes_supported[i] = 0xFFFFFFFF;
-	profile->modes_supported[BLK_ENCRYPTION_MODE_INVALID] = 0;
+		blk_crypto_fallback_profile->modes_supported[i] = 0xFFFFFFFF;
+	blk_crypto_fallback_profile->modes_supported[BLK_ENCRYPTION_MODE_INVALID] = 0;
 
 	blk_crypto_wq = alloc_workqueue("blk_crypto_wq",
 					WQ_UNBOUND | WQ_HIGHPRI |
@@ -597,7 +605,9 @@ fail_free_keyslots:
 fail_free_wq:
 	destroy_workqueue(blk_crypto_wq);
 fail_destroy_profile:
-	blk_crypto_profile_destroy(profile);
+	blk_crypto_profile_destroy(blk_crypto_fallback_profile);
+fail_free_profile:
+	kfree(blk_crypto_fallback_profile);
 fail_free_bioset:
 	bioset_exit(&crypto_bio_split);
 out:
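The fallback profile is now allocated with kzalloc() (the in-diff comment attributes this to lockdep_register_key()), and the error path gains a fail_free_profile label so a failed blk_crypto_profile_init() frees the allocation. A hedged userspace analog of that allocate/init/unwind ladder follows; the names here are illustrative, not the kernel functions.

#include <errno.h>
#include <stdlib.h>

struct profile { int num_slots; };

static struct profile *fallback_profile;

static int profile_init(struct profile *p, int slots)
{
	p->num_slots = slots;
	return 0;
}

/*
 * Analog of the reworked init path: allocate first, initialize second, and
 * unwind in reverse order, with a dedicated label that frees only what was
 * set up before the failing step.
 */
static int fallback_init(int slots)
{
	int err;

	fallback_profile = calloc(1, sizeof(*fallback_profile));
	if (!fallback_profile)
		return -ENOMEM;

	err = profile_init(fallback_profile, slots);
	if (err)
		goto fail_free_profile;

	return 0;

fail_free_profile:
	free(fallback_profile);
	fallback_profile = NULL;
	return err;
}

int main(void)
{
	return fallback_init(64) ? 1 : 0;
}
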
diff --git a/block/blk-mq.c b/block/blk-mq.c
index b04ff6f56926..953f08354c8c 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -681,6 +681,21 @@ out_queue_exit:
 }
 EXPORT_SYMBOL_GPL(blk_mq_alloc_request_hctx);
 
+static void blk_mq_finish_request(struct request *rq)
+{
+	struct request_queue *q = rq->q;
+
+	if (rq->rq_flags & RQF_USE_SCHED) {
+		q->elevator->type->ops.finish_request(rq);
+		/*
+		 * For postflush request that may need to be
+		 * completed twice, we should clear this flag
+		 * to avoid double finish_request() on the rq.
+		 */
+		rq->rq_flags &= ~RQF_USE_SCHED;
+	}
+}
+
 static void __blk_mq_free_request(struct request *rq)
 {
 	struct request_queue *q = rq->q;
@@ -707,9 +722,7 @@ void blk_mq_free_request(struct request *rq)
 {
 	struct request_queue *q = rq->q;
 
-	if ((rq->rq_flags & RQF_USE_SCHED) &&
-	    q->elevator->type->ops.finish_request)
-		q->elevator->type->ops.finish_request(rq);
+	blk_mq_finish_request(rq);
 
 	if (unlikely(laptop_mode && !blk_rq_is_passthrough(rq)))
 		laptop_io_completion(q->disk->bdi);
@@ -1020,6 +1033,8 @@ inline void __blk_mq_end_request(struct request *rq, blk_status_t error)
 	if (blk_mq_need_time_stamp(rq))
 		__blk_mq_end_request_acct(rq, ktime_get_ns());
 
+	blk_mq_finish_request(rq);
+
 	if (rq->end_io) {
 		rq_qos_done(rq->q, rq);
 		if (rq->end_io(rq, error) == RQ_END_IO_FREE)
@@ -1074,6 +1089,8 @@ void blk_mq_end_request_batch(struct io_comp_batch *iob)
 		if (iob->need_ts)
 			__blk_mq_end_request_acct(rq, now);
 
+		blk_mq_finish_request(rq);
+
 		rq_qos_done(rq->q, rq);
 
 		/*
diff --git a/block/elevator.c b/block/elevator.c
index 8400e303fbcb..5ff093cb3cf8 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -499,6 +499,9 @@ void elv_unregister_queue(struct request_queue *q)
 
 int elv_register(struct elevator_type *e)
 {
+	/* finish request is mandatory */
+	if (WARN_ON_ONCE(!e->ops.finish_request))
+		return -EINVAL;
 	/* insert_requests and dispatch_request are mandatory */
 	if (WARN_ON_ONCE(!e->ops.insert_requests || !e->ops.dispatch_request))
 		return -EINVAL;
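With blk-mq now calling the finish hook unconditionally, elv_register() refuses an elevator whose ops table lacks finish_request rather than risking a NULL pointer dereference at completion time. A hedged userspace analog of such a registration-time mandatory-callback check follows; the struct and function names are illustrative, not the kernel's elevator API.

#include <errno.h>
#include <stdio.h>

/* Illustrative ops table; the real elevator ops structure has more hooks. */
struct sched_ops {
	void (*finish_request)(void *rq);
	void (*insert_requests)(void *rq);
	void (*dispatch_request)(void *rq);
};

/* Reject registration when a hook the core calls unconditionally is missing,
 * instead of crashing on a NULL pointer later. */
static int register_sched(const struct sched_ops *ops)
{
	if (!ops->finish_request)
		return -EINVAL;
	if (!ops->insert_requests || !ops->dispatch_request)
		return -EINVAL;
	return 0;
}

int main(void)
{
	struct sched_ops incomplete = { 0 };

	if (register_sched(&incomplete) == -EINVAL)
		printf("registration rejected: mandatory hook missing\n");
	return 0;
}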