Diffstat (limited to 'block/blk-core.c')
-rw-r--r--  block/blk-core.c | 391
1 file changed, 37 insertions(+), 354 deletions(-)
diff --git a/block/blk-core.c b/block/blk-core.c
index 1378d084c770..97f8bc8d3a79 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -16,7 +16,6 @@
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
-#include <linux/blk-mq.h>
#include <linux/blk-pm.h>
#include <linux/blk-integrity.h>
#include <linux/highmem.h>
@@ -40,6 +39,7 @@
#include <linux/debugfs.h>
#include <linux/bpf.h>
#include <linux/psi.h>
+#include <linux/part_stat.h>
#include <linux/sched/sysctl.h>
#include <linux/blk-crypto.h>
@@ -47,7 +47,6 @@
#include <trace/events/block.h>
#include "blk.h"
-#include "blk-mq.h"
#include "blk-mq-sched.h"
#include "blk-pm.h"
#include "blk-throttle.h"
@@ -67,6 +66,7 @@ DEFINE_IDA(blk_queue_ida);
* For queue allocation
*/
struct kmem_cache *blk_requestq_cachep;
+struct kmem_cache *blk_requestq_srcu_cachep;
/*
* Controlling structure to kblockd
@@ -109,23 +109,6 @@ bool blk_queue_flag_test_and_set(unsigned int flag, struct request_queue *q)
}
EXPORT_SYMBOL_GPL(blk_queue_flag_test_and_set);
-void blk_rq_init(struct request_queue *q, struct request *rq)
-{
- memset(rq, 0, sizeof(*rq));
-
- INIT_LIST_HEAD(&rq->queuelist);
- rq->q = q;
- rq->__sector = (sector_t) -1;
- INIT_HLIST_NODE(&rq->hash);
- RB_CLEAR_NODE(&rq->rb_node);
- rq->tag = BLK_MQ_NO_TAG;
- rq->internal_tag = BLK_MQ_NO_TAG;
- rq->start_time_ns = ktime_get_ns();
- rq->part = NULL;
- blk_crypto_rq_set_defaults(rq);
-}
-EXPORT_SYMBOL(blk_rq_init);
-
#define REQ_OP_NAME(name) [REQ_OP_##name] = #name
static const char *const blk_op_name[] = {
REQ_OP_NAME(READ),
@@ -216,38 +199,15 @@ int blk_status_to_errno(blk_status_t status)
}
EXPORT_SYMBOL_GPL(blk_status_to_errno);
-void blk_print_req_error(struct request *req, blk_status_t status)
+const char *blk_status_to_str(blk_status_t status)
{
int idx = (__force int)status;
if (WARN_ON_ONCE(idx >= ARRAY_SIZE(blk_errors)))
- return;
-
- printk_ratelimited(KERN_ERR
- "%s error, dev %s, sector %llu op 0x%x:(%s) flags 0x%x "
- "phys_seg %u prio class %u\n",
- blk_errors[idx].name,
- req->rq_disk ? req->rq_disk->disk_name : "?",
- blk_rq_pos(req), req_op(req), blk_op_str(req_op(req)),
- req->cmd_flags & ~REQ_OP_MASK,
- req->nr_phys_segments,
- IOPRIO_PRIO_CLASS(req->ioprio));
+ return "<null>";
+ return blk_errors[idx].name;
}
-void blk_dump_rq_flags(struct request *rq, char *msg)
-{
- printk(KERN_INFO "%s: dev %s: flags=%llx\n", msg,
- rq->rq_disk ? rq->rq_disk->disk_name : "?",
- (unsigned long long) rq->cmd_flags);
-
- printk(KERN_INFO " sector %llu, nr/cnr %u/%u\n",
- (unsigned long long)blk_rq_pos(rq),
- blk_rq_sectors(rq), blk_rq_cur_sectors(rq));
- printk(KERN_INFO " bio %p, biotail %p, len %u\n",
- rq->bio, rq->biotail, blk_rq_bytes(rq));
-}
-EXPORT_SYMBOL(blk_dump_rq_flags);
-
/**
* blk_sync_queue - cancel any pending callbacks on a queue
* @q: the queue
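
The ratelimited request-error printer removed above is presumably rebuilt at its new home on top of blk_status_to_str(); a minimal sketch, reusing only the fields shown in the removed code:

static void blk_print_req_error(struct request *req, blk_status_t status)
{
	printk_ratelimited(KERN_ERR
		"%s error, dev %s, sector %llu op 0x%x:(%s) flags 0x%x "
		"phys_seg %u prio class %u\n",
		blk_status_to_str(status),
		req->rq_disk ? req->rq_disk->disk_name : "?",
		blk_rq_pos(req), req_op(req), blk_op_str(req_op(req)),
		req->cmd_flags & ~REQ_OP_MASK,
		req->nr_phys_segments,
		IOPRIO_PRIO_CLASS(req->ioprio));
}
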
@@ -478,21 +438,27 @@ static void blk_timeout_work(struct work_struct *work)
{
}
-struct request_queue *blk_alloc_queue(int node_id)
+struct request_queue *blk_alloc_queue(int node_id, bool alloc_srcu)
{
struct request_queue *q;
int ret;
- q = kmem_cache_alloc_node(blk_requestq_cachep,
- GFP_KERNEL | __GFP_ZERO, node_id);
+ q = kmem_cache_alloc_node(blk_get_queue_kmem_cache(alloc_srcu),
+ GFP_KERNEL | __GFP_ZERO, node_id);
if (!q)
return NULL;
+ if (alloc_srcu) {
+ blk_queue_flag_set(QUEUE_FLAG_HAS_SRCU, q);
+ if (init_srcu_struct(q->srcu) != 0)
+ goto fail_q;
+ }
+
q->last_merge = NULL;
q->id = ida_simple_get(&blk_queue_ida, 0, 0, GFP_KERNEL);
if (q->id < 0)
- goto fail_q;
+ goto fail_srcu;
ret = bioset_init(&q->bio_split, BIO_POOL_SIZE, 0, 0);
if (ret)
@@ -549,8 +515,11 @@ fail_split:
bioset_exit(&q->bio_split);
fail_id:
ida_simple_remove(&blk_queue_ida, q->id);
+fail_srcu:
+ if (alloc_srcu)
+ cleanup_srcu_struct(q->srcu);
fail_q:
- kmem_cache_free(blk_requestq_cachep, q);
+ kmem_cache_free(blk_get_queue_kmem_cache(alloc_srcu), q);
return NULL;
}
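
blk_get_queue_kmem_cache() is not defined in this file; presumably it is a small selector in blk.h that returns the slab matching the allocation flavour. A sketch under that assumption:

static inline struct kmem_cache *blk_get_queue_kmem_cache(bool srcu)
{
	if (srcu)
		return blk_requestq_srcu_cachep;
	return blk_requestq_cachep;
}
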
@@ -594,7 +563,7 @@ static int __init setup_fail_make_request(char *str)
}
__setup("fail_make_request=", setup_fail_make_request);
-static bool should_fail_request(struct block_device *part, unsigned int bytes)
+bool should_fail_request(struct block_device *part, unsigned int bytes)
{
return part->bd_make_it_fail && should_fail(&fail_make_request, bytes);
}
@@ -608,15 +577,6 @@ static int __init fail_make_request_debugfs(void)
}
late_initcall(fail_make_request_debugfs);
-
-#else /* CONFIG_FAIL_MAKE_REQUEST */
-
-static inline bool should_fail_request(struct block_device *part,
- unsigned int bytes)
-{
- return false;
-}
-
#endif /* CONFIG_FAIL_MAKE_REQUEST */
static inline bool bio_check_ro(struct bio *bio)
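
Since should_fail_request() is no longer static, the !CONFIG_FAIL_MAKE_REQUEST stub deleted above presumably moves to a shared header (e.g. blk.h) so that callers outside this file keep compiling; roughly:

#ifdef CONFIG_FAIL_MAKE_REQUEST
bool should_fail_request(struct block_device *part, unsigned int bytes);
#else
static inline bool should_fail_request(struct block_device *part,
				       unsigned int bytes)
{
	return false;
}
#endif
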
@@ -802,15 +762,6 @@ noinline_for_stack bool submit_bio_checks(struct bio *bio)
break;
}
- /*
- * Various block parts want %current->io_context, so allocate it up
- * front rather than dealing with lots of pain to allocate it only
- * where needed. This may fail and the block layer knows how to live
- * with it.
- */
- if (unlikely(!current->io_context))
- create_task_io_context(current, GFP_ATOMIC, q->node);
-
if (blk_throtl_bio(bio))
return false;
@@ -836,17 +787,21 @@ end_io:
static void __submit_bio_fops(struct gendisk *disk, struct bio *bio)
{
- if (unlikely(bio_queue_enter(bio) != 0))
- return;
- if (submit_bio_checks(bio) && blk_crypto_bio_prep(&bio))
- disk->fops->submit_bio(bio);
- blk_queue_exit(disk->queue);
+ if (blk_crypto_bio_prep(&bio)) {
+ if (likely(bio_queue_enter(bio) == 0)) {
+ disk->fops->submit_bio(bio);
+ blk_queue_exit(disk->queue);
+ }
+ }
}
static void __submit_bio(struct bio *bio)
{
struct gendisk *disk = bio->bi_bdev->bd_disk;
+ if (unlikely(!submit_bio_checks(bio)))
+ return;
+
if (!disk->fops->submit_bio)
blk_mq_submit_bio(bio);
else
@@ -1090,135 +1045,7 @@ int iocb_bio_iopoll(struct kiocb *kiocb, struct io_comp_batch *iob,
}
EXPORT_SYMBOL_GPL(iocb_bio_iopoll);
-/**
- * blk_cloned_rq_check_limits - Helper function to check a cloned request
- * for the new queue limits
- * @q: the queue
- * @rq: the request being checked
- *
- * Description:
- * @rq may have been made based on weaker limitations of upper-level queues
- * in request stacking drivers, and it may violate the limitation of @q.
- * Since the block layer and the underlying device driver trust @rq
- * after it is inserted to @q, it should be checked against @q before
- * the insertion using this generic function.
- *
- * Request stacking drivers like request-based dm may change the queue
- * limits when retrying requests on other queues. Those requests need
- * to be checked against the new queue limits again during dispatch.
- */
-static blk_status_t blk_cloned_rq_check_limits(struct request_queue *q,
- struct request *rq)
-{
- unsigned int max_sectors = blk_queue_get_max_sectors(q, req_op(rq));
-
- if (blk_rq_sectors(rq) > max_sectors) {
- /*
- * SCSI device does not have a good way to return if
- * Write Same/Zero is actually supported. If a device rejects
- * a non-read/write command (discard, write same,etc.) the
- * low-level device driver will set the relevant queue limit to
- * 0 to prevent blk-lib from issuing more of the offending
- * operations. Commands queued prior to the queue limit being
- * reset need to be completed with BLK_STS_NOTSUPP to avoid I/O
- * errors being propagated to upper layers.
- */
- if (max_sectors == 0)
- return BLK_STS_NOTSUPP;
-
- printk(KERN_ERR "%s: over max size limit. (%u > %u)\n",
- __func__, blk_rq_sectors(rq), max_sectors);
- return BLK_STS_IOERR;
- }
-
- /*
- * The queue settings related to segment counting may differ from the
- * original queue.
- */
- rq->nr_phys_segments = blk_recalc_rq_segments(rq);
- if (rq->nr_phys_segments > queue_max_segments(q)) {
- printk(KERN_ERR "%s: over max segments limit. (%hu > %hu)\n",
- __func__, rq->nr_phys_segments, queue_max_segments(q));
- return BLK_STS_IOERR;
- }
-
- return BLK_STS_OK;
-}
-
-/**
- * blk_insert_cloned_request - Helper for stacking drivers to submit a request
- * @q: the queue to submit the request
- * @rq: the request being queued
- */
-blk_status_t blk_insert_cloned_request(struct request_queue *q, struct request *rq)
-{
- blk_status_t ret;
-
- ret = blk_cloned_rq_check_limits(q, rq);
- if (ret != BLK_STS_OK)
- return ret;
-
- if (rq->rq_disk &&
- should_fail_request(rq->rq_disk->part0, blk_rq_bytes(rq)))
- return BLK_STS_IOERR;
-
- if (blk_crypto_insert_cloned_request(rq))
- return BLK_STS_IOERR;
-
- blk_account_io_start(rq);
-
- /*
- * Since we have a scheduler attached on the top device,
- * bypass a potential scheduler on the bottom device for
- * insert.
- */
- return blk_mq_request_issue_directly(rq, true);
-}
-EXPORT_SYMBOL_GPL(blk_insert_cloned_request);
-
-/**
- * blk_rq_err_bytes - determine number of bytes till the next failure boundary
- * @rq: request to examine
- *
- * Description:
- * A request could be merge of IOs which require different failure
- * handling. This function determines the number of bytes which
- * can be failed from the beginning of the request without
- * crossing into area which need to be retried further.
- *
- * Return:
- * The number of bytes to fail.
- */
-unsigned int blk_rq_err_bytes(const struct request *rq)
-{
- unsigned int ff = rq->cmd_flags & REQ_FAILFAST_MASK;
- unsigned int bytes = 0;
- struct bio *bio;
-
- if (!(rq->rq_flags & RQF_MIXED_MERGE))
- return blk_rq_bytes(rq);
-
- /*
- * Currently the only 'mixing' which can happen is between
- * different fastfail types. We can safely fail portions
- * which have all the failfast bits that the first one has -
- * the ones which are at least as eager to fail as the first
- * one.
- */
- for (bio = rq->bio; bio; bio = bio->bi_next) {
- if ((bio->bi_opf & ff) != ff)
- break;
- bytes += bio->bi_iter.bi_size;
- }
-
- /* this could lead to infinite loop */
- BUG_ON(blk_rq_bytes(rq) && !bytes);
- return bytes;
-}
-EXPORT_SYMBOL_GPL(blk_rq_err_bytes);
-
-static void update_io_ticks(struct block_device *part, unsigned long now,
- bool end)
+void update_io_ticks(struct block_device *part, unsigned long now, bool end)
{
unsigned long stamp;
again:
@@ -1233,30 +1060,6 @@ again:
}
}
-void __blk_account_io_done(struct request *req, u64 now)
-{
- const int sgrp = op_stat_group(req_op(req));
-
- part_stat_lock();
- update_io_ticks(req->part, jiffies, true);
- part_stat_inc(req->part, ios[sgrp]);
- part_stat_add(req->part, nsecs[sgrp], now - req->start_time_ns);
- part_stat_unlock();
-}
-
-void __blk_account_io_start(struct request *rq)
-{
- /* passthrough requests can hold bios that do not have ->bi_bdev set */
- if (rq->bio && rq->bio->bi_bdev)
- rq->part = rq->bio->bi_bdev;
- else
- rq->part = rq->rq_disk->part0;
-
- part_stat_lock();
- update_io_ticks(rq->part, jiffies, false);
- part_stat_unlock();
-}
-
static unsigned long __part_start_io_acct(struct block_device *part,
unsigned int sectors, unsigned int op)
{
@@ -1320,46 +1123,6 @@ void disk_end_io_acct(struct gendisk *disk, unsigned int op,
}
EXPORT_SYMBOL(disk_end_io_acct);
-/*
- * Steal bios from a request and add them to a bio list.
- * The request must not have been partially completed before.
- */
-void blk_steal_bios(struct bio_list *list, struct request *rq)
-{
- if (rq->bio) {
- if (list->tail)
- list->tail->bi_next = rq->bio;
- else
- list->head = rq->bio;
- list->tail = rq->biotail;
-
- rq->bio = NULL;
- rq->biotail = NULL;
- }
-
- rq->__data_len = 0;
-}
-EXPORT_SYMBOL_GPL(blk_steal_bios);
-
-#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
-/**
- * rq_flush_dcache_pages - Helper function to flush all pages in a request
- * @rq: the request to be flushed
- *
- * Description:
- * Flush all pages in @rq.
- */
-void rq_flush_dcache_pages(struct request *rq)
-{
- struct req_iterator iter;
- struct bio_vec bvec;
-
- rq_for_each_segment(bvec, rq, iter)
- flush_dcache_page(bvec.bv_page);
-}
-EXPORT_SYMBOL_GPL(rq_flush_dcache_pages);
-#endif
-
/**
* blk_lld_busy - Check if underlying low-level drivers of a device are busy
* @q : the queue of the device being checked
@@ -1388,93 +1151,6 @@ int blk_lld_busy(struct request_queue *q)
}
EXPORT_SYMBOL_GPL(blk_lld_busy);
-/**
- * blk_rq_unprep_clone - Helper function to free all bios in a cloned request
- * @rq: the clone request to be cleaned up
- *
- * Description:
- * Free all bios in @rq for a cloned request.
- */
-void blk_rq_unprep_clone(struct request *rq)
-{
- struct bio *bio;
-
- while ((bio = rq->bio) != NULL) {
- rq->bio = bio->bi_next;
-
- bio_put(bio);
- }
-}
-EXPORT_SYMBOL_GPL(blk_rq_unprep_clone);
-
-/**
- * blk_rq_prep_clone - Helper function to setup clone request
- * @rq: the request to be setup
- * @rq_src: original request to be cloned
- * @bs: bio_set that bios for clone are allocated from
- * @gfp_mask: memory allocation mask for bio
- * @bio_ctr: setup function to be called for each clone bio.
- * Returns %0 for success, non %0 for failure.
- * @data: private data to be passed to @bio_ctr
- *
- * Description:
- * Clones bios in @rq_src to @rq, and copies attributes of @rq_src to @rq.
- * Also, pages which the original bios are pointing to are not copied
- * and the cloned bios just point same pages.
- * So cloned bios must be completed before original bios, which means
- * the caller must complete @rq before @rq_src.
- */
-int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
- struct bio_set *bs, gfp_t gfp_mask,
- int (*bio_ctr)(struct bio *, struct bio *, void *),
- void *data)
-{
- struct bio *bio, *bio_src;
-
- if (!bs)
- bs = &fs_bio_set;
-
- __rq_for_each_bio(bio_src, rq_src) {
- bio = bio_clone_fast(bio_src, gfp_mask, bs);
- if (!bio)
- goto free_and_out;
-
- if (bio_ctr && bio_ctr(bio, bio_src, data))
- goto free_and_out;
-
- if (rq->bio) {
- rq->biotail->bi_next = bio;
- rq->biotail = bio;
- } else {
- rq->bio = rq->biotail = bio;
- }
- bio = NULL;
- }
-
- /* Copy attributes of the original request to the clone request. */
- rq->__sector = blk_rq_pos(rq_src);
- rq->__data_len = blk_rq_bytes(rq_src);
- if (rq_src->rq_flags & RQF_SPECIAL_PAYLOAD) {
- rq->rq_flags |= RQF_SPECIAL_PAYLOAD;
- rq->special_vec = rq_src->special_vec;
- }
- rq->nr_phys_segments = rq_src->nr_phys_segments;
- rq->ioprio = rq_src->ioprio;
-
- if (rq->bio && blk_crypto_rq_bio_prep(rq, rq->bio, gfp_mask) < 0)
- goto free_and_out;
-
- return 0;
-
-free_and_out:
- if (bio)
- bio_put(bio);
- blk_rq_unprep_clone(rq);
-
- return -ENOMEM;
-}
-EXPORT_SYMBOL_GPL(blk_rq_prep_clone);
-
int kblockd_schedule_work(struct work_struct *work)
{
return queue_work(kblockd_workqueue, work);
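
The cloning helpers removed above are presumably relocated rather than dropped, and they keep the semantics spelled out in their deleted comments: cloned bios point at the source request's pages, so the clone must complete before @rq_src. A hypothetical stacking-driver caller, using only the signatures visible in the removed code:

static blk_status_t clone_and_issue(struct request *clone,
				    struct request *rq_src,
				    struct bio_set *bs)
{
	if (blk_rq_prep_clone(clone, rq_src, bs, GFP_ATOMIC, NULL, NULL))
		return BLK_STS_RESOURCE;

	/* the clone shares rq_src's pages, so it must finish first */
	return blk_insert_cloned_request(clone->q, clone);
}
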
@@ -1639,6 +1315,9 @@ int __init blk_dev_init(void)
sizeof_field(struct request, cmd_flags));
BUILD_BUG_ON(REQ_OP_BITS + REQ_FLAG_BITS > 8 *
sizeof_field(struct bio, bi_opf));
+ BUILD_BUG_ON(ALIGN(offsetof(struct request_queue, srcu),
+ __alignof__(struct request_queue)) !=
+ sizeof(struct request_queue));
/* used for unplugging and affects IO latency/throughput - HIGHPRI */
kblockd_workqueue = alloc_workqueue("kblockd",
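
This BUILD_BUG_ON only holds if srcu is the last member of struct request_queue; together with the larger slab created below, that points at a flexible tail member, something like (a layout sketch, not the verified header change):

struct request_queue {
	/* ... existing members ... */

	/* must stay last: the srcu slab variant allocates space for it */
	struct srcu_struct	srcu[];
};
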
@@ -1649,6 +1328,10 @@ int __init blk_dev_init(void)
blk_requestq_cachep = kmem_cache_create("request_queue",
sizeof(struct request_queue), 0, SLAB_PANIC, NULL);
+ blk_requestq_srcu_cachep = kmem_cache_create("request_queue_srcu",
+ sizeof(struct request_queue) +
+ sizeof(struct srcu_struct), 0, SLAB_PANIC, NULL);
+
blk_debugfs_root = debugfs_create_dir("block", NULL);
return 0;
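
A plausible consumer of the two-flavour allocation is the blocking blk-mq setup path, choosing the SRCU variant per tag-set flags (hypothetical caller, names assumed):

static struct request_queue *example_alloc_queue(struct blk_mq_tag_set *set)
{
	/* blocking drivers need SRCU protection across dispatch/quiesce */
	return blk_alloc_queue(set->numa_node,
			       set->flags & BLK_MQ_F_BLOCKING);
}
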