author     Linus Torvalds <torvalds@linux-foundation.org>  2020-12-16 23:57:51 +0300
committer  Linus Torvalds <torvalds@linux-foundation.org>  2020-12-16 23:57:51 +0300
commit     ac7ac4618cf25e0d5cd8eba83d5f600084b65b9a (patch)
tree       e5d28907ff72690a0463a2238b96202d751a535c /block
parent     48aba79bcf6ea05148dc82ad9c40713960b00396 (diff)
parent     fa94ba8a7b22890e6a17b39b9359e114fe18cd59 (diff)
download   linux-ac7ac4618cf25e0d5cd8eba83d5f600084b65b9a.tar.xz
Merge tag 'for-5.11/block-2020-12-14' of git://git.kernel.dk/linux-block
Pull block updates from Jens Axboe:
 "Another series of killing more code than what is being added, again
  thanks to Christoph's relentless cleanups and tech debt tackling.

  This contains:

   - blk-iocost improvements (Baolin Wang)

   - part0 iostat fix (Jeffle Xu)

   - Disable iopoll for split bios (Jeffle Xu)

   - block tracepoint cleanups (Christoph Hellwig)

   - Merging of struct block_device and hd_struct (Christoph Hellwig)

   - Rework/cleanup of how block device sizes are updated (Christoph Hellwig)

   - Simplification of gendisk lookup and removal of block device
     aliasing (Christoph Hellwig)

   - Block device ioctl cleanups (Christoph Hellwig)

   - Removal of bdget()/blkdev_get() as exported API (Christoph Hellwig)

   - Disk change rework, avoid ->revalidate_disk() (Christoph Hellwig)

   - sbitmap improvements (Pavel Begunkov)

   - Hybrid polling fix (Pavel Begunkov)

   - bvec iteration improvements (Pavel Begunkov)

   - Zone revalidation fixes (Damien Le Moal)

   - blk-throttle limit fix (Yu Kuai)

   - Various little fixes"

* tag 'for-5.11/block-2020-12-14' of git://git.kernel.dk/linux-block: (126 commits)
  blk-mq: fix msec comment from micro to milli seconds
  blk-mq: update arg in comment of blk_mq_map_queue
  blk-mq: add helper allocating tagset->tags
  Revert "block: Fix a lockdep complaint triggered by request queue flushing"
  nvme-loop: use blk_mq_hctx_set_fq_lock_class to set loop's lock class
  blk-mq: add new API of blk_mq_hctx_set_fq_lock_class
  block: disable iopoll for split bio
  block: Improve blk_revalidate_disk_zones() checks
  sbitmap: simplify wrap check
  sbitmap: replace CAS with atomic and
  sbitmap: remove swap_lock
  sbitmap: optimise sbitmap_deferred_clear()
  blk-mq: skip hybrid polling if iopoll doesn't spin
  blk-iocost: Factor out the base vrate change into a separate function
  blk-iocost: Factor out the active iocgs' state check into a separate function
  blk-iocost: Move the usage ratio calculation to the correct place
  blk-iocost: Remove unnecessary advance declaration
  blk-iocost: Fix some typos in comments
  blktrace: fix up a kerneldoc comment
  block: remove the request_queue to argument request based tracepoints
  ...
Diffstat (limited to 'block')
-rw-r--r--   block/bio.c                10
-rw-r--r--   block/blk-cgroup.c         51
-rw-r--r--   block/blk-core.c           70
-rw-r--r--   block/blk-flush.c          32
-rw-r--r--   block/blk-iocost.c        287
-rw-r--r--   block/blk-lib.c             2
-rw-r--r--   block/blk-merge.c          18
-rw-r--r--   block/blk-mq-sched.c        2
-rw-r--r--   block/blk-mq.c             46
-rw-r--r--   block/blk-mq.h              9
-rw-r--r--   block/blk-throttle.c        6
-rw-r--r--   block/blk-wbt.c             1
-rw-r--r--   block/blk-zoned.c          16
-rw-r--r--   block/blk.h                85
-rw-r--r--   block/bounce.c              2
-rw-r--r--   block/genhd.c             565
-rw-r--r--   block/ioctl.c              74
-rw-r--r--   block/partitions/core.c   250
18 files changed, 583 insertions, 943 deletions
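
Most of the churn below implements the hd_struct/block_device merge called out in the pull message: partition fields formerly reached through struct hd_struct become plain struct block_device members. As a rough sketch of the resulting calling convention, adapted from the guard_bio_eod() hunk in block/bio.c (the wrapper name is illustrative, not part of the patch):

static sector_t example_bio_max_sector(struct bio *bio)
{
	struct block_device *part;
	sector_t maxsector;

	rcu_read_lock();
	part = __disk_get_part(bio->bi_disk, bio->bi_partno);
	if (part)
		maxsector = bdev_nr_sectors(part);	/* was part_nr_sects_read(part) */
	else
		maxsector = get_capacity(bio->bi_disk);
	rcu_read_unlock();

	return maxsector;
}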
diff --git a/block/bio.c b/block/bio.c
index fa01bef35bb1..1f2cc1fbe283 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -608,13 +608,13 @@ void bio_truncate(struct bio *bio, unsigned new_size)
void guard_bio_eod(struct bio *bio)
{
sector_t maxsector;
- struct hd_struct *part;
+ struct block_device *part;
rcu_read_lock();
part = __disk_get_part(bio->bi_disk, bio->bi_partno);
if (part)
- maxsector = part_nr_sects_read(part);
- else
+ maxsector = bdev_nr_sectors(part);
+ else
maxsector = get_capacity(bio->bi_disk);
rcu_read_unlock();
@@ -1212,8 +1212,8 @@ void bio_copy_data_iter(struct bio *dst, struct bvec_iter *dst_iter,
flush_dcache_page(dst_bv.bv_page);
- bio_advance_iter(src, src_iter, bytes);
- bio_advance_iter(dst, dst_iter, bytes);
+ bio_advance_iter_single(src, src_iter, bytes);
+ bio_advance_iter_single(dst, dst_iter, bytes);
}
}
EXPORT_SYMBOL(bio_copy_data_iter);
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index 54fbe1e80cc4..031114d454a6 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -556,22 +556,22 @@ static struct blkcg_gq *blkg_lookup_check(struct blkcg *blkcg,
}
/**
- * blkg_conf_prep - parse and prepare for per-blkg config update
+ * blkcg_conf_open_bdev - parse and open bdev for per-blkg config update
* @inputp: input string pointer
*
* Parse the device node prefix part, MAJ:MIN, of per-blkg config update
- * from @input and get and return the matching gendisk. *@inputp is
+ * from @input and get and return the matching bdev. *@inputp is
* updated to point past the device node prefix. Returns an ERR_PTR()
* value on error.
*
* Use this function iff blkg_conf_prep() can't be used for some reason.
*/
-struct gendisk *blkcg_conf_get_disk(char **inputp)
+struct block_device *blkcg_conf_open_bdev(char **inputp)
{
char *input = *inputp;
unsigned int major, minor;
- struct gendisk *disk;
- int key_len, part;
+ struct block_device *bdev;
+ int key_len;
if (sscanf(input, "%u:%u%n", &major, &minor, &key_len) != 2)
return ERR_PTR(-EINVAL);
@@ -581,16 +581,16 @@ struct gendisk *blkcg_conf_get_disk(char **inputp)
return ERR_PTR(-EINVAL);
input = skip_spaces(input);
- disk = get_gendisk(MKDEV(major, minor), &part);
- if (!disk)
+ bdev = blkdev_get_no_open(MKDEV(major, minor));
+ if (!bdev)
return ERR_PTR(-ENODEV);
- if (part) {
- put_disk_and_module(disk);
+ if (bdev_is_partition(bdev)) {
+ blkdev_put_no_open(bdev);
return ERR_PTR(-ENODEV);
}
*inputp = input;
- return disk;
+ return bdev;
}
/**
@@ -607,18 +607,18 @@ struct gendisk *blkcg_conf_get_disk(char **inputp)
*/
int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
char *input, struct blkg_conf_ctx *ctx)
- __acquires(rcu) __acquires(&disk->queue->queue_lock)
+ __acquires(rcu) __acquires(&bdev->bd_disk->queue->queue_lock)
{
- struct gendisk *disk;
+ struct block_device *bdev;
struct request_queue *q;
struct blkcg_gq *blkg;
int ret;
- disk = blkcg_conf_get_disk(&input);
- if (IS_ERR(disk))
- return PTR_ERR(disk);
+ bdev = blkcg_conf_open_bdev(&input);
+ if (IS_ERR(bdev))
+ return PTR_ERR(bdev);
- q = disk->queue;
+ q = bdev->bd_disk->queue;
rcu_read_lock();
spin_lock_irq(&q->queue_lock);
@@ -689,7 +689,7 @@ int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
goto success;
}
success:
- ctx->disk = disk;
+ ctx->bdev = bdev;
ctx->blkg = blkg;
ctx->body = input;
return 0;
@@ -700,7 +700,7 @@ fail_unlock:
spin_unlock_irq(&q->queue_lock);
rcu_read_unlock();
fail:
- put_disk_and_module(disk);
+ blkdev_put_no_open(bdev);
/*
* If queue was bypassing, we should retry. Do so after a
* short msleep(). It isn't strictly necessary but queue
@@ -723,11 +723,11 @@ EXPORT_SYMBOL_GPL(blkg_conf_prep);
* with blkg_conf_prep().
*/
void blkg_conf_finish(struct blkg_conf_ctx *ctx)
- __releases(&ctx->disk->queue->queue_lock) __releases(rcu)
+ __releases(&ctx->bdev->bd_disk->queue->queue_lock) __releases(rcu)
{
- spin_unlock_irq(&ctx->disk->queue->queue_lock);
+ spin_unlock_irq(&ctx->bdev->bd_disk->queue->queue_lock);
rcu_read_unlock();
- put_disk_and_module(ctx->disk);
+ blkdev_put_no_open(ctx->bdev);
}
EXPORT_SYMBOL_GPL(blkg_conf_finish);
@@ -820,9 +820,9 @@ static void blkcg_fill_root_iostats(void)
class_dev_iter_init(&iter, &block_class, NULL, &disk_type);
while ((dev = class_dev_iter_next(&iter))) {
- struct gendisk *disk = dev_to_disk(dev);
- struct hd_struct *part = disk_get_part(disk, 0);
- struct blkcg_gq *blkg = blk_queue_root_blkg(disk->queue);
+ struct block_device *bdev = dev_to_bdev(dev);
+ struct blkcg_gq *blkg =
+ blk_queue_root_blkg(bdev->bd_disk->queue);
struct blkg_iostat tmp;
int cpu;
@@ -830,7 +830,7 @@ static void blkcg_fill_root_iostats(void)
for_each_possible_cpu(cpu) {
struct disk_stats *cpu_dkstats;
- cpu_dkstats = per_cpu_ptr(part->dkstats, cpu);
+ cpu_dkstats = per_cpu_ptr(bdev->bd_stats, cpu);
tmp.ios[BLKG_IOSTAT_READ] +=
cpu_dkstats->ios[STAT_READ];
tmp.ios[BLKG_IOSTAT_WRITE] +=
@@ -849,7 +849,6 @@ static void blkcg_fill_root_iostats(void)
blkg_iostat_set(&blkg->iostat.cur, &tmp);
u64_stats_update_end(&blkg->iostat.sync);
}
- disk_put_part(part);
}
}
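
The blkcg_conf_open_bdev() helper introduced above is consumed the same way blk-iocost's ioc_qos_write() does further down: parse the "MAJ:MIN" prefix, reject partitions, and drop the reference with blkdev_put_no_open(). A minimal sketch, with a made-up caller name and sample input:

static int example_conf_write(char *input)
{
	struct block_device *bdev;

	bdev = blkcg_conf_open_bdev(&input);	/* e.g. input = "253:0 enable=1" */
	if (IS_ERR(bdev))
		return PTR_ERR(bdev);

	/*
	 * input now points past the "MAJ:MIN " prefix; the queue to configure
	 * is bdev->bd_disk->queue.
	 */

	blkdev_put_no_open(bdev);
	return 0;
}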
diff --git a/block/blk-core.c b/block/blk-core.c
index 2db8bda43b6e..96e5fcd7f071 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -666,9 +666,9 @@ static int __init setup_fail_make_request(char *str)
}
__setup("fail_make_request=", setup_fail_make_request);
-static bool should_fail_request(struct hd_struct *part, unsigned int bytes)
+static bool should_fail_request(struct block_device *part, unsigned int bytes)
{
- return part->make_it_fail && should_fail(&fail_make_request, bytes);
+ return part->bd_make_it_fail && should_fail(&fail_make_request, bytes);
}
static int __init fail_make_request_debugfs(void)
@@ -683,7 +683,7 @@ late_initcall(fail_make_request_debugfs);
#else /* CONFIG_FAIL_MAKE_REQUEST */
-static inline bool should_fail_request(struct hd_struct *part,
+static inline bool should_fail_request(struct block_device *part,
unsigned int bytes)
{
return false;
@@ -691,11 +691,11 @@ static inline bool should_fail_request(struct hd_struct *part,
#endif /* CONFIG_FAIL_MAKE_REQUEST */
-static inline bool bio_check_ro(struct bio *bio, struct hd_struct *part)
+static inline bool bio_check_ro(struct bio *bio, struct block_device *part)
{
const int op = bio_op(bio);
- if (part->policy && op_is_write(op)) {
+ if (part->bd_read_only && op_is_write(op)) {
char b[BDEVNAME_SIZE];
if (op_is_flush(bio->bi_opf) && !bio_sectors(bio))
@@ -703,7 +703,7 @@ static inline bool bio_check_ro(struct bio *bio, struct hd_struct *part)
WARN_ONCE(1,
"Trying to write to read-only block-device %s (partno %d)\n",
- bio_devname(bio, b), part->partno);
+ bio_devname(bio, b), part->bd_partno);
/* Older lvm-tools actually trigger this */
return false;
}
@@ -713,7 +713,7 @@ static inline bool bio_check_ro(struct bio *bio, struct hd_struct *part)
static noinline int should_fail_bio(struct bio *bio)
{
- if (should_fail_request(&bio->bi_disk->part0, bio->bi_iter.bi_size))
+ if (should_fail_request(bio->bi_disk->part0, bio->bi_iter.bi_size))
return -EIO;
return 0;
}
@@ -742,7 +742,7 @@ static inline int bio_check_eod(struct bio *bio, sector_t maxsector)
*/
static inline int blk_partition_remap(struct bio *bio)
{
- struct hd_struct *p;
+ struct block_device *p;
int ret = -EIO;
rcu_read_lock();
@@ -755,11 +755,12 @@ static inline int blk_partition_remap(struct bio *bio)
goto out;
if (bio_sectors(bio)) {
- if (bio_check_eod(bio, part_nr_sects_read(p)))
+ if (bio_check_eod(bio, bdev_nr_sectors(p)))
goto out;
- bio->bi_iter.bi_sector += p->start_sect;
- trace_block_bio_remap(bio->bi_disk->queue, bio, part_devt(p),
- bio->bi_iter.bi_sector - p->start_sect);
+ bio->bi_iter.bi_sector += p->bd_start_sect;
+ trace_block_bio_remap(bio, p->bd_dev,
+ bio->bi_iter.bi_sector -
+ p->bd_start_sect);
}
bio->bi_partno = 0;
ret = 0;
@@ -829,7 +830,7 @@ static noinline_for_stack bool submit_bio_checks(struct bio *bio)
if (unlikely(blk_partition_remap(bio)))
goto end_io;
} else {
- if (unlikely(bio_check_ro(bio, &bio->bi_disk->part0)))
+ if (unlikely(bio_check_ro(bio, bio->bi_disk->part0)))
goto end_io;
if (unlikely(bio_check_eod(bio, get_capacity(bio->bi_disk))))
goto end_io;
@@ -906,7 +907,7 @@ static noinline_for_stack bool submit_bio_checks(struct bio *bio)
blkcg_bio_issue_init(bio);
if (!bio_flagged(bio, BIO_TRACE_COMPLETION)) {
- trace_block_bio_queue(q, bio);
+ trace_block_bio_queue(bio);
/* Now that enqueuing has been traced, we need to trace
* completion as well.
*/
@@ -1201,7 +1202,7 @@ blk_status_t blk_insert_cloned_request(struct request_queue *q, struct request *
return ret;
if (rq->rq_disk &&
- should_fail_request(&rq->rq_disk->part0, blk_rq_bytes(rq)))
+ should_fail_request(rq->rq_disk->part0, blk_rq_bytes(rq)))
return BLK_STS_IOERR;
if (blk_crypto_insert_cloned_request(rq))
@@ -1260,17 +1261,18 @@ unsigned int blk_rq_err_bytes(const struct request *rq)
}
EXPORT_SYMBOL_GPL(blk_rq_err_bytes);
-static void update_io_ticks(struct hd_struct *part, unsigned long now, bool end)
+static void update_io_ticks(struct block_device *part, unsigned long now,
+ bool end)
{
unsigned long stamp;
again:
- stamp = READ_ONCE(part->stamp);
+ stamp = READ_ONCE(part->bd_stamp);
if (unlikely(stamp != now)) {
- if (likely(cmpxchg(&part->stamp, stamp, now) == stamp))
+ if (likely(cmpxchg(&part->bd_stamp, stamp, now) == stamp))
__part_stat_add(part, io_ticks, end ? now - stamp : 1);
}
- if (part->partno) {
- part = &part_to_disk(part)->part0;
+ if (part->bd_partno) {
+ part = bdev_whole(part);
goto again;
}
}
@@ -1279,11 +1281,9 @@ static void blk_account_io_completion(struct request *req, unsigned int bytes)
{
if (req->part && blk_do_io_stat(req)) {
const int sgrp = op_stat_group(req_op(req));
- struct hd_struct *part;
part_stat_lock();
- part = req->part;
- part_stat_add(part, sectors[sgrp], bytes >> 9);
+ part_stat_add(req->part, sectors[sgrp], bytes >> 9);
part_stat_unlock();
}
}
@@ -1298,17 +1298,12 @@ void blk_account_io_done(struct request *req, u64 now)
if (req->part && blk_do_io_stat(req) &&
!(req->rq_flags & RQF_FLUSH_SEQ)) {
const int sgrp = op_stat_group(req_op(req));
- struct hd_struct *part;
part_stat_lock();
- part = req->part;
-
- update_io_ticks(part, jiffies, true);
- part_stat_inc(part, ios[sgrp]);
- part_stat_add(part, nsecs[sgrp], now - req->start_time_ns);
+ update_io_ticks(req->part, jiffies, true);
+ part_stat_inc(req->part, ios[sgrp]);
+ part_stat_add(req->part, nsecs[sgrp], now - req->start_time_ns);
part_stat_unlock();
-
- hd_struct_put(part);
}
}
@@ -1324,7 +1319,7 @@ void blk_account_io_start(struct request *rq)
part_stat_unlock();
}
-static unsigned long __part_start_io_acct(struct hd_struct *part,
+static unsigned long __part_start_io_acct(struct block_device *part,
unsigned int sectors, unsigned int op)
{
const int sgrp = op_stat_group(op);
@@ -1340,7 +1335,7 @@ static unsigned long __part_start_io_acct(struct hd_struct *part,
return now;
}
-unsigned long part_start_io_acct(struct gendisk *disk, struct hd_struct **part,
+unsigned long part_start_io_acct(struct gendisk *disk, struct block_device **part,
struct bio *bio)
{
*part = disk_map_sector_rcu(disk, bio->bi_iter.bi_sector);
@@ -1352,11 +1347,11 @@ EXPORT_SYMBOL_GPL(part_start_io_acct);
unsigned long disk_start_io_acct(struct gendisk *disk, unsigned int sectors,
unsigned int op)
{
- return __part_start_io_acct(&disk->part0, sectors, op);
+ return __part_start_io_acct(disk->part0, sectors, op);
}
EXPORT_SYMBOL(disk_start_io_acct);
-static void __part_end_io_acct(struct hd_struct *part, unsigned int op,
+static void __part_end_io_acct(struct block_device *part, unsigned int op,
unsigned long start_time)
{
const int sgrp = op_stat_group(op);
@@ -1370,18 +1365,17 @@ static void __part_end_io_acct(struct hd_struct *part, unsigned int op,
part_stat_unlock();
}
-void part_end_io_acct(struct hd_struct *part, struct bio *bio,
+void part_end_io_acct(struct block_device *part, struct bio *bio,
unsigned long start_time)
{
__part_end_io_acct(part, bio_op(bio), start_time);
- hd_struct_put(part);
}
EXPORT_SYMBOL_GPL(part_end_io_acct);
void disk_end_io_acct(struct gendisk *disk, unsigned int op,
unsigned long start_time)
{
- __part_end_io_acct(&disk->part0, op, start_time);
+ __part_end_io_acct(disk->part0, op, start_time);
}
EXPORT_SYMBOL(disk_end_io_acct);
diff --git a/block/blk-flush.c b/block/blk-flush.c
index fd5cee9f1a3b..76c1624cb06c 100644
--- a/block/blk-flush.c
+++ b/block/blk-flush.c
@@ -69,7 +69,6 @@
#include <linux/blkdev.h>
#include <linux/gfp.h>
#include <linux/blk-mq.h>
-#include <linux/lockdep.h>
#include "blk.h"
#include "blk-mq.h"
@@ -139,7 +138,7 @@ static void blk_flush_queue_rq(struct request *rq, bool add_front)
static void blk_account_io_flush(struct request *rq)
{
- struct hd_struct *part = &rq->rq_disk->part0;
+ struct block_device *part = rq->rq_disk->part0;
part_stat_lock();
part_stat_inc(part, ios[STAT_FLUSH]);
@@ -474,9 +473,6 @@ struct blk_flush_queue *blk_alloc_flush_queue(int node, int cmd_size,
INIT_LIST_HEAD(&fq->flush_queue[1]);
INIT_LIST_HEAD(&fq->flush_data_in_flight);
- lockdep_register_key(&fq->key);
- lockdep_set_class(&fq->mq_flush_lock, &fq->key);
-
return fq;
fail_rq:
@@ -491,7 +487,31 @@ void blk_free_flush_queue(struct blk_flush_queue *fq)
if (!fq)
return;
- lockdep_unregister_key(&fq->key);
kfree(fq->flush_rq);
kfree(fq);
}
+
+/*
+ * Allow driver to set its own lock class to fq->mq_flush_lock for
+ * avoiding lockdep complaint.
+ *
+ * flush_end_io() may be called recursively from some driver, such as
+ * nvme-loop, so lockdep may complain 'possible recursive locking' because
+ * all 'struct blk_flush_queue' instance share same mq_flush_lock lock class
+ * key. We need to assign different lock class for these driver's
+ * fq->mq_flush_lock for avoiding the lockdep warning.
+ *
+ * Use dynamically allocated lock class key for each 'blk_flush_queue'
+ * instance is over-kill, and more worse it introduces horrible boot delay
+ * issue because synchronize_rcu() is implied in lockdep_unregister_key which
+ * is called for each hctx release. SCSI probing may synchronously create and
+ * destroy lots of MQ request_queues for non-existent devices, and some robot
+ * test kernel always enable lockdep option. It is observed that more than half
+ * an hour is taken during SCSI MQ probe with per-fq lock class.
+ */
+void blk_mq_hctx_set_fq_lock_class(struct blk_mq_hw_ctx *hctx,
+ struct lock_class_key *key)
+{
+ lockdep_set_class(&hctx->fq->mq_flush_lock, key);
+}
+EXPORT_SYMBOL_GPL(blk_mq_hctx_set_fq_lock_class);
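
The intended consumer of the new export is a driver whose flush handling can recurse, such as nvme-loop (see the commit list in the pull message). A hypothetical driver-side sketch, with illustrative key and callback names, sets the class from the driver's ->init_hctx() callback:

static struct lock_class_key example_fq_lock_key;

static int example_init_hctx(struct blk_mq_hw_ctx *hctx, void *driver_data,
			     unsigned int hctx_idx)
{
	/* give this driver's flush queues their own lockdep class */
	blk_mq_hctx_set_fq_lock_class(hctx, &example_fq_lock_key);
	return 0;
}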
diff --git a/block/blk-iocost.c b/block/blk-iocost.c
index bbe86d1199dc..ffa418c0dcb1 100644
--- a/block/blk-iocost.c
+++ b/block/blk-iocost.c
@@ -39,7 +39,7 @@
* On top of that, a size cost proportional to the length of the IO is
* added. While simple, this model captures the operational
* characteristics of a wide varienty of devices well enough. Default
- * paramters for several different classes of devices are provided and the
+ * parameters for several different classes of devices are provided and the
* parameters can be configured from userspace via
* /sys/fs/cgroup/io.cost.model.
*
@@ -77,7 +77,7 @@
*
* This constitutes the basis of IO capacity distribution. Each cgroup's
* vtime is running at a rate determined by its hweight. A cgroup tracks
- * the vtime consumed by past IOs and can issue a new IO iff doing so
+ * the vtime consumed by past IOs and can issue a new IO if doing so
* wouldn't outrun the current device vtime. Otherwise, the IO is
* suspended until the vtime has progressed enough to cover it.
*
@@ -155,7 +155,7 @@
* Instead of debugfs or other clumsy monitoring mechanisms, this
* controller uses a drgn based monitoring script -
* tools/cgroup/iocost_monitor.py. For details on drgn, please see
- * https://github.com/osandov/drgn. The ouput looks like the following.
+ * https://github.com/osandov/drgn. The output looks like the following.
*
* sdb RUN per=300ms cur_per=234.218:v203.695 busy= +1 vrate= 62.12%
* active weight hweight% inflt% dbt delay usages%
@@ -370,8 +370,6 @@ enum {
AUTOP_SSD_FAST,
};
-struct ioc_gq;
-
struct ioc_params {
u32 qos[NR_QOS_PARAMS];
u64 i_lcoefs[NR_I_LCOEFS];
@@ -492,7 +490,7 @@ struct ioc_gq {
/*
* `vtime` is this iocg's vtime cursor which progresses as IOs are
* issued. If lagging behind device vtime, the delta represents
- * the currently available IO budget. If runnning ahead, the
+ * the currently available IO budget. If running ahead, the
* overage.
*
* `vtime_done` is the same but progressed on completion rather
@@ -973,6 +971,58 @@ done:
ioc->vtime_err = clamp(ioc->vtime_err, -vperiod, vperiod);
}
+static void ioc_adjust_base_vrate(struct ioc *ioc, u32 rq_wait_pct,
+ int nr_lagging, int nr_shortages,
+ int prev_busy_level, u32 *missed_ppm)
+{
+ u64 vrate = ioc->vtime_base_rate;
+ u64 vrate_min = ioc->vrate_min, vrate_max = ioc->vrate_max;
+
+ if (!ioc->busy_level || (ioc->busy_level < 0 && nr_lagging)) {
+ if (ioc->busy_level != prev_busy_level || nr_lagging)
+ trace_iocost_ioc_vrate_adj(ioc, atomic64_read(&ioc->vtime_rate),
+ missed_ppm, rq_wait_pct,
+ nr_lagging, nr_shortages);
+
+ return;
+ }
+
+ /* rq_wait signal is always reliable, ignore user vrate_min */
+ if (rq_wait_pct > RQ_WAIT_BUSY_PCT)
+ vrate_min = VRATE_MIN;
+
+ /*
+ * If vrate is out of bounds, apply clamp gradually as the
+ * bounds can change abruptly. Otherwise, apply busy_level
+ * based adjustment.
+ */
+ if (vrate < vrate_min) {
+ vrate = div64_u64(vrate * (100 + VRATE_CLAMP_ADJ_PCT), 100);
+ vrate = min(vrate, vrate_min);
+ } else if (vrate > vrate_max) {
+ vrate = div64_u64(vrate * (100 - VRATE_CLAMP_ADJ_PCT), 100);
+ vrate = max(vrate, vrate_max);
+ } else {
+ int idx = min_t(int, abs(ioc->busy_level),
+ ARRAY_SIZE(vrate_adj_pct) - 1);
+ u32 adj_pct = vrate_adj_pct[idx];
+
+ if (ioc->busy_level > 0)
+ adj_pct = 100 - adj_pct;
+ else
+ adj_pct = 100 + adj_pct;
+
+ vrate = clamp(DIV64_U64_ROUND_UP(vrate * adj_pct, 100),
+ vrate_min, vrate_max);
+ }
+
+ trace_iocost_ioc_vrate_adj(ioc, vrate, missed_ppm, rq_wait_pct,
+ nr_lagging, nr_shortages);
+
+ ioc->vtime_base_rate = vrate;
+ ioc_refresh_margins(ioc);
+}
+
/* take a snapshot of the current [v]time and vrate */
static void ioc_now(struct ioc *ioc, struct ioc_now *now)
{
@@ -1046,7 +1096,7 @@ static void __propagate_weights(struct ioc_gq *iocg, u32 active, u32 inuse,
/*
* The delta between inuse and active sums indicates that
- * that much of weight is being given away. Parent's inuse
+ * much of weight is being given away. Parent's inuse
* and active should reflect the ratio.
*/
if (parent->child_active_sum) {
@@ -2071,40 +2121,21 @@ static void ioc_forgive_debts(struct ioc *ioc, u64 usage_us_sum, int nr_debtors,
}
}
-static void ioc_timer_fn(struct timer_list *timer)
+/*
+ * Check the active iocgs' state to avoid oversleeping and deactivate
+ * idle iocgs.
+ *
+ * Since waiters determine the sleep durations based on the vrate
+ * they saw at the time of sleep, if vrate has increased, some
+ * waiters could be sleeping for too long. Wake up tardy waiters
+ * which should have woken up in the last period and expire idle
+ * iocgs.
+ */
+static int ioc_check_iocgs(struct ioc *ioc, struct ioc_now *now)
{
- struct ioc *ioc = container_of(timer, struct ioc, timer);
+ int nr_debtors = 0;
struct ioc_gq *iocg, *tiocg;
- struct ioc_now now;
- LIST_HEAD(surpluses);
- int nr_debtors = 0, nr_shortages = 0, nr_lagging = 0;
- u64 usage_us_sum = 0;
- u32 ppm_rthr = MILLION - ioc->params.qos[QOS_RPPM];
- u32 ppm_wthr = MILLION - ioc->params.qos[QOS_WPPM];
- u32 missed_ppm[2], rq_wait_pct;
- u64 period_vtime;
- int prev_busy_level;
-
- /* how were the latencies during the period? */
- ioc_lat_stat(ioc, missed_ppm, &rq_wait_pct);
-
- /* take care of active iocgs */
- spin_lock_irq(&ioc->lock);
-
- ioc_now(ioc, &now);
- period_vtime = now.vnow - ioc->period_at_vtime;
- if (WARN_ON_ONCE(!period_vtime)) {
- spin_unlock_irq(&ioc->lock);
- return;
- }
-
- /*
- * Waiters determine the sleep durations based on the vrate they
- * saw at the time of sleep. If vrate has increased, some waiters
- * could be sleeping for too long. Wake up tardy waiters which
- * should have woken up in the last period and expire idle iocgs.
- */
list_for_each_entry_safe(iocg, tiocg, &ioc->active_iocgs, active_list) {
if (!waitqueue_active(&iocg->waitq) && !iocg->abs_vdebt &&
!iocg->delay && !iocg_is_idle(iocg))
@@ -2114,24 +2145,24 @@ static void ioc_timer_fn(struct timer_list *timer)
/* flush wait and indebt stat deltas */
if (iocg->wait_since) {
- iocg->local_stat.wait_us += now.now - iocg->wait_since;
- iocg->wait_since = now.now;
+ iocg->local_stat.wait_us += now->now - iocg->wait_since;
+ iocg->wait_since = now->now;
}
if (iocg->indebt_since) {
iocg->local_stat.indebt_us +=
- now.now - iocg->indebt_since;
- iocg->indebt_since = now.now;
+ now->now - iocg->indebt_since;
+ iocg->indebt_since = now->now;
}
if (iocg->indelay_since) {
iocg->local_stat.indelay_us +=
- now.now - iocg->indelay_since;
- iocg->indelay_since = now.now;
+ now->now - iocg->indelay_since;
+ iocg->indelay_since = now->now;
}
if (waitqueue_active(&iocg->waitq) || iocg->abs_vdebt ||
iocg->delay) {
/* might be oversleeping vtime / hweight changes, kick */
- iocg_kick_waitq(iocg, true, &now);
+ iocg_kick_waitq(iocg, true, now);
if (iocg->abs_vdebt || iocg->delay)
nr_debtors++;
} else if (iocg_is_idle(iocg)) {
@@ -2145,7 +2176,7 @@ static void ioc_timer_fn(struct timer_list *timer)
* error and throw away. On reactivation, it'll start
* with the target budget.
*/
- excess = now.vnow - vtime - ioc->margins.target;
+ excess = now->vnow - vtime - ioc->margins.target;
if (excess > 0) {
u32 old_hwi;
@@ -2154,13 +2185,46 @@ static void ioc_timer_fn(struct timer_list *timer)
WEIGHT_ONE);
}
- __propagate_weights(iocg, 0, 0, false, &now);
+ __propagate_weights(iocg, 0, 0, false, now);
list_del_init(&iocg->active_list);
}
spin_unlock(&iocg->waitq.lock);
}
+
commit_weights(ioc);
+ return nr_debtors;
+}
+
+static void ioc_timer_fn(struct timer_list *timer)
+{
+ struct ioc *ioc = container_of(timer, struct ioc, timer);
+ struct ioc_gq *iocg, *tiocg;
+ struct ioc_now now;
+ LIST_HEAD(surpluses);
+ int nr_debtors, nr_shortages = 0, nr_lagging = 0;
+ u64 usage_us_sum = 0;
+ u32 ppm_rthr = MILLION - ioc->params.qos[QOS_RPPM];
+ u32 ppm_wthr = MILLION - ioc->params.qos[QOS_WPPM];
+ u32 missed_ppm[2], rq_wait_pct;
+ u64 period_vtime;
+ int prev_busy_level;
+
+ /* how were the latencies during the period? */
+ ioc_lat_stat(ioc, missed_ppm, &rq_wait_pct);
+
+ /* take care of active iocgs */
+ spin_lock_irq(&ioc->lock);
+
+ ioc_now(ioc, &now);
+
+ period_vtime = now.vnow - ioc->period_at_vtime;
+ if (WARN_ON_ONCE(!period_vtime)) {
+ spin_unlock_irq(&ioc->lock);
+ return;
+ }
+
+ nr_debtors = ioc_check_iocgs(ioc, &now);
/*
* Wait and indebt stat are flushed above and the donation calculation
@@ -2170,8 +2234,8 @@ static void ioc_timer_fn(struct timer_list *timer)
/* calc usage and see whether some weights need to be moved around */
list_for_each_entry(iocg, &ioc->active_iocgs, active_list) {
- u64 vdone, vtime, usage_us, usage_dur;
- u32 usage, hw_active, hw_inuse;
+ u64 vdone, vtime, usage_us;
+ u32 hw_active, hw_inuse;
/*
* Collect unused and wind vtime closer to vnow to prevent
@@ -2202,30 +2266,32 @@ static void ioc_timer_fn(struct timer_list *timer)
usage_us = iocg->usage_delta_us;
usage_us_sum += usage_us;
- if (vdone != vtime) {
- u64 inflight_us = DIV64_U64_ROUND_UP(
- cost_to_abs_cost(vtime - vdone, hw_inuse),
- ioc->vtime_base_rate);
- usage_us = max(usage_us, inflight_us);
- }
-
- /* convert to hweight based usage ratio */
- if (time_after64(iocg->activated_at, ioc->period_at))
- usage_dur = max_t(u64, now.now - iocg->activated_at, 1);
- else
- usage_dur = max_t(u64, now.now - ioc->period_at, 1);
-
- usage = clamp_t(u32,
- DIV64_U64_ROUND_UP(usage_us * WEIGHT_ONE,
- usage_dur),
- 1, WEIGHT_ONE);
-
/* see whether there's surplus vtime */
WARN_ON_ONCE(!list_empty(&iocg->surplus_list));
if (hw_inuse < hw_active ||
(!waitqueue_active(&iocg->waitq) &&
time_before64(vtime, now.vnow - ioc->margins.low))) {
- u32 hwa, old_hwi, hwm, new_hwi;
+ u32 hwa, old_hwi, hwm, new_hwi, usage;
+ u64 usage_dur;
+
+ if (vdone != vtime) {
+ u64 inflight_us = DIV64_U64_ROUND_UP(
+ cost_to_abs_cost(vtime - vdone, hw_inuse),
+ ioc->vtime_base_rate);
+
+ usage_us = max(usage_us, inflight_us);
+ }
+
+ /* convert to hweight based usage ratio */
+ if (time_after64(iocg->activated_at, ioc->period_at))
+ usage_dur = max_t(u64, now.now - iocg->activated_at, 1);
+ else
+ usage_dur = max_t(u64, now.now - ioc->period_at, 1);
+
+ usage = clamp_t(u32,
+ DIV64_U64_ROUND_UP(usage_us * WEIGHT_ONE,
+ usage_dur),
+ 1, WEIGHT_ONE);
/*
* Already donating or accumulated enough to start.
@@ -2309,51 +2375,8 @@ static void ioc_timer_fn(struct timer_list *timer)
ioc->busy_level = clamp(ioc->busy_level, -1000, 1000);
- if (ioc->busy_level > 0 || (ioc->busy_level < 0 && !nr_lagging)) {
- u64 vrate = ioc->vtime_base_rate;
- u64 vrate_min = ioc->vrate_min, vrate_max = ioc->vrate_max;
-
- /* rq_wait signal is always reliable, ignore user vrate_min */
- if (rq_wait_pct > RQ_WAIT_BUSY_PCT)
- vrate_min = VRATE_MIN;
-
- /*
- * If vrate is out of bounds, apply clamp gradually as the
- * bounds can change abruptly. Otherwise, apply busy_level
- * based adjustment.
- */
- if (vrate < vrate_min) {
- vrate = div64_u64(vrate * (100 + VRATE_CLAMP_ADJ_PCT),
- 100);
- vrate = min(vrate, vrate_min);
- } else if (vrate > vrate_max) {
- vrate = div64_u64(vrate * (100 - VRATE_CLAMP_ADJ_PCT),
- 100);
- vrate = max(vrate, vrate_max);
- } else {
- int idx = min_t(int, abs(ioc->busy_level),
- ARRAY_SIZE(vrate_adj_pct) - 1);
- u32 adj_pct = vrate_adj_pct[idx];
-
- if (ioc->busy_level > 0)
- adj_pct = 100 - adj_pct;
- else
- adj_pct = 100 + adj_pct;
-
- vrate = clamp(DIV64_U64_ROUND_UP(vrate * adj_pct, 100),
- vrate_min, vrate_max);
- }
-
- trace_iocost_ioc_vrate_adj(ioc, vrate, missed_ppm, rq_wait_pct,
- nr_lagging, nr_shortages);
-
- ioc->vtime_base_rate = vrate;
- ioc_refresh_margins(ioc);
- } else if (ioc->busy_level != prev_busy_level || nr_lagging) {
- trace_iocost_ioc_vrate_adj(ioc, atomic64_read(&ioc->vtime_rate),
- missed_ppm, rq_wait_pct, nr_lagging,
- nr_shortages);
- }
+ ioc_adjust_base_vrate(ioc, rq_wait_pct, nr_lagging, nr_shortages,
+ prev_busy_level, missed_ppm);
ioc_refresh_params(ioc, false);
@@ -2400,7 +2423,7 @@ static u64 adjust_inuse_and_calc_cost(struct ioc_gq *iocg, u64 vtime,
return cost;
/*
- * We only increase inuse during period and do so iff the margin has
+ * We only increase inuse during period and do so if the margin has
* deteriorated since the previous adjustment.
*/
if (margin >= iocg->saved_margin || margin >= margins->low ||
@@ -3120,23 +3143,23 @@ static const match_table_t qos_tokens = {
static ssize_t ioc_qos_write(struct kernfs_open_file *of, char *input,
size_t nbytes, loff_t off)
{
- struct gendisk *disk;
+ struct block_device *bdev;
struct ioc *ioc;
u32 qos[NR_QOS_PARAMS];
bool enable, user;
char *p;
int ret;
- disk = blkcg_conf_get_disk(&input);
- if (IS_ERR(disk))
- return PTR_ERR(disk);
+ bdev = blkcg_conf_open_bdev(&input);
+ if (IS_ERR(bdev))
+ return PTR_ERR(bdev);
- ioc = q_to_ioc(disk->queue);
+ ioc = q_to_ioc(bdev->bd_disk->queue);
if (!ioc) {
- ret = blk_iocost_init(disk->queue);
+ ret = blk_iocost_init(bdev->bd_disk->queue);
if (ret)
goto err;
- ioc = q_to_ioc(disk->queue);
+ ioc = q_to_ioc(bdev->bd_disk->queue);
}
spin_lock_irq(&ioc->lock);
@@ -3231,12 +3254,12 @@ static ssize_t ioc_qos_write(struct kernfs_open_file *of, char *input,
ioc_refresh_params(ioc, true);
spin_unlock_irq(&ioc->lock);
- put_disk_and_module(disk);
+ blkdev_put_no_open(bdev);
return nbytes;
einval:
ret = -EINVAL;
err:
- put_disk_and_module(disk);
+ blkdev_put_no_open(bdev);
return ret;
}
@@ -3287,23 +3310,23 @@ static const match_table_t i_lcoef_tokens = {
static ssize_t ioc_cost_model_write(struct kernfs_open_file *of, char *input,
size_t nbytes, loff_t off)
{
- struct gendisk *disk;
+ struct block_device *bdev;
struct ioc *ioc;
u64 u[NR_I_LCOEFS];
bool user;
char *p;
int ret;
- disk = blkcg_conf_get_disk(&input);
- if (IS_ERR(disk))
- return PTR_ERR(disk);
+ bdev = blkcg_conf_open_bdev(&input);
+ if (IS_ERR(bdev))
+ return PTR_ERR(bdev);
- ioc = q_to_ioc(disk->queue);
+ ioc = q_to_ioc(bdev->bd_disk->queue);
if (!ioc) {
- ret = blk_iocost_init(disk->queue);
+ ret = blk_iocost_init(bdev->bd_disk->queue);
if (ret)
goto err;
- ioc = q_to_ioc(disk->queue);
+ ioc = q_to_ioc(bdev->bd_disk->queue);
}
spin_lock_irq(&ioc->lock);
@@ -3356,13 +3379,13 @@ static ssize_t ioc_cost_model_write(struct kernfs_open_file *of, char *input,
ioc_refresh_params(ioc, true);
spin_unlock_irq(&ioc->lock);
- put_disk_and_module(disk);
+ blkdev_put_no_open(bdev);
return nbytes;
einval:
ret = -EINVAL;
err:
- put_disk_and_module(disk);
+ blkdev_put_no_open(bdev);
return ret;
}
diff --git a/block/blk-lib.c b/block/blk-lib.c
index e90614fd8d6a..752f9c722062 100644
--- a/block/blk-lib.c
+++ b/block/blk-lib.c
@@ -65,7 +65,7 @@ int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
/* In case the discard request is in a partition */
if (bdev_is_partition(bdev))
- part_offset = bdev->bd_part->start_sect;
+ part_offset = bdev->bd_start_sect;
while (nr_sects) {
sector_t granularity_aligned_lba, req_sects;
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 97b7c2821565..808768f6b174 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -279,6 +279,14 @@ static struct bio *blk_bio_segment_split(struct request_queue *q,
return NULL;
split:
*segs = nsegs;
+
+ /*
+ * Bio splitting may cause subtle trouble such as hang when doing sync
+ * iopoll in direct IO routine. Given performance gain of iopoll for
+ * big IO can be trivial, disable iopoll when split needed.
+ */
+ bio->bi_opf &= ~REQ_HIPRI;
+
return bio_split(bio, sectors, GFP_NOIO, bs);
}
@@ -338,7 +346,7 @@ void __blk_queue_split(struct bio **bio, unsigned int *nr_segs)
split->bi_opf |= REQ_NOMERGE;
bio_chain(split, *bio);
- trace_block_split(q, split, (*bio)->bi_iter.bi_sector);
+ trace_block_split(split, (*bio)->bi_iter.bi_sector);
submit_bio_noacct(*bio);
*bio = split;
}
@@ -683,8 +691,6 @@ static void blk_account_io_merge_request(struct request *req)
part_stat_lock();
part_stat_inc(req->part, merges[op_stat_group(req_op(req))]);
part_stat_unlock();
-
- hd_struct_put(req->part);
}
}
@@ -801,7 +807,7 @@ static struct request *attempt_merge(struct request_queue *q,
*/
blk_account_io_merge_request(next);
- trace_block_rq_merge(q, next);
+ trace_block_rq_merge(next);
/*
* ownership of bio passed from next to req, return 'next' for
@@ -924,7 +930,7 @@ static enum bio_merge_status bio_attempt_back_merge(struct request *req,
if (!ll_back_merge_fn(req, bio, nr_segs))
return BIO_MERGE_FAILED;
- trace_block_bio_backmerge(req->q, req, bio);
+ trace_block_bio_backmerge(bio);
rq_qos_merge(req->q, req, bio);
if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff)
@@ -948,7 +954,7 @@ static enum bio_merge_status bio_attempt_front_merge(struct request *req,
if (!ll_front_merge_fn(req, bio, nr_segs))
return BIO_MERGE_FAILED;
- trace_block_bio_frontmerge(req->q, req, bio);
+ trace_block_bio_frontmerge(bio);
rq_qos_merge(req->q, req, bio);
if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff)
diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c
index d1eafe2c045c..deff4e826e23 100644
--- a/block/blk-mq-sched.c
+++ b/block/blk-mq-sched.c
@@ -386,7 +386,7 @@ EXPORT_SYMBOL_GPL(blk_mq_sched_try_insert_merge);
void blk_mq_sched_request_inserted(struct request *rq)
{
- trace_block_rq_insert(rq->q, rq);
+ trace_block_rq_insert(rq);
}
EXPORT_SYMBOL_GPL(blk_mq_sched_request_inserted);
diff --git a/block/blk-mq.c b/block/blk-mq.c
index d35b3c0c876a..14a44699e9b6 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -95,7 +95,7 @@ static void blk_mq_hctx_clear_pending(struct blk_mq_hw_ctx *hctx,
}
struct mq_inflight {
- struct hd_struct *part;
+ struct block_device *part;
unsigned int inflight[2];
};
@@ -105,13 +105,15 @@ static bool blk_mq_check_inflight(struct blk_mq_hw_ctx *hctx,
{
struct mq_inflight *mi = priv;
- if (rq->part == mi->part && blk_mq_rq_state(rq) == MQ_RQ_IN_FLIGHT)
+ if ((!mi->part->bd_partno || rq->part == mi->part) &&
+ blk_mq_rq_state(rq) == MQ_RQ_IN_FLIGHT)
mi->inflight[rq_data_dir(rq)]++;
return true;
}
-unsigned int blk_mq_in_flight(struct request_queue *q, struct hd_struct *part)
+unsigned int blk_mq_in_flight(struct request_queue *q,
+ struct block_device *part)
{
struct mq_inflight mi = { .part = part };
@@ -120,8 +122,8 @@ unsigned int blk_mq_in_flight(struct request_queue *q, struct hd_struct *part)
return mi.inflight[0] + mi.inflight[1];
}
-void blk_mq_in_flight_rw(struct request_queue *q, struct hd_struct *part,
- unsigned int inflight[2])
+void blk_mq_in_flight_rw(struct request_queue *q, struct block_device *part,
+ unsigned int inflight[2])
{
struct mq_inflight mi = { .part = part };
@@ -729,7 +731,7 @@ void blk_mq_start_request(struct request *rq)
{
struct request_queue *q = rq->q;
- trace_block_rq_issue(q, rq);
+ trace_block_rq_issue(rq);
if (test_bit(QUEUE_FLAG_STATS, &q->queue_flags)) {
rq->io_start_time_ns = ktime_get_ns();
@@ -756,7 +758,7 @@ static void __blk_mq_requeue_request(struct request *rq)
blk_mq_put_driver_tag(rq);
- trace_block_rq_requeue(q, rq);
+ trace_block_rq_requeue(rq);
rq_qos_requeue(q, rq);
if (blk_mq_request_started(rq)) {
@@ -1590,7 +1592,7 @@ select_cpu:
* __blk_mq_delay_run_hw_queue - Run (or schedule to run) a hardware queue.
* @hctx: Pointer to the hardware queue to run.
* @async: If we want to run the queue asynchronously.
- * @msecs: Microseconds of delay to wait before running the queue.
+ * @msecs: Milliseconds of delay to wait before running the queue.
*
* If !@async, try to run the queue now. Else, run the queue asynchronously and
* with a delay of @msecs.
@@ -1619,7 +1621,7 @@ static void __blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async,
/**
* blk_mq_delay_run_hw_queue - Run a hardware queue asynchronously.
* @hctx: Pointer to the hardware queue to run.
- * @msecs: Microseconds of delay to wait before running the queue.
+ * @msecs: Milliseconds of delay to wait before running the queue.
*
* Run a hardware queue asynchronously with a delay of @msecs.
*/
@@ -1683,7 +1685,7 @@ EXPORT_SYMBOL(blk_mq_run_hw_queues);
/**
* blk_mq_delay_run_hw_queues - Run all hardware queues asynchronously.
* @q: Pointer to the request queue to run.
- * @msecs: Microseconds of delay to wait before running the queues.
+ * @msecs: Milliseconds of delay to wait before running the queues.
*/
void blk_mq_delay_run_hw_queues(struct request_queue *q, unsigned long msecs)
{
@@ -1817,7 +1819,7 @@ static inline void __blk_mq_insert_req_list(struct blk_mq_hw_ctx *hctx,
lockdep_assert_held(&ctx->lock);
- trace_block_rq_insert(hctx->queue, rq);
+ trace_block_rq_insert(rq);
if (at_head)
list_add(&rq->queuelist, &ctx->rq_lists[type]);
@@ -1874,7 +1876,7 @@ void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
*/
list_for_each_entry(rq, list, queuelist) {
BUG_ON(rq->mq_ctx != ctx);
- trace_block_rq_insert(hctx->queue, rq);
+ trace_block_rq_insert(rq);
}
spin_lock(&ctx->lock);
@@ -2155,6 +2157,7 @@ blk_qc_t blk_mq_submit_bio(struct bio *bio)
unsigned int nr_segs;
blk_qc_t cookie;
blk_status_t ret;
+ bool hipri;
blk_queue_bounce(q, &bio);
__blk_queue_split(&bio, &nr_segs);
@@ -2171,6 +2174,8 @@ blk_qc_t blk_mq_submit_bio(struct bio *bio)
rq_qos_throttle(q, bio);
+ hipri = bio->bi_opf & REQ_HIPRI;
+
data.cmd_flags = bio->bi_opf;
rq = __blk_mq_alloc_request(&data);
if (unlikely(!rq)) {
@@ -2180,7 +2185,7 @@ blk_qc_t blk_mq_submit_bio(struct bio *bio)
goto queue_exit;
}
- trace_block_getrq(q, bio, bio->bi_opf);
+ trace_block_getrq(bio);
rq_qos_track(q, rq, bio);
@@ -2263,6 +2268,8 @@ blk_qc_t blk_mq_submit_bio(struct bio *bio)
blk_mq_sched_insert_request(rq, false, true, true);
}
+ if (!hipri)
+ return BLK_QC_T_NONE;
return cookie;
queue_exit:
blk_queue_exit(q);
@@ -3373,6 +3380,12 @@ static int blk_mq_realloc_tag_set_tags(struct blk_mq_tag_set *set,
return 0;
}
+static int blk_mq_alloc_tag_set_tags(struct blk_mq_tag_set *set,
+ int new_nr_hw_queues)
+{
+ return blk_mq_realloc_tag_set_tags(set, 0, new_nr_hw_queues);
+}
+
/*
* Alloc a tag set to be associated with one or more request queues.
* May fail with EINVAL for various error conditions. May adjust the
@@ -3426,7 +3439,7 @@ int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
if (set->nr_maps == 1 && set->nr_hw_queues > nr_cpu_ids)
set->nr_hw_queues = nr_cpu_ids;
- if (blk_mq_realloc_tag_set_tags(set, 0, set->nr_hw_queues) < 0)
+ if (blk_mq_alloc_tag_set_tags(set, set->nr_hw_queues) < 0)
return -ENOMEM;
ret = -ENOMEM;
@@ -3861,9 +3874,10 @@ int blk_poll(struct request_queue *q, blk_qc_t cookie, bool spin)
* the state. Like for the other success return cases, the
* caller is responsible for checking if the IO completed. If
* the IO isn't complete, we'll get called again and will go
- * straight to the busy poll loop.
+ * straight to the busy poll loop. If specified not to spin,
+ * we also should not sleep.
*/
- if (blk_mq_poll_hybrid(q, hctx, cookie))
+ if (spin && blk_mq_poll_hybrid(q, hctx, cookie))
return 1;
hctx->poll_considered++;
diff --git a/block/blk-mq.h b/block/blk-mq.h
index a52703c98b77..c1458d9502f1 100644
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -99,7 +99,7 @@ static inline struct blk_mq_hw_ctx *blk_mq_map_queue_type(struct request_queue *
* blk_mq_map_queue() - map (cmd_flags,type) to hardware queue
* @q: request queue
* @flags: request command flags
- * @cpu: cpu ctx
+ * @ctx: software queue cpu ctx
*/
static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q,
unsigned int flags,
@@ -182,9 +182,10 @@ static inline bool blk_mq_hw_queue_mapped(struct blk_mq_hw_ctx *hctx)
return hctx->nr_ctx && hctx->tags;
}
-unsigned int blk_mq_in_flight(struct request_queue *q, struct hd_struct *part);
-void blk_mq_in_flight_rw(struct request_queue *q, struct hd_struct *part,
- unsigned int inflight[2]);
+unsigned int blk_mq_in_flight(struct request_queue *q,
+ struct block_device *part);
+void blk_mq_in_flight_rw(struct request_queue *q, struct block_device *part,
+ unsigned int inflight[2]);
static inline void blk_mq_put_dispatch_budget(struct request_queue *q)
{
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index b771c4299982..d52cac9f3a7c 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -587,6 +587,7 @@ static void throtl_pd_online(struct blkg_policy_data *pd)
tg_update_has_rules(tg);
}
+#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
static void blk_throtl_update_limit_valid(struct throtl_data *td)
{
struct cgroup_subsys_state *pos_css;
@@ -607,6 +608,11 @@ static void blk_throtl_update_limit_valid(struct throtl_data *td)
td->limit_valid[LIMIT_LOW] = low_valid;
}
+#else
+static inline void blk_throtl_update_limit_valid(struct throtl_data *td)
+{
+}
+#endif
static void throtl_upgrade_state(struct throtl_data *td);
static void throtl_pd_offline(struct blkg_policy_data *pd)
diff --git a/block/blk-wbt.c b/block/blk-wbt.c
index fd410086fe1d..0321ca83e73f 100644
--- a/block/blk-wbt.c
+++ b/block/blk-wbt.c
@@ -835,7 +835,6 @@ int wbt_init(struct request_queue *q)
rwb->enable_state = WBT_STATE_ON_DEFAULT;
rwb->wc = 1;
rwb->rq_depth.default_depth = RWB_DEF_DEPTH;
- wbt_update_limits(rwb);
/*
* Assign rwb and add the stats callback.
diff --git a/block/blk-zoned.c b/block/blk-zoned.c
index 6817a673e5ce..7a68b6e4300c 100644
--- a/block/blk-zoned.c
+++ b/block/blk-zoned.c
@@ -508,15 +508,29 @@ int blk_revalidate_disk_zones(struct gendisk *disk,
noio_flag = memalloc_noio_save();
ret = disk->fops->report_zones(disk, 0, UINT_MAX,
blk_revalidate_zone_cb, &args);
+ if (!ret) {
+ pr_warn("%s: No zones reported\n", disk->disk_name);
+ ret = -ENODEV;
+ }
memalloc_noio_restore(noio_flag);
/*
+ * If zones were reported, make sure that the entire disk capacity
+ * has been checked.
+ */
+ if (ret > 0 && args.sector != get_capacity(disk)) {
+ pr_warn("%s: Missing zones from sector %llu\n",
+ disk->disk_name, args.sector);
+ ret = -ENODEV;
+ }
+
+ /*
* Install the new bitmaps and update nr_zones only once the queue is
* stopped and all I/Os are completed (i.e. a scheduler is not
* referencing the bitmaps).
*/
blk_mq_freeze_queue(q);
- if (ret >= 0) {
+ if (ret > 0) {
blk_queue_chunk_sectors(q, args.zone_sectors);
q->nr_zones = args.nr_zones;
swap(q->seq_zones_wlock, args.seq_zones_wlock);
diff --git a/block/blk.h b/block/blk.h
index dfab98465db9..d23d018fd2cd 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -25,7 +25,6 @@ struct blk_flush_queue {
struct list_head flush_data_in_flight;
struct request *flush_rq;
- struct lock_class_key key;
spinlock_t mq_flush_lock;
};
@@ -215,7 +214,7 @@ static inline void elevator_exit(struct request_queue *q,
__elevator_exit(q, e);
}
-struct hd_struct *__disk_get_part(struct gendisk *disk, int partno);
+struct block_device *__disk_get_part(struct gendisk *disk, int partno);
ssize_t part_size_show(struct device *dev, struct device_attribute *attr,
char *buf);
@@ -348,97 +347,21 @@ void blk_queue_free_zone_bitmaps(struct request_queue *q);
static inline void blk_queue_free_zone_bitmaps(struct request_queue *q) {}
#endif
-struct hd_struct *disk_map_sector_rcu(struct gendisk *disk, sector_t sector);
+struct block_device *disk_map_sector_rcu(struct gendisk *disk, sector_t sector);
-int blk_alloc_devt(struct hd_struct *part, dev_t *devt);
+int blk_alloc_devt(struct block_device *part, dev_t *devt);
void blk_free_devt(dev_t devt);
-void blk_invalidate_devt(dev_t devt);
char *disk_name(struct gendisk *hd, int partno, char *buf);
#define ADDPART_FLAG_NONE 0
#define ADDPART_FLAG_RAID 1
#define ADDPART_FLAG_WHOLEDISK 2
-void delete_partition(struct hd_struct *part);
+void delete_partition(struct block_device *part);
int bdev_add_partition(struct block_device *bdev, int partno,
sector_t start, sector_t length);
int bdev_del_partition(struct block_device *bdev, int partno);
int bdev_resize_partition(struct block_device *bdev, int partno,
sector_t start, sector_t length);
int disk_expand_part_tbl(struct gendisk *disk, int target);
-int hd_ref_init(struct hd_struct *part);
-
-/* no need to get/put refcount of part0 */
-static inline int hd_struct_try_get(struct hd_struct *part)
-{
- if (part->partno)
- return percpu_ref_tryget_live(&part->ref);
- return 1;
-}
-
-static inline void hd_struct_put(struct hd_struct *part)
-{
- if (part->partno)
- percpu_ref_put(&part->ref);
-}
-
-static inline void hd_free_part(struct hd_struct *part)
-{
- free_percpu(part->dkstats);
- kfree(part->info);
- percpu_ref_exit(&part->ref);
-}
-
-/*
- * Any access of part->nr_sects which is not protected by partition
- * bd_mutex or gendisk bdev bd_mutex, should be done using this
- * accessor function.
- *
- * Code written along the lines of i_size_read() and i_size_write().
- * CONFIG_PREEMPTION case optimizes the case of UP kernel with preemption
- * on.
- */
-static inline sector_t part_nr_sects_read(struct hd_struct *part)
-{
-#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
- sector_t nr_sects;
- unsigned seq;
- do {
- seq = read_seqcount_begin(&part->nr_sects_seq);
- nr_sects = part->nr_sects;
- } while (read_seqcount_retry(&part->nr_sects_seq, seq));
- return nr_sects;
-#elif BITS_PER_LONG==32 && defined(CONFIG_PREEMPTION)
- sector_t nr_sects;
-
- preempt_disable();
- nr_sects = part->nr_sects;
- preempt_enable();
- return nr_sects;
-#else
- return part->nr_sects;
-#endif
-}
-
-/*
- * Should be called with mutex lock held (typically bd_mutex) of partition
- * to provide mutual exlusion among writers otherwise seqcount might be
- * left in wrong state leaving the readers spinning infinitely.
- */
-static inline void part_nr_sects_write(struct hd_struct *part, sector_t size)
-{
-#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
- preempt_disable();
- write_seqcount_begin(&part->nr_sects_seq);
- part->nr_sects = size;
- write_seqcount_end(&part->nr_sects_seq);
- preempt_enable();
-#elif BITS_PER_LONG==32 && defined(CONFIG_PREEMPTION)
- preempt_disable();
- part->nr_sects = size;
- preempt_enable();
-#else
- part->nr_sects = size;
-#endif
-}
int bio_add_hw_page(struct request_queue *q, struct bio *bio,
struct page *page, unsigned int len, unsigned int offset,
diff --git a/block/bounce.c b/block/bounce.c
index 162a6eee8999..d3f51acd6e3b 100644
--- a/block/bounce.c
+++ b/block/bounce.c
@@ -340,7 +340,7 @@ static void __blk_queue_bounce(struct request_queue *q, struct bio **bio_orig,
}
}
- trace_block_bio_bounce(q, *bio_orig);
+ trace_block_bio_bounce(*bio_orig);
bio->bi_flags |= (1 << BIO_BOUNCED);
diff --git a/block/genhd.c b/block/genhd.c
index 9387f050c248..b84b8671e627 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -17,7 +17,6 @@
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/kmod.h>
-#include <linux/kobj_map.h>
#include <linux/mutex.h>
#include <linux/idr.h>
#include <linux/log2.h>
@@ -26,17 +25,13 @@
#include "blk.h"
-static DEFINE_MUTEX(block_class_lock);
static struct kobject *block_depr;
+DECLARE_RWSEM(bdev_lookup_sem);
+
/* for extended dynamic devt allocation, currently only one major is used */
#define NR_EXT_DEVT (1 << MINORBITS)
-
-/* For extended devt allocation. ext_devt_lock prevents look up
- * results from going away underneath its user.
- */
-static DEFINE_SPINLOCK(ext_devt_lock);
-static DEFINE_IDR(ext_devt_idr);
+static DEFINE_IDA(ext_devt_ida);
static void disk_check_events(struct disk_events *ev,
unsigned int *clearing_ptr);
@@ -45,30 +40,49 @@ static void disk_add_events(struct gendisk *disk);
static void disk_del_events(struct gendisk *disk);
static void disk_release_events(struct gendisk *disk);
+void set_capacity(struct gendisk *disk, sector_t sectors)
+{
+ struct block_device *bdev = disk->part0;
+
+ spin_lock(&bdev->bd_size_lock);
+ i_size_write(bdev->bd_inode, (loff_t)sectors << SECTOR_SHIFT);
+ spin_unlock(&bdev->bd_size_lock);
+}
+EXPORT_SYMBOL(set_capacity);
+
/*
- * Set disk capacity and notify if the size is not currently
- * zero and will not be set to zero
+ * Set disk capacity and notify if the size is not currently zero and will not
+ * be set to zero. Returns true if a uevent was sent, otherwise false.
*/
-bool set_capacity_revalidate_and_notify(struct gendisk *disk, sector_t size,
- bool update_bdev)
+bool set_capacity_and_notify(struct gendisk *disk, sector_t size)
{
sector_t capacity = get_capacity(disk);
+ char *envp[] = { "RESIZE=1", NULL };
set_capacity(disk, size);
- if (update_bdev)
- revalidate_disk_size(disk, true);
- if (capacity != size && capacity != 0 && size != 0) {
- char *envp[] = { "RESIZE=1", NULL };
+ /*
+ * Only print a message and send a uevent if the gendisk is user visible
+ * and alive. This avoids spamming the log and udev when setting the
+ * initial capacity during probing.
+ */
+ if (size == capacity ||
+ (disk->flags & (GENHD_FL_UP | GENHD_FL_HIDDEN)) != GENHD_FL_UP)
+ return false;
- kobject_uevent_env(&disk_to_dev(disk)->kobj, KOBJ_CHANGE, envp);
- return true;
- }
+ pr_info("%s: detected capacity change from %lld to %lld\n",
+ disk->disk_name, size, capacity);
- return false;
+ /*
+ * Historically we did not send a uevent for changes to/from an empty
+ * device.
+ */
+ if (!capacity || !size)
+ return false;
+ kobject_uevent_env(&disk_to_dev(disk)->kobj, KOBJ_CHANGE, envp);
+ return true;
}
-
-EXPORT_SYMBOL_GPL(set_capacity_revalidate_and_notify);
+EXPORT_SYMBOL_GPL(set_capacity_and_notify);
/*
* Format the device name of the indicated disk into the supplied buffer and
@@ -92,13 +106,14 @@ const char *bdevname(struct block_device *bdev, char *buf)
}
EXPORT_SYMBOL(bdevname);
-static void part_stat_read_all(struct hd_struct *part, struct disk_stats *stat)
+static void part_stat_read_all(struct block_device *part,
+ struct disk_stats *stat)
{
int cpu;
memset(stat, 0, sizeof(struct disk_stats));
for_each_possible_cpu(cpu) {
- struct disk_stats *ptr = per_cpu_ptr(part->dkstats, cpu);
+ struct disk_stats *ptr = per_cpu_ptr(part->bd_stats, cpu);
int group;
for (group = 0; group < NR_STAT_GROUPS; group++) {
@@ -112,7 +127,7 @@ static void part_stat_read_all(struct hd_struct *part, struct disk_stats *stat)
}
}
-static unsigned int part_in_flight(struct hd_struct *part)
+static unsigned int part_in_flight(struct block_device *part)
{
unsigned int inflight = 0;
int cpu;
@@ -127,7 +142,8 @@ static unsigned int part_in_flight(struct hd_struct *part)
return inflight;
}
-static void part_in_flight_rw(struct hd_struct *part, unsigned int inflight[2])
+static void part_in_flight_rw(struct block_device *part,
+ unsigned int inflight[2])
{
int cpu;
@@ -143,7 +159,7 @@ static void part_in_flight_rw(struct hd_struct *part, unsigned int inflight[2])
inflight[1] = 0;
}
-struct hd_struct *__disk_get_part(struct gendisk *disk, int partno)
+struct block_device *__disk_get_part(struct gendisk *disk, int partno)
{
struct disk_part_tbl *ptbl = rcu_dereference(disk->part_tbl);
@@ -153,33 +169,6 @@ struct hd_struct *__disk_get_part(struct gendisk *disk, int partno)
}
/**
- * disk_get_part - get partition
- * @disk: disk to look partition from
- * @partno: partition number
- *
- * Look for partition @partno from @disk. If found, increment
- * reference count and return it.
- *
- * CONTEXT:
- * Don't care.
- *
- * RETURNS:
- * Pointer to the found partition on success, NULL if not found.
- */
-struct hd_struct *disk_get_part(struct gendisk *disk, int partno)
-{
- struct hd_struct *part;
-
- rcu_read_lock();
- part = __disk_get_part(disk, partno);
- if (part)
- get_device(part_to_dev(part));
- rcu_read_unlock();
-
- return part;
-}
-
-/**
* disk_part_iter_init - initialize partition iterator
* @piter: iterator to initialize
* @disk: disk to iterate over
@@ -223,14 +212,13 @@ EXPORT_SYMBOL_GPL(disk_part_iter_init);
* CONTEXT:
* Don't care.
*/
-struct hd_struct *disk_part_iter_next(struct disk_part_iter *piter)
+struct block_device *disk_part_iter_next(struct disk_part_iter *piter)
{
struct disk_part_tbl *ptbl;
int inc, end;
/* put the last partition */
- disk_put_part(piter->part);
- piter->part = NULL;
+ disk_part_iter_exit(piter);
/* get part_tbl */
rcu_read_lock();
@@ -251,19 +239,20 @@ struct hd_struct *disk_part_iter_next(struct disk_part_iter *piter)
/* iterate to the next partition */
for (; piter->idx != end; piter->idx += inc) {
- struct hd_struct *part;
+ struct block_device *part;
part = rcu_dereference(ptbl->part[piter->idx]);
if (!part)
continue;
- if (!part_nr_sects_read(part) &&
+ if (!bdev_nr_sectors(part) &&
!(piter->flags & DISK_PITER_INCL_EMPTY) &&
!(piter->flags & DISK_PITER_INCL_EMPTY_PART0 &&
piter->idx == 0))
continue;
- get_device(part_to_dev(part));
- piter->part = part;
+ piter->part = bdgrab(part);
+ if (!piter->part)
+ continue;
piter->idx += inc;
break;
}
@@ -285,15 +274,16 @@ EXPORT_SYMBOL_GPL(disk_part_iter_next);
*/
void disk_part_iter_exit(struct disk_part_iter *piter)
{
- disk_put_part(piter->part);
+ if (piter->part)
+ bdput(piter->part);
piter->part = NULL;
}
EXPORT_SYMBOL_GPL(disk_part_iter_exit);
-static inline int sector_in_part(struct hd_struct *part, sector_t sector)
+static inline int sector_in_part(struct block_device *part, sector_t sector)
{
- return part->start_sect <= sector &&
- sector < part->start_sect + part_nr_sects_read(part);
+ return part->bd_start_sect <= sector &&
+ sector < part->bd_start_sect + bdev_nr_sectors(part);
}
/**
@@ -305,44 +295,34 @@ static inline int sector_in_part(struct hd_struct *part, sector_t sector)
* primarily used for stats accounting.
*
* CONTEXT:
- * RCU read locked. The returned partition pointer is always valid
- * because its refcount is grabbed except for part0, which lifetime
- * is same with the disk.
+ * RCU read locked.
*
* RETURNS:
* Found partition on success, part0 is returned if no partition matches
* or the matched partition is being deleted.
*/
-struct hd_struct *disk_map_sector_rcu(struct gendisk *disk, sector_t sector)
+struct block_device *disk_map_sector_rcu(struct gendisk *disk, sector_t sector)
{
struct disk_part_tbl *ptbl;
- struct hd_struct *part;
+ struct block_device *part;
int i;
rcu_read_lock();
ptbl = rcu_dereference(disk->part_tbl);
part = rcu_dereference(ptbl->last_lookup);
- if (part && sector_in_part(part, sector) && hd_struct_try_get(part))
+ if (part && sector_in_part(part, sector))
goto out_unlock;
for (i = 1; i < ptbl->len; i++) {
part = rcu_dereference(ptbl->part[i]);
-
if (part && sector_in_part(part, sector)) {
- /*
- * only live partition can be cached for lookup,
- * so use-after-free on cached & deleting partition
- * can be avoided
- */
- if (!hd_struct_try_get(part))
- break;
rcu_assign_pointer(ptbl->last_lookup, part);
goto out_unlock;
}
}
- part = &disk->part0;
+ part = disk->part0;
out_unlock:
rcu_read_unlock();
return part;
@@ -393,7 +373,9 @@ static struct blk_major_name {
struct blk_major_name *next;
int major;
char name[16];
+ void (*probe)(dev_t devt);
} *major_names[BLKDEV_MAJOR_HASH_SIZE];
+static DEFINE_MUTEX(major_names_lock);
/* index in the above - for now: assume no multimajor ranges */
static inline int major_to_index(unsigned major)
@@ -406,20 +388,21 @@ void blkdev_show(struct seq_file *seqf, off_t offset)
{
struct blk_major_name *dp;
- mutex_lock(&block_class_lock);
+ mutex_lock(&major_names_lock);
for (dp = major_names[major_to_index(offset)]; dp; dp = dp->next)
if (dp->major == offset)
seq_printf(seqf, "%3d %s\n", dp->major, dp->name);
- mutex_unlock(&block_class_lock);
+ mutex_unlock(&major_names_lock);
}
#endif /* CONFIG_PROC_FS */
/**
- * register_blkdev - register a new block device
+ * __register_blkdev - register a new block device
*
* @major: the requested major device number [1..BLKDEV_MAJOR_MAX-1]. If
* @major = 0, try to allocate any unused major number.
* @name: the name of the new block device as a zero terminated string
+ * @probe: callback that is called on access to any minor number of @major
*
* The @name must be unique within the system.
*
@@ -433,13 +416,16 @@ void blkdev_show(struct seq_file *seqf, off_t offset)
*
* See Documentation/admin-guide/devices.txt for the list of allocated
* major numbers.
+ *
+ * Use register_blkdev instead for any new code.
*/
-int register_blkdev(unsigned int major, const char *name)
+int __register_blkdev(unsigned int major, const char *name,
+ void (*probe)(dev_t devt))
{
struct blk_major_name **n, *p;
int index, ret = 0;
- mutex_lock(&block_class_lock);
+ mutex_lock(&major_names_lock);
/* temporary */
if (major == 0) {
@@ -473,6 +459,7 @@ int register_blkdev(unsigned int major, const char *name)
}
p->major = major;
+ p->probe = probe;
strlcpy(p->name, name, sizeof(p->name));
p->next = NULL;
index = major_to_index(major);
@@ -492,11 +479,10 @@ int register_blkdev(unsigned int major, const char *name)
kfree(p);
}
out:
- mutex_unlock(&block_class_lock);
+ mutex_unlock(&major_names_lock);
return ret;
}
-
-EXPORT_SYMBOL(register_blkdev);
+EXPORT_SYMBOL(__register_blkdev);
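As a usage sketch (not part of this patch), a driver could hook the new probe mechanism roughly as below; the "mydrv" name, the my_add_device() helper and the request for a dynamic major are illustrative assumptions:

#include <linux/genhd.h>
#include <linux/module.h>

/* Sketch only: called on first access to any minor of this major; create
 * the gendisk for that minor here.  my_add_device() is hypothetical. */
static void my_probe(dev_t devt)
{
	my_add_device(MINOR(devt));
}

static int __init my_driver_init(void)
{
	/* major == 0 asks for a dynamically allocated major number, which
	 * __register_blkdev() returns on success. */
	int major = __register_blkdev(0, "mydrv", my_probe);

	return major < 0 ? major : 0;
}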
void unregister_blkdev(unsigned int major, const char *name)
{
@@ -504,7 +490,7 @@ void unregister_blkdev(unsigned int major, const char *name)
struct blk_major_name *p = NULL;
int index = major_to_index(major);
- mutex_lock(&block_class_lock);
+ mutex_lock(&major_names_lock);
for (n = &major_names[index]; *n; n = &(*n)->next)
if ((*n)->major == major)
break;
@@ -514,14 +500,12 @@ void unregister_blkdev(unsigned int major, const char *name)
p = *n;
*n = p->next;
}
- mutex_unlock(&block_class_lock);
+ mutex_unlock(&major_names_lock);
kfree(p);
}
EXPORT_SYMBOL(unregister_blkdev);
-static struct kobj_map *bdev_map;
-
/**
* blk_mangle_minor - scatter minor numbers apart
* @minor: minor number to mangle
@@ -555,8 +539,8 @@ static int blk_mangle_minor(int minor)
}
/**
- * blk_alloc_devt - allocate a dev_t for a partition
- * @part: partition to allocate dev_t for
+ * blk_alloc_devt - allocate a dev_t for a block device
+ * @bdev: block device to allocate dev_t for
* @devt: out parameter for resulting dev_t
*
* Allocate a dev_t for block device.
@@ -568,25 +552,18 @@ static int blk_mangle_minor(int minor)
* CONTEXT:
* Might sleep.
*/
-int blk_alloc_devt(struct hd_struct *part, dev_t *devt)
+int blk_alloc_devt(struct block_device *bdev, dev_t *devt)
{
- struct gendisk *disk = part_to_disk(part);
+ struct gendisk *disk = bdev->bd_disk;
int idx;
/* in consecutive minor range? */
- if (part->partno < disk->minors) {
- *devt = MKDEV(disk->major, disk->first_minor + part->partno);
+ if (bdev->bd_partno < disk->minors) {
+ *devt = MKDEV(disk->major, disk->first_minor + bdev->bd_partno);
return 0;
}
- /* allocate ext devt */
- idr_preload(GFP_KERNEL);
-
- spin_lock_bh(&ext_devt_lock);
- idx = idr_alloc(&ext_devt_idr, part, 0, NR_EXT_DEVT, GFP_NOWAIT);
- spin_unlock_bh(&ext_devt_lock);
-
- idr_preload_end();
+ idx = ida_alloc_range(&ext_devt_ida, 0, NR_EXT_DEVT, GFP_KERNEL);
if (idx < 0)
return idx == -ENOSPC ? -EBUSY : idx;
@@ -605,26 +582,8 @@ int blk_alloc_devt(struct hd_struct *part, dev_t *devt)
*/
void blk_free_devt(dev_t devt)
{
- if (devt == MKDEV(0, 0))
- return;
-
- if (MAJOR(devt) == BLOCK_EXT_MAJOR) {
- spin_lock_bh(&ext_devt_lock);
- idr_remove(&ext_devt_idr, blk_mangle_minor(MINOR(devt)));
- spin_unlock_bh(&ext_devt_lock);
- }
-}
-
-/*
- * We invalidate devt by assigning NULL pointer for devt in idr.
- */
-void blk_invalidate_devt(dev_t devt)
-{
- if (MAJOR(devt) == BLOCK_EXT_MAJOR) {
- spin_lock_bh(&ext_devt_lock);
- idr_replace(&ext_devt_idr, NULL, blk_mangle_minor(MINOR(devt)));
- spin_unlock_bh(&ext_devt_lock);
- }
+ if (MAJOR(devt) == BLOCK_EXT_MAJOR)
+ ida_free(&ext_devt_ida, blk_mangle_minor(MINOR(devt)));
}
static char *bdevt_str(dev_t devt, char *buf)
@@ -639,43 +598,6 @@ static char *bdevt_str(dev_t devt, char *buf)
return buf;
}
-/*
- * Register device numbers dev..(dev+range-1)
- * range must be nonzero
- * The hash chain is sorted on range, so that subranges can override.
- */
-void blk_register_region(dev_t devt, unsigned long range, struct module *module,
- struct kobject *(*probe)(dev_t, int *, void *),
- int (*lock)(dev_t, void *), void *data)
-{
- kobj_map(bdev_map, devt, range, module, probe, lock, data);
-}
-
-EXPORT_SYMBOL(blk_register_region);
-
-void blk_unregister_region(dev_t devt, unsigned long range)
-{
- kobj_unmap(bdev_map, devt, range);
-}
-
-EXPORT_SYMBOL(blk_unregister_region);
-
-static struct kobject *exact_match(dev_t devt, int *partno, void *data)
-{
- struct gendisk *p = data;
-
- return &disk_to_dev(p)->kobj;
-}
-
-static int exact_lock(dev_t devt, void *data)
-{
- struct gendisk *p = data;
-
- if (!get_disk_and_module(p))
- return -1;
- return 0;
-}
-
static void disk_scan_partitions(struct gendisk *disk)
{
struct block_device *bdev;
@@ -694,7 +616,7 @@ static void register_disk(struct device *parent, struct gendisk *disk,
{
struct device *ddev = disk_to_dev(disk);
struct disk_part_iter piter;
- struct hd_struct *part;
+ struct block_device *part;
int err;
ddev->parent = parent;
@@ -726,7 +648,8 @@ static void register_disk(struct device *parent, struct gendisk *disk,
*/
pm_runtime_set_memalloc_noio(ddev, true);
- disk->part0.holder_dir = kobject_create_and_add("holders", &ddev->kobj);
+ disk->part0->bd_holder_dir =
+ kobject_create_and_add("holders", &ddev->kobj);
disk->slave_dir = kobject_create_and_add("slaves", &ddev->kobj);
if (disk->flags & GENHD_FL_HIDDEN) {
@@ -743,7 +666,7 @@ static void register_disk(struct device *parent, struct gendisk *disk,
/* announce possible partitions */
disk_part_iter_init(&piter, disk, 0);
while ((part = disk_part_iter_next(&piter)))
- kobject_uevent(&part_to_dev(part)->kobj, KOBJ_ADD);
+ kobject_uevent(bdev_kobj(part), KOBJ_ADD);
disk_part_iter_exit(&piter);
if (disk->queue->backing_dev_info->dev) {
@@ -792,7 +715,7 @@ static void __device_add_disk(struct device *parent, struct gendisk *disk,
disk->flags |= GENHD_FL_UP;
- retval = blk_alloc_devt(&disk->part0, &devt);
+ retval = blk_alloc_devt(disk->part0, &devt);
if (retval) {
WARN_ON(1);
return;
@@ -819,8 +742,7 @@ static void __device_add_disk(struct device *parent, struct gendisk *disk,
ret = bdi_register(bdi, "%u:%u", MAJOR(devt), MINOR(devt));
WARN_ON(ret);
bdi_set_owner(bdi, dev);
- blk_register_region(disk_devt(disk), disk->minors, NULL,
- exact_match, exact_lock, disk);
+ bdev_add(disk->part0, devt);
}
register_disk(parent, disk, groups);
if (register_queue)
@@ -850,23 +772,16 @@ void device_add_disk_no_queue_reg(struct device *parent, struct gendisk *disk)
}
EXPORT_SYMBOL(device_add_disk_no_queue_reg);
-static void invalidate_partition(struct gendisk *disk, int partno)
+static void invalidate_partition(struct block_device *bdev)
{
- struct block_device *bdev;
-
- bdev = bdget_disk(disk, partno);
- if (!bdev)
- return;
-
fsync_bdev(bdev);
__invalidate_device(bdev, true);
/*
- * Unhash the bdev inode for this device so that it gets evicted as soon
- * as last inode reference is dropped.
+ * Unhash the bdev inode for this device so that it can't be looked
+ * up any more even if openers still hold references to it.
*/
remove_inode_hash(bdev->bd_inode);
- bdput(bdev);
}
/**
@@ -891,10 +806,13 @@ static void invalidate_partition(struct gendisk *disk, int partno)
void del_gendisk(struct gendisk *disk)
{
struct disk_part_iter piter;
- struct hd_struct *part;
+ struct block_device *part;
might_sleep();
+ if (WARN_ON_ONCE(!disk->queue))
+ return;
+
blk_integrity_del(disk);
disk_del_events(disk);
@@ -902,50 +820,39 @@ void del_gendisk(struct gendisk *disk)
* Block lookups of the disk until all bdevs are unhashed and the
* disk is marked as dead (GENHD_FL_UP cleared).
*/
- down_write(&disk->lookup_sem);
+ down_write(&bdev_lookup_sem);
+
/* invalidate stuff */
disk_part_iter_init(&piter, disk,
DISK_PITER_INCL_EMPTY | DISK_PITER_REVERSE);
while ((part = disk_part_iter_next(&piter))) {
- invalidate_partition(disk, part->partno);
+ invalidate_partition(part);
delete_partition(part);
}
disk_part_iter_exit(&piter);
- invalidate_partition(disk, 0);
+ invalidate_partition(disk->part0);
set_capacity(disk, 0);
disk->flags &= ~GENHD_FL_UP;
- up_write(&disk->lookup_sem);
+ up_write(&bdev_lookup_sem);
- if (!(disk->flags & GENHD_FL_HIDDEN))
+ if (!(disk->flags & GENHD_FL_HIDDEN)) {
sysfs_remove_link(&disk_to_dev(disk)->kobj, "bdi");
- if (disk->queue) {
+
/*
* Unregister bdi before releasing device numbers (as they can
* get reused and we'd get clashes in sysfs).
*/
- if (!(disk->flags & GENHD_FL_HIDDEN))
- bdi_unregister(disk->queue->backing_dev_info);
- blk_unregister_queue(disk);
- } else {
- WARN_ON(1);
+ bdi_unregister(disk->queue->backing_dev_info);
}
- if (!(disk->flags & GENHD_FL_HIDDEN))
- blk_unregister_region(disk_devt(disk), disk->minors);
- /*
- * Remove gendisk pointer from idr so that it cannot be looked up
- * while RCU period before freeing gendisk is running to prevent
- * use-after-free issues. Note that the device number stays
- * "in-use" until we really free the gendisk.
- */
- blk_invalidate_devt(disk_devt(disk));
+ blk_unregister_queue(disk);
- kobject_put(disk->part0.holder_dir);
+ kobject_put(disk->part0->bd_holder_dir);
kobject_put(disk->slave_dir);
- part_stat_set_all(&disk->part0, 0);
- disk->part0.stamp = 0;
+ part_stat_set_all(disk->part0, 0);
+ disk->part0->bd_stamp = 0;
if (!sysfs_deprecated)
sysfs_remove_link(block_depr, dev_name(disk_to_dev(disk)));
pm_runtime_set_memalloc_noio(disk_to_dev(disk), false);
@@ -978,57 +885,24 @@ static ssize_t disk_badblocks_store(struct device *dev,
return badblocks_store(disk->bb, page, len, 0);
}
-/**
- * get_gendisk - get partitioning information for a given device
- * @devt: device to get partitioning information for
- * @partno: returned partition index
- *
- * This function gets the structure containing partitioning
- * information for the given device @devt.
- *
- * Context: can sleep
- */
-struct gendisk *get_gendisk(dev_t devt, int *partno)
+void blk_request_module(dev_t devt)
{
- struct gendisk *disk = NULL;
-
- might_sleep();
-
- if (MAJOR(devt) != BLOCK_EXT_MAJOR) {
- struct kobject *kobj;
-
- kobj = kobj_lookup(bdev_map, devt, partno);
- if (kobj)
- disk = dev_to_disk(kobj_to_dev(kobj));
- } else {
- struct hd_struct *part;
+ unsigned int major = MAJOR(devt);
+ struct blk_major_name **n;
- spin_lock_bh(&ext_devt_lock);
- part = idr_find(&ext_devt_idr, blk_mangle_minor(MINOR(devt)));
- if (part && get_disk_and_module(part_to_disk(part))) {
- *partno = part->partno;
- disk = part_to_disk(part);
+ mutex_lock(&major_names_lock);
+ for (n = &major_names[major_to_index(major)]; *n; n = &(*n)->next) {
+ if ((*n)->major == major && (*n)->probe) {
+ (*n)->probe(devt);
+ mutex_unlock(&major_names_lock);
+ return;
}
- spin_unlock_bh(&ext_devt_lock);
}
+ mutex_unlock(&major_names_lock);
- if (!disk)
- return NULL;
-
- /*
- * Synchronize with del_gendisk() to not return disk that is being
- * destroyed.
- */
- down_read(&disk->lookup_sem);
- if (unlikely((disk->flags & GENHD_FL_HIDDEN) ||
- !(disk->flags & GENHD_FL_UP))) {
- up_read(&disk->lookup_sem);
- put_disk_and_module(disk);
- disk = NULL;
- } else {
- up_read(&disk->lookup_sem);
- }
- return disk;
+ if (request_module("block-major-%d-%d", MAJOR(devt), MINOR(devt)) > 0)
+ /* Make old-style 2.4 aliases work */
+ request_module("block-major-%d", MAJOR(devt));
}
/**
@@ -1046,17 +920,16 @@ struct gendisk *get_gendisk(dev_t devt, int *partno)
*/
struct block_device *bdget_disk(struct gendisk *disk, int partno)
{
- struct hd_struct *part;
struct block_device *bdev = NULL;
- part = disk_get_part(disk, partno);
- if (part)
- bdev = bdget_part(part);
- disk_put_part(part);
+ rcu_read_lock();
+ bdev = __disk_get_part(disk, partno);
+ if (bdev && !bdgrab(bdev))
+ bdev = NULL;
+ rcu_read_unlock();
return bdev;
}
-EXPORT_SYMBOL(bdget_disk);
/*
* print a full list of all partitions - intended for places where the root
@@ -1072,7 +945,7 @@ void __init printk_all_partitions(void)
while ((dev = class_dev_iter_next(&iter))) {
struct gendisk *disk = dev_to_disk(dev);
struct disk_part_iter piter;
- struct hd_struct *part;
+ struct block_device *part;
char name_buf[BDEVNAME_SIZE];
char devt_buf[BDEVT_SIZE];
@@ -1091,13 +964,14 @@ void __init printk_all_partitions(void)
*/
disk_part_iter_init(&piter, disk, DISK_PITER_INCL_PART0);
while ((part = disk_part_iter_next(&piter))) {
- bool is_part0 = part == &disk->part0;
+ bool is_part0 = part == disk->part0;
printk("%s%s %10llu %s %s", is_part0 ? "" : " ",
- bdevt_str(part_devt(part), devt_buf),
- (unsigned long long)part_nr_sects_read(part) >> 1
- , disk_name(disk, part->partno, name_buf),
- part->info ? part->info->uuid : "");
+ bdevt_str(part->bd_dev, devt_buf),
+ bdev_nr_sectors(part) >> 1,
+ disk_name(disk, part->bd_partno, name_buf),
+ part->bd_meta_info ?
+ part->bd_meta_info->uuid : "");
if (is_part0) {
if (dev->parent && dev->parent->driver)
printk(" driver: %s\n",
@@ -1173,7 +1047,7 @@ static int show_partition(struct seq_file *seqf, void *v)
{
struct gendisk *sgp = v;
struct disk_part_iter piter;
- struct hd_struct *part;
+ struct block_device *part;
char buf[BDEVNAME_SIZE];
/* Don't show non-partitionable removable devices or empty devices */
@@ -1187,9 +1061,9 @@ static int show_partition(struct seq_file *seqf, void *v)
disk_part_iter_init(&piter, sgp, DISK_PITER_INCL_PART0);
while ((part = disk_part_iter_next(&piter)))
seq_printf(seqf, "%4d %7d %10llu %s\n",
- MAJOR(part_devt(part)), MINOR(part_devt(part)),
- (unsigned long long)part_nr_sects_read(part) >> 1,
- disk_name(sgp, part->partno, buf));
+ MAJOR(part->bd_dev), MINOR(part->bd_dev),
+ bdev_nr_sectors(part) >> 1,
+ disk_name(sgp, part->bd_partno, buf));
disk_part_iter_exit(&piter);
return 0;
@@ -1203,15 +1077,6 @@ static const struct seq_operations partitions_op = {
};
#endif
-
-static struct kobject *base_probe(dev_t devt, int *partno, void *data)
-{
- if (request_module("block-major-%d-%d", MAJOR(devt), MINOR(devt)) > 0)
- /* Make old-style 2.4 aliases work */
- request_module("block-major-%d", MAJOR(devt));
- return NULL;
-}
-
static int __init genhd_device_init(void)
{
int error;
@@ -1220,7 +1085,6 @@ static int __init genhd_device_init(void)
error = class_register(&block_class);
if (unlikely(error))
return error;
- bdev_map = kobj_map_init(base_probe, &block_class_lock);
blk_dev_init();
register_blkdev(BLOCK_EXT_MAJOR, "blkext");
@@ -1278,25 +1142,22 @@ static ssize_t disk_ro_show(struct device *dev,
ssize_t part_size_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct hd_struct *p = dev_to_part(dev);
-
- return sprintf(buf, "%llu\n",
- (unsigned long long)part_nr_sects_read(p));
+ return sprintf(buf, "%llu\n", bdev_nr_sectors(dev_to_bdev(dev)));
}
ssize_t part_stat_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct hd_struct *p = dev_to_part(dev);
- struct request_queue *q = part_to_disk(p)->queue;
+ struct block_device *bdev = dev_to_bdev(dev);
+ struct request_queue *q = bdev->bd_disk->queue;
struct disk_stats stat;
unsigned int inflight;
- part_stat_read_all(p, &stat);
+ part_stat_read_all(bdev, &stat);
if (queue_is_mq(q))
- inflight = blk_mq_in_flight(q, p);
+ inflight = blk_mq_in_flight(q, bdev);
else
- inflight = part_in_flight(p);
+ inflight = part_in_flight(bdev);
return sprintf(buf,
"%8lu %8lu %8llu %8u "
@@ -1331,14 +1192,14 @@ ssize_t part_stat_show(struct device *dev,
ssize_t part_inflight_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
- struct hd_struct *p = dev_to_part(dev);
- struct request_queue *q = part_to_disk(p)->queue;
+ struct block_device *bdev = dev_to_bdev(dev);
+ struct request_queue *q = bdev->bd_disk->queue;
unsigned int inflight[2];
if (queue_is_mq(q))
- blk_mq_in_flight_rw(q, p, inflight);
+ blk_mq_in_flight_rw(q, bdev, inflight);
else
- part_in_flight_rw(p, inflight);
+ part_in_flight_rw(bdev, inflight);
return sprintf(buf, "%8u %8u\n", inflight[0], inflight[1]);
}
@@ -1386,20 +1247,17 @@ static DEVICE_ATTR(badblocks, 0644, disk_badblocks_show, disk_badblocks_store);
ssize_t part_fail_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct hd_struct *p = dev_to_part(dev);
-
- return sprintf(buf, "%d\n", p->make_it_fail);
+ return sprintf(buf, "%d\n", dev_to_bdev(dev)->bd_make_it_fail);
}
ssize_t part_fail_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
- struct hd_struct *p = dev_to_part(dev);
int i;
if (count > 0 && sscanf(buf, "%d", &i) > 0)
- p->make_it_fail = (i == 0) ? 0 : 1;
+ dev_to_bdev(dev)->bd_make_it_fail = i;
return count;
}
@@ -1538,11 +1396,6 @@ int disk_expand_part_tbl(struct gendisk *disk, int partno)
*
* This function releases all allocated resources of the gendisk.
*
- * The struct gendisk refcount is incremented with get_gendisk() or
- * get_disk_and_module(), and its refcount is decremented with
- * put_disk_and_module() or put_disk(). Once the refcount reaches 0 this
- * function is called.
- *
* Drivers which used __device_add_disk() have a gendisk with a request_queue
* assigned. Since the request_queue sits on top of the gendisk for these
* drivers we also call blk_put_queue() for them, and we expect the
@@ -1561,7 +1414,7 @@ static void disk_release(struct device *dev)
disk_release_events(disk);
kfree(disk->random);
disk_replace_part_tbl(disk, NULL);
- hd_free_part(&disk->part0);
+ bdput(disk->part0);
if (disk->queue)
blk_put_queue(disk->queue);
kfree(disk);
@@ -1599,7 +1452,7 @@ static int diskstats_show(struct seq_file *seqf, void *v)
{
struct gendisk *gp = v;
struct disk_part_iter piter;
- struct hd_struct *hd;
+ struct block_device *hd;
char buf[BDEVNAME_SIZE];
unsigned int inflight;
struct disk_stats stat;
@@ -1627,8 +1480,8 @@ static int diskstats_show(struct seq_file *seqf, void *v)
"%lu %lu %lu %u "
"%lu %u"
"\n",
- MAJOR(part_devt(hd)), MINOR(part_devt(hd)),
- disk_name(gp, hd->partno, buf),
+ MAJOR(hd->bd_dev), MINOR(hd->bd_dev),
+ disk_name(gp, hd->bd_partno, buf),
stat.ios[STAT_READ],
stat.merges[STAT_READ],
stat.sectors[STAT_READ],
@@ -1686,7 +1539,7 @@ dev_t blk_lookup_devt(const char *name, int partno)
class_dev_iter_init(&iter, &block_class, NULL, &disk_type);
while ((dev = class_dev_iter_next(&iter))) {
struct gendisk *disk = dev_to_disk(dev);
- struct hd_struct *part;
+ struct block_device *part;
if (strcmp(dev_name(dev), name))
continue;
@@ -1699,13 +1552,12 @@ dev_t blk_lookup_devt(const char *name, int partno)
MINOR(dev->devt) + partno);
break;
}
- part = disk_get_part(disk, partno);
+ part = bdget_disk(disk, partno);
if (part) {
- devt = part_devt(part);
- disk_put_part(part);
+ devt = part->bd_dev;
+ bdput(part);
break;
}
- disk_put_part(part);
}
class_dev_iter_exit(&iter);
return devt;
@@ -1727,32 +1579,16 @@ struct gendisk *__alloc_disk_node(int minors, int node_id)
if (!disk)
return NULL;
- disk->part0.dkstats = alloc_percpu(struct disk_stats);
- if (!disk->part0.dkstats)
+ disk->part0 = bdev_alloc(disk, 0);
+ if (!disk->part0)
goto out_free_disk;
- init_rwsem(&disk->lookup_sem);
disk->node_id = node_id;
- if (disk_expand_part_tbl(disk, 0)) {
- free_percpu(disk->part0.dkstats);
- goto out_free_disk;
- }
+ if (disk_expand_part_tbl(disk, 0))
+ goto out_bdput;
ptbl = rcu_dereference_protected(disk->part_tbl, 1);
- rcu_assign_pointer(ptbl->part[0], &disk->part0);
-
- /*
- * set_capacity() and get_capacity() currently don't use
- * seqcounter to read/update the part0->nr_sects. Still init
- * the counter as we can read the sectors in IO submission
- * patch using seqence counters.
- *
- * TODO: Ideally set_capacity() and get_capacity() should be
- * converted to make use of bd_mutex and sequence counters.
- */
- hd_sects_seq_init(&disk->part0);
- if (hd_ref_init(&disk->part0))
- goto out_free_part0;
+ rcu_assign_pointer(ptbl->part[0], disk->part0);
disk->minors = minors;
rand_initialize_disk(disk);
@@ -1761,8 +1597,8 @@ struct gendisk *__alloc_disk_node(int minors, int node_id)
device_initialize(disk_to_dev(disk));
return disk;
-out_free_part0:
- hd_free_part(&disk->part0);
+out_bdput:
+ bdput(disk->part0);
out_free_disk:
kfree(disk);
return NULL;
@@ -1770,35 +1606,6 @@ out_free_disk:
EXPORT_SYMBOL(__alloc_disk_node);
/**
- * get_disk_and_module - increments the gendisk and gendisk fops module refcount
- * @disk: the struct gendisk to increment the refcount for
- *
- * This increments the refcount for the struct gendisk, and the gendisk's
- * fops module owner.
- *
- * Context: Any context.
- */
-struct kobject *get_disk_and_module(struct gendisk *disk)
-{
- struct module *owner;
- struct kobject *kobj;
-
- if (!disk->fops)
- return NULL;
- owner = disk->fops->owner;
- if (owner && !try_module_get(owner))
- return NULL;
- kobj = kobject_get_unless_zero(&disk_to_dev(disk)->kobj);
- if (kobj == NULL) {
- module_put(owner);
- return NULL;
- }
- return kobj;
-
-}
-EXPORT_SYMBOL(get_disk_and_module);
-
-/**
* put_disk - decrements the gendisk refcount
* @disk: the struct gendisk to decrement the refcount for
*
@@ -1811,31 +1618,10 @@ EXPORT_SYMBOL(get_disk_and_module);
void put_disk(struct gendisk *disk)
{
if (disk)
- kobject_put(&disk_to_dev(disk)->kobj);
+ put_device(disk_to_dev(disk));
}
EXPORT_SYMBOL(put_disk);
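With get_disk_and_module()/put_disk_and_module() removed, the remaining pairing is alloc_disk() plus a plain put_disk() on the error path before device_add_disk(). A hedged sketch of that pattern; my_create_disk() and my_setup_queue() are invented names:

/* Sketch only: a gendisk that was never added with device_add_disk()
 * is released by dropping its sole reference with put_disk(). */
static struct gendisk *my_create_disk(void)
{
	struct gendisk *disk = alloc_disk(1);

	if (!disk)
		return NULL;
	if (my_setup_queue(disk) < 0) {		/* hypothetical setup step */
		put_disk(disk);
		return NULL;
	}
	return disk;
}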
-/**
- * put_disk_and_module - decrements the module and gendisk refcount
- * @disk: the struct gendisk to decrement the refcount for
- *
- * This is a counterpart of get_disk_and_module() and thus also of
- * get_gendisk().
- *
- * Context: Any context, but the last reference must not be dropped from
- * atomic context.
- */
-void put_disk_and_module(struct gendisk *disk)
-{
- if (disk) {
- struct module *owner = disk->fops->owner;
-
- put_disk(disk);
- module_put(owner);
- }
-}
-EXPORT_SYMBOL(put_disk_and_module);
-
static void set_disk_ro_uevent(struct gendisk *gd, int ro)
{
char event[] = "DISK_RO=1";
@@ -1846,26 +1632,19 @@ static void set_disk_ro_uevent(struct gendisk *gd, int ro)
kobject_uevent_env(&disk_to_dev(gd)->kobj, KOBJ_CHANGE, envp);
}
-void set_device_ro(struct block_device *bdev, int flag)
-{
- bdev->bd_part->policy = flag;
-}
-
-EXPORT_SYMBOL(set_device_ro);
-
void set_disk_ro(struct gendisk *disk, int flag)
{
struct disk_part_iter piter;
- struct hd_struct *part;
+ struct block_device *part;
- if (disk->part0.policy != flag) {
+ if (disk->part0->bd_read_only != flag) {
set_disk_ro_uevent(disk, flag);
- disk->part0.policy = flag;
+ disk->part0->bd_read_only = flag;
}
disk_part_iter_init(&piter, disk, DISK_PITER_INCL_EMPTY);
while ((part = disk_part_iter_next(&piter)))
- part->policy = flag;
+ part->bd_read_only = flag;
disk_part_iter_exit(&piter);
}
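The partition iterator now hands back struct block_device directly, so out-of-file callers would look roughly like the sketch below; my_dump_parts() is invented, while bd_partno and bdev_nr_sectors() are the fields and helper introduced by this series:

/* Sketch only: walk every partition of @disk, including empty ones. */
static void my_dump_parts(struct gendisk *disk)
{
	struct disk_part_iter piter;
	struct block_device *part;

	disk_part_iter_init(&piter, disk, DISK_PITER_INCL_EMPTY);
	while ((part = disk_part_iter_next(&piter)))
		pr_info("partno %u: %llu sectors\n", part->bd_partno,
			(unsigned long long)bdev_nr_sectors(part));
	disk_part_iter_exit(&piter);
}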
@@ -1875,7 +1654,7 @@ int bdev_read_only(struct block_device *bdev)
{
if (!bdev)
return 0;
- return bdev->bd_part->policy;
+ return bdev->bd_read_only;
}
EXPORT_SYMBOL(bdev_read_only);
diff --git a/block/ioctl.c b/block/ioctl.c
index 3fbc382eb926..d61d652078f4 100644
--- a/block/ioctl.c
+++ b/block/ioctl.c
@@ -35,15 +35,6 @@ static int blkpg_do_ioctl(struct block_device *bdev,
start = p.start >> SECTOR_SHIFT;
length = p.length >> SECTOR_SHIFT;
- /* check for fit in a hd_struct */
- if (sizeof(sector_t) < sizeof(long long)) {
- long pstart = start, plength = length;
-
- if (pstart != start || plength != length || pstart < 0 ||
- plength < 0 || p.pno > 65535)
- return -EINVAL;
- }
-
switch (op) {
case BLKPG_ADD_PARTITION:
/* check if partition is aligned to blocksize */
@@ -219,23 +210,6 @@ static int compat_put_ulong(compat_ulong_t __user *argp, compat_ulong_t val)
}
#endif
-int __blkdev_driver_ioctl(struct block_device *bdev, fmode_t mode,
- unsigned cmd, unsigned long arg)
-{
- struct gendisk *disk = bdev->bd_disk;
-
- if (disk->fops->ioctl)
- return disk->fops->ioctl(bdev, mode, cmd, arg);
-
- return -ENOTTY;
-}
-/*
- * For the record: _GPL here is only because somebody decided to slap it
- * on the previous export. Sheer idiocy, since it wasn't copyrightable
- * at all and could be open-coded without any exports by anybody who cares.
- */
-EXPORT_SYMBOL_GPL(__blkdev_driver_ioctl);
-
#ifdef CONFIG_COMPAT
/*
* This is the equivalent of compat_ptr_ioctl(), to be used by block
@@ -346,38 +320,11 @@ static int blkdev_pr_clear(struct block_device *bdev,
return ops->pr_clear(bdev, c.key);
}
-/*
- * Is it an unrecognized ioctl? The correct returns are either
- * ENOTTY (final) or ENOIOCTLCMD ("I don't know this one, try a
- * fallback"). ENOIOCTLCMD gets turned into ENOTTY by the ioctl
- * code before returning.
- *
- * Confused drivers sometimes return EINVAL, which is wrong. It
- * means "I understood the ioctl command, but the parameters to
- * it were wrong".
- *
- * We should aim to just fix the broken drivers, the EINVAL case
- * should go away.
- */
-static inline int is_unrecognized_ioctl(int ret)
-{
- return ret == -EINVAL ||
- ret == -ENOTTY ||
- ret == -ENOIOCTLCMD;
-}
-
static int blkdev_flushbuf(struct block_device *bdev, fmode_t mode,
unsigned cmd, unsigned long arg)
{
- int ret;
-
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
-
- ret = __blkdev_driver_ioctl(bdev, mode, cmd, arg);
- if (!is_unrecognized_ioctl(ret))
- return ret;
-
fsync_bdev(bdev);
invalidate_bdev(bdev);
return 0;
@@ -391,12 +338,14 @@ static int blkdev_roset(struct block_device *bdev, fmode_t mode,
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
- ret = __blkdev_driver_ioctl(bdev, mode, cmd, arg);
- if (!is_unrecognized_ioctl(ret))
- return ret;
if (get_user(n, (int __user *)arg))
return -EFAULT;
- set_device_ro(bdev, n);
+ if (bdev->bd_disk->fops->set_read_only) {
+ ret = bdev->bd_disk->fops->set_read_only(bdev, n);
+ if (ret)
+ return ret;
+ }
+ bdev->bd_read_only = n;
return 0;
}
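BLKROSET now consults an optional ->set_read_only() method rather than the removed set_device_ro(). A hedged sketch of a driver-side implementation follows; the bool signature is assumed to match the new hook, and struct my_dev / my_dev_busy() are invented:

/* Sketch only: veto or record a read-only transition for this device. */
static int my_set_read_only(struct block_device *bdev, bool ro)
{
	struct my_dev *dev = bdev->bd_disk->private_data;

	if (ro && my_dev_busy(dev))	/* hypothetical busy check */
		return -EBUSY;
	dev->want_ro = ro;
	return 0;
}

static const struct block_device_operations my_fops = {
	.owner		= THIS_MODULE,
	.set_read_only	= my_set_read_only,
};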
@@ -619,10 +568,12 @@ int blkdev_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
}
ret = blkdev_common_ioctl(bdev, mode, cmd, arg, argp);
- if (ret == -ENOIOCTLCMD)
- return __blkdev_driver_ioctl(bdev, mode, cmd, arg);
+ if (ret != -ENOIOCTLCMD)
+ return ret;
- return ret;
+ if (!bdev->bd_disk->fops->ioctl)
+ return -ENOTTY;
+ return bdev->bd_disk->fops->ioctl(bdev, mode, cmd, arg);
}
EXPORT_SYMBOL_GPL(blkdev_ioctl); /* for /dev/raw */
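With __blkdev_driver_ioctl() gone, a driver's ->ioctl() only sees commands that blkdev_common_ioctl() did not recognize, and should keep returning -ENOTTY for anything it does not handle. A sketch, where MY_PRIVATE_CMD and my_handle_private() are invented:

/* Sketch only: handle driver-private commands, reject the rest. */
static int my_ioctl(struct block_device *bdev, fmode_t mode,
		    unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	case MY_PRIVATE_CMD:
		return my_handle_private(bdev, arg);
	default:
		return -ENOTTY;
	}
}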
@@ -639,8 +590,7 @@ long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg)
{
int ret;
void __user *argp = compat_ptr(arg);
- struct inode *inode = file->f_mapping->host;
- struct block_device *bdev = inode->i_bdev;
+ struct block_device *bdev = I_BDEV(file->f_mapping->host);
struct gendisk *disk = bdev->bd_disk;
fmode_t mode = file->f_mode;
loff_t size;
diff --git a/block/partitions/core.c b/block/partitions/core.c
index a02e22411594..deca253583bd 100644
--- a/block/partitions/core.c
+++ b/block/partitions/core.c
@@ -85,6 +85,13 @@ static int (*check_part[])(struct parsed_partitions *) = {
NULL
};
+static void bdev_set_nr_sectors(struct block_device *bdev, sector_t sectors)
+{
+ spin_lock(&bdev->bd_size_lock);
+ i_size_write(bdev->bd_inode, (loff_t)sectors << SECTOR_SHIFT);
+ spin_unlock(&bdev->bd_size_lock);
+}
+
static struct parsed_partitions *allocate_partitions(struct gendisk *hd)
{
struct parsed_partitions *state;
@@ -175,44 +182,39 @@ static struct parsed_partitions *check_partition(struct gendisk *hd,
static ssize_t part_partition_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct hd_struct *p = dev_to_part(dev);
-
- return sprintf(buf, "%d\n", p->partno);
+ return sprintf(buf, "%d\n", dev_to_bdev(dev)->bd_partno);
}
static ssize_t part_start_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct hd_struct *p = dev_to_part(dev);
-
- return sprintf(buf, "%llu\n",(unsigned long long)p->start_sect);
+ return sprintf(buf, "%llu\n", dev_to_bdev(dev)->bd_start_sect);
}
static ssize_t part_ro_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct hd_struct *p = dev_to_part(dev);
- return sprintf(buf, "%d\n", p->policy ? 1 : 0);
+ return sprintf(buf, "%d\n", dev_to_bdev(dev)->bd_read_only);
}
static ssize_t part_alignment_offset_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct hd_struct *p = dev_to_part(dev);
+ struct block_device *bdev = dev_to_bdev(dev);
return sprintf(buf, "%u\n",
- queue_limit_alignment_offset(&part_to_disk(p)->queue->limits,
- p->start_sect));
+ queue_limit_alignment_offset(&bdev->bd_disk->queue->limits,
+ bdev->bd_start_sect));
}
static ssize_t part_discard_alignment_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct hd_struct *p = dev_to_part(dev);
+ struct block_device *bdev = dev_to_bdev(dev);
return sprintf(buf, "%u\n",
- queue_limit_discard_alignment(&part_to_disk(p)->queue->limits,
- p->start_sect));
+ queue_limit_discard_alignment(&bdev->bd_disk->queue->limits,
+ bdev->bd_start_sect));
}
static DEVICE_ATTR(partition, 0444, part_partition_show, NULL);
@@ -257,19 +259,17 @@ static const struct attribute_group *part_attr_groups[] = {
static void part_release(struct device *dev)
{
- struct hd_struct *p = dev_to_part(dev);
blk_free_devt(dev->devt);
- hd_free_part(p);
- kfree(p);
+ bdput(dev_to_bdev(dev));
}
static int part_uevent(struct device *dev, struct kobj_uevent_env *env)
{
- struct hd_struct *part = dev_to_part(dev);
+ struct block_device *part = dev_to_bdev(dev);
- add_uevent_var(env, "PARTN=%u", part->partno);
- if (part->info && part->info->volname[0])
- add_uevent_var(env, "PARTNAME=%s", part->info->volname);
+ add_uevent_var(env, "PARTN=%u", part->bd_partno);
+ if (part->bd_meta_info && part->bd_meta_info->volname[0])
+ add_uevent_var(env, "PARTNAME=%s", part->bd_meta_info->volname);
return 0;
}
@@ -280,73 +280,29 @@ struct device_type part_type = {
.uevent = part_uevent,
};
-static void hd_struct_free_work(struct work_struct *work)
-{
- struct hd_struct *part =
- container_of(to_rcu_work(work), struct hd_struct, rcu_work);
- struct gendisk *disk = part_to_disk(part);
-
- /*
- * Release the disk reference acquired in delete_partition here.
- * We can't release it in hd_struct_free because the final put_device
- * needs process context and thus can't be run directly from a
- * percpu_ref ->release handler.
- */
- put_device(disk_to_dev(disk));
-
- part->start_sect = 0;
- part->nr_sects = 0;
- part_stat_set_all(part, 0);
- put_device(part_to_dev(part));
-}
-
-static void hd_struct_free(struct percpu_ref *ref)
-{
- struct hd_struct *part = container_of(ref, struct hd_struct, ref);
- struct gendisk *disk = part_to_disk(part);
- struct disk_part_tbl *ptbl =
- rcu_dereference_protected(disk->part_tbl, 1);
-
- rcu_assign_pointer(ptbl->last_lookup, NULL);
-
- INIT_RCU_WORK(&part->rcu_work, hd_struct_free_work);
- queue_rcu_work(system_wq, &part->rcu_work);
-}
-
-int hd_ref_init(struct hd_struct *part)
-{
- if (percpu_ref_init(&part->ref, hd_struct_free, 0, GFP_KERNEL))
- return -ENOMEM;
- return 0;
-}
-
/*
* Must be called either with bd_mutex held, before a disk can be opened or
* after all disk users are gone.
*/
-void delete_partition(struct hd_struct *part)
+void delete_partition(struct block_device *part)
{
- struct gendisk *disk = part_to_disk(part);
+ struct gendisk *disk = part->bd_disk;
struct disk_part_tbl *ptbl =
rcu_dereference_protected(disk->part_tbl, 1);
- /*
- * ->part_tbl is referenced in this part's release handler, so
- * we have to hold the disk device
- */
- get_device(disk_to_dev(disk));
- rcu_assign_pointer(ptbl->part[part->partno], NULL);
- kobject_put(part->holder_dir);
- device_del(part_to_dev(part));
+ rcu_assign_pointer(ptbl->part[part->bd_partno], NULL);
+ rcu_assign_pointer(ptbl->last_lookup, NULL);
+
+ kobject_put(part->bd_holder_dir);
+ device_del(&part->bd_device);
/*
- * Remove gendisk pointer from idr so that it cannot be looked up
- * while RCU period before freeing gendisk is running to prevent
- * use-after-free issues. Note that the device number stays
- * "in-use" until we really free the gendisk.
+ * Remove the block device from the inode hash, so that it cannot be
+ * looked up any more even when openers still hold references.
*/
- blk_invalidate_devt(part_devt(part));
- percpu_ref_kill(&part->ref);
+ remove_inode_hash(part->bd_inode);
+
+ put_device(&part->bd_device);
}
static ssize_t whole_disk_show(struct device *dev,
@@ -360,14 +316,14 @@ static DEVICE_ATTR(whole_disk, 0444, whole_disk_show, NULL);
* Must be called either with bd_mutex held, before a disk can be opened or
* after all disk users are gone.
*/
-static struct hd_struct *add_partition(struct gendisk *disk, int partno,
+static struct block_device *add_partition(struct gendisk *disk, int partno,
sector_t start, sector_t len, int flags,
struct partition_meta_info *info)
{
- struct hd_struct *p;
dev_t devt = MKDEV(0, 0);
struct device *ddev = disk_to_dev(disk);
struct device *pdev;
+ struct block_device *bdev;
struct disk_part_tbl *ptbl;
const char *dname;
int err;
@@ -398,36 +354,22 @@ static struct hd_struct *add_partition(struct gendisk *disk, int partno,
if (ptbl->part[partno])
return ERR_PTR(-EBUSY);
- p = kzalloc(sizeof(*p), GFP_KERNEL);
- if (!p)
- return ERR_PTR(-EBUSY);
-
- p->dkstats = alloc_percpu(struct disk_stats);
- if (!p->dkstats) {
- err = -ENOMEM;
- goto out_free;
- }
-
- hd_sects_seq_init(p);
- pdev = part_to_dev(p);
+ bdev = bdev_alloc(disk, partno);
+ if (!bdev)
+ return ERR_PTR(-ENOMEM);
- p->start_sect = start;
- p->nr_sects = len;
- p->partno = partno;
- p->policy = get_disk_ro(disk);
+ bdev->bd_start_sect = start;
+ bdev_set_nr_sectors(bdev, len);
+ bdev->bd_read_only = get_disk_ro(disk);
if (info) {
- struct partition_meta_info *pinfo;
-
- pinfo = kzalloc_node(sizeof(*pinfo), GFP_KERNEL, disk->node_id);
- if (!pinfo) {
- err = -ENOMEM;
- goto out_free_stats;
- }
- memcpy(pinfo, info, sizeof(*info));
- p->info = pinfo;
+ err = -ENOMEM;
+ bdev->bd_meta_info = kmemdup(info, sizeof(*info), GFP_KERNEL);
+ if (!bdev->bd_meta_info)
+ goto out_bdput;
}
+ pdev = &bdev->bd_device;
dname = dev_name(ddev);
if (isdigit(dname[strlen(dname) - 1]))
dev_set_name(pdev, "%sp%d", dname, partno);
@@ -439,9 +381,9 @@ static struct hd_struct *add_partition(struct gendisk *disk, int partno,
pdev->type = &part_type;
pdev->parent = ddev;
- err = blk_alloc_devt(p, &devt);
+ err = blk_alloc_devt(bdev, &devt);
if (err)
- goto out_free_info;
+ goto out_bdput;
pdev->devt = devt;
/* delay uevent until 'holders' subdir is created */
@@ -451,8 +393,8 @@ static struct hd_struct *add_partition(struct gendisk *disk, int partno,
goto out_put;
err = -ENOMEM;
- p->holder_dir = kobject_create_and_add("holders", &pdev->kobj);
- if (!p->holder_dir)
+ bdev->bd_holder_dir = kobject_create_and_add("holders", &pdev->kobj);
+ if (!bdev->bd_holder_dir)
goto out_del;
dev_set_uevent_suppress(pdev, 0);
@@ -462,32 +404,20 @@ static struct hd_struct *add_partition(struct gendisk *disk, int partno,
goto out_del;
}
- err = hd_ref_init(p);
- if (err) {
- if (flags & ADDPART_FLAG_WHOLEDISK)
- goto out_remove_file;
- goto out_del;
- }
-
/* everything is up and running, commence */
- rcu_assign_pointer(ptbl->part[partno], p);
+ bdev_add(bdev, devt);
+ rcu_assign_pointer(ptbl->part[partno], bdev);
/* suppress uevent if the disk suppresses it */
if (!dev_get_uevent_suppress(ddev))
kobject_uevent(&pdev->kobj, KOBJ_ADD);
- return p;
-
-out_free_info:
- kfree(p->info);
-out_free_stats:
- free_percpu(p->dkstats);
-out_free:
- kfree(p);
+ return bdev;
+
+out_bdput:
+ bdput(bdev);
return ERR_PTR(err);
-out_remove_file:
- device_remove_file(pdev, &dev_attr_whole_disk);
out_del:
- kobject_put(p->holder_dir);
+ kobject_put(bdev->bd_holder_dir);
device_del(pdev);
out_put:
put_device(pdev);
@@ -498,14 +428,14 @@ static bool partition_overlaps(struct gendisk *disk, sector_t start,
sector_t length, int skip_partno)
{
struct disk_part_iter piter;
- struct hd_struct *part;
+ struct block_device *part;
bool overlap = false;
disk_part_iter_init(&piter, disk, DISK_PITER_INCL_EMPTY);
while ((part = disk_part_iter_next(&piter))) {
- if (part->partno == skip_partno ||
- start >= part->start_sect + part->nr_sects ||
- start + length <= part->start_sect)
+ if (part->bd_partno == skip_partno ||
+ start >= part->bd_start_sect + bdev_nr_sectors(part) ||
+ start + length <= part->bd_start_sect)
continue;
overlap = true;
break;
@@ -518,7 +448,7 @@ static bool partition_overlaps(struct gendisk *disk, sector_t start,
int bdev_add_partition(struct block_device *bdev, int partno,
sector_t start, sector_t length)
{
- struct hd_struct *part;
+ struct block_device *part;
mutex_lock(&bdev->bd_mutex);
if (partition_overlaps(bdev->bd_disk, start, length, -1)) {
@@ -534,77 +464,59 @@ int bdev_add_partition(struct block_device *bdev, int partno,
int bdev_del_partition(struct block_device *bdev, int partno)
{
- struct block_device *bdevp;
- struct hd_struct *part = NULL;
+ struct block_device *part;
int ret;
- bdevp = bdget_disk(bdev->bd_disk, partno);
- if (!bdevp)
+ part = bdget_disk(bdev->bd_disk, partno);
+ if (!part)
return -ENXIO;
- mutex_lock(&bdevp->bd_mutex);
+ mutex_lock(&part->bd_mutex);
mutex_lock_nested(&bdev->bd_mutex, 1);
- ret = -ENXIO;
- part = disk_get_part(bdev->bd_disk, partno);
- if (!part)
- goto out_unlock;
-
ret = -EBUSY;
- if (bdevp->bd_openers)
+ if (part->bd_openers)
goto out_unlock;
- sync_blockdev(bdevp);
- invalidate_bdev(bdevp);
+ sync_blockdev(part);
+ invalidate_bdev(part);
delete_partition(part);
ret = 0;
out_unlock:
mutex_unlock(&bdev->bd_mutex);
- mutex_unlock(&bdevp->bd_mutex);
- bdput(bdevp);
- if (part)
- disk_put_part(part);
+ mutex_unlock(&part->bd_mutex);
+ bdput(part);
return ret;
}
int bdev_resize_partition(struct block_device *bdev, int partno,
sector_t start, sector_t length)
{
- struct block_device *bdevp;
- struct hd_struct *part;
+ struct block_device *part;
int ret = 0;
- part = disk_get_part(bdev->bd_disk, partno);
+ part = bdget_disk(bdev->bd_disk, partno);
if (!part)
return -ENXIO;
- ret = -ENOMEM;
- bdevp = bdget_part(part);
- if (!bdevp)
- goto out_put_part;
-
- mutex_lock(&bdevp->bd_mutex);
+ mutex_lock(&part->bd_mutex);
mutex_lock_nested(&bdev->bd_mutex, 1);
-
ret = -EINVAL;
- if (start != part->start_sect)
+ if (start != part->bd_start_sect)
goto out_unlock;
ret = -EBUSY;
if (partition_overlaps(bdev->bd_disk, start, length, partno))
goto out_unlock;
- part_nr_sects_write(part, length);
- bd_set_nr_sectors(bdevp, length);
+ bdev_set_nr_sectors(part, length);
ret = 0;
out_unlock:
- mutex_unlock(&bdevp->bd_mutex);
+ mutex_unlock(&part->bd_mutex);
mutex_unlock(&bdev->bd_mutex);
- bdput(bdevp);
-out_put_part:
- disk_put_part(part);
+ bdput(part);
return ret;
}
@@ -627,7 +539,7 @@ static bool disk_unlock_native_capacity(struct gendisk *disk)
int blk_drop_partitions(struct block_device *bdev)
{
struct disk_part_iter piter;
- struct hd_struct *part;
+ struct block_device *part;
if (bdev->bd_part_count)
return -EBUSY;
@@ -652,7 +564,7 @@ static bool blk_add_partition(struct gendisk *disk, struct block_device *bdev,
{
sector_t size = state->parts[p].size;
sector_t from = state->parts[p].from;
- struct hd_struct *part;
+ struct block_device *part;
if (!size)
return true;
@@ -692,7 +604,7 @@ static bool blk_add_partition(struct gendisk *disk, struct block_device *bdev,
if (IS_BUILTIN(CONFIG_BLK_DEV_MD) &&
(state->parts[p].flags & ADDPART_FLAG_RAID))
- md_autodetect_dev(part_to_dev(part)->devt);
+ md_autodetect_dev(part->bd_dev);
return true;
}