path: root/block/partitions/core.c
author	Linus Torvalds <torvalds@linux-foundation.org>	2021-06-30 22:12:56 +0300
committer	Linus Torvalds <torvalds@linux-foundation.org>	2021-06-30 22:12:56 +0300
commit	df668a5fe461bb9d7e899c538acc7197746038f4 (patch)
tree	315a71104f5cea7feeb56c9f2c768453408b72f7 /block/partitions/core.c
parent	df04fbe8680bfe07f3d7487eccff9f768bb02533 (diff)
parent	2705dfb2094777e405e065105e307074af8965c1 (diff)
download	linux-df668a5fe461bb9d7e899c538acc7197746038f4.tar.xz
Merge tag 'for-5.14/block-2021-06-29' of git://git.kernel.dk/linux-block
Pull core block updates from Jens Axboe:

 - disk events cleanup (Christoph)
 - gendisk and request queue allocation simplifications (Christoph)
 - bdev_disk_changed cleanups (Christoph)
 - IO priority improvements (Bart)
 - Chained bio completion trace fix (Edward)
 - blk-wbt fixes (Jan)
 - blk-wbt enable/disable fix (Zhang)
 - Scheduler dispatch improvements (Jan, Ming)
 - Shared tagset scheduler improvements (John)
 - BFQ updates (Paolo, Luca, Pietro)
 - BFQ lock inversion fix (Jan)
 - Documentation improvements (Kir)
 - CLONE_IO block cgroup fix (Tejun)
 - Removal of the ancient and deprecated block dump feature (zhangyi)
 - Discard merge fix (Ming)
 - Misc fixes and followup fixes (Colin, Damien, Dan, Long, Max, Thomas, Yang)

* tag 'for-5.14/block-2021-06-29' of git://git.kernel.dk/linux-block: (129 commits)
  block: fix discard request merge
  block/mq-deadline: Remove a WARN_ON_ONCE() call
  blk-mq: update hctx->dispatch_busy in case of real scheduler
  blk: Fix lock inversion between ioc lock and bfqd lock
  bfq: Remove merged request already in bfq_requests_merged()
  block: pass a gendisk to bdev_disk_changed
  block: move bdev_disk_changed
  block: add the events* attributes to disk_attrs
  block: move the disk events code to a separate file
  block: fix trace completion for chained bio
  block/partitions/msdos: Fix typo inidicator -> indicator
  block, bfq: reset waker pointer with shared queues
  block, bfq: check waker only for queues with no in-flight I/O
  block, bfq: avoid delayed merge of async queues
  block, bfq: boost throughput by extending queue-merging times
  block, bfq: consider also creation time in delayed stable merge
  block, bfq: fix delayed stable merge check
  block, bfq: let also stably merged queues enjoy weight raising
  blk-wbt: make sure throttle is enabled properly
  blk-wbt: introduce a new disable state to prevent false positive by rwb_enabled()
  ...
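
Among the bdev_disk_changed cleanups, the function now takes a gendisk and must be called with disk->open_mutex (the renamed bd_mutex) held; its new body is added near the end of the diff below. A minimal caller sketch, using a hypothetical example_rescan() helper rather than the actual loop/dasd call sites:

/* Hypothetical helper, for illustration only: rescan the partition
 * table through the reworked interface.  bdev_disk_changed() asserts
 * that disk->open_mutex is held, and returns -ENXIO if the disk is
 * gone or -EBUSY if any partition is still open.
 */
static int example_rescan(struct gendisk *disk, bool invalidate)
{
	int ret;

	mutex_lock(&disk->open_mutex);
	ret = bdev_disk_changed(disk, invalidate);
	mutex_unlock(&disk->open_mutex);

	return ret;
}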
Diffstat (limited to 'block/partitions/core.c')
-rw-r--r--	block/partitions/core.c	129
1 file changed, 89 insertions(+), 40 deletions(-)
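
Within this file, the most visible change besides the locking rework is how partition dev_t numbers are obtained: blk_alloc_devt() is gone, and add_partition() now distinguishes between the disk's preallocated minor range and dynamically allocated extended minors. A condensed, hypothetical helper (example_part_devt() is not part of the commit) that mirrors the add_partition() hunk below:

/* Illustrative only, mirroring the add_partition() and part_release()
 * hunks below; error handling trimmed.  Partitions that fit into the
 * disk's consecutive minor range reuse disk->major, everything else
 * gets a dynamically allocated minor under BLOCK_EXT_MAJOR, which
 * part_release() later hands back via blk_free_ext_minor().
 */
static dev_t example_part_devt(struct gendisk *disk, struct block_device *bdev)
{
	if (bdev->bd_partno < disk->minors)
		return MKDEV(disk->major, disk->first_minor + bdev->bd_partno);

	return MKDEV(BLOCK_EXT_MAJOR, blk_alloc_ext_minor());
}
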
diff --git a/block/partitions/core.c b/block/partitions/core.c
index dc60ecf46fe6..347c56a51d87 100644
--- a/block/partitions/core.c
+++ b/block/partitions/core.c
@@ -120,8 +120,7 @@ static void free_partitions(struct parsed_partitions *state)
kfree(state);
}
-static struct parsed_partitions *check_partition(struct gendisk *hd,
- struct block_device *bdev)
+static struct parsed_partitions *check_partition(struct gendisk *hd)
{
struct parsed_partitions *state;
int i, res, err;
@@ -136,7 +135,7 @@ static struct parsed_partitions *check_partition(struct gendisk *hd,
}
state->pp_buf[0] = '\0';
- state->bdev = bdev;
+ state->bdev = hd->part0;
disk_name(hd, 0, state->name);
snprintf(state->pp_buf, PAGE_SIZE, " %s:", state->name);
if (isdigit(state->name[strlen(state->name)-1]))
@@ -260,7 +259,8 @@ static const struct attribute_group *part_attr_groups[] = {
static void part_release(struct device *dev)
{
- blk_free_devt(dev->devt);
+ if (MAJOR(dev->devt) == BLOCK_EXT_MAJOR)
+ blk_free_ext_minor(MINOR(dev->devt));
bdput(dev_to_bdev(dev));
}
@@ -282,7 +282,7 @@ struct device_type part_type = {
};
/*
- * Must be called either with bd_mutex held, before a disk can be opened or
+ * Must be called either with open_mutex held, before a disk can be opened or
* after all disk users are gone.
*/
static void delete_partition(struct block_device *part)
@@ -311,7 +311,7 @@ static ssize_t whole_disk_show(struct device *dev,
static DEVICE_ATTR(whole_disk, 0444, whole_disk_show, NULL);
/*
- * Must be called either with bd_mutex held, before a disk can be opened or
+ * Must be called either with open_mutex held, before a disk can be opened or
* after all disk users are gone.
*/
static struct block_device *add_partition(struct gendisk *disk, int partno,
@@ -325,10 +325,8 @@ static struct block_device *add_partition(struct gendisk *disk, int partno,
const char *dname;
int err;
- /*
- * disk_max_parts() won't be zero, either GENHD_FL_EXT_DEVT is set
- * or 'minors' is passed to alloc_disk().
- */
+ lockdep_assert_held(&disk->open_mutex);
+
if (partno >= disk_max_parts(disk))
return ERR_PTR(-EINVAL);
@@ -379,9 +377,15 @@ static struct block_device *add_partition(struct gendisk *disk, int partno,
pdev->type = &part_type;
pdev->parent = ddev;
- err = blk_alloc_devt(bdev, &devt);
- if (err)
- goto out_put;
+ /* in consecutive minor range? */
+ if (bdev->bd_partno < disk->minors) {
+ devt = MKDEV(disk->major, disk->first_minor + bdev->bd_partno);
+ } else {
+ err = blk_alloc_ext_minor();
+ if (err < 0)
+ goto out_put;
+ devt = MKDEV(BLOCK_EXT_MAJOR, err);
+ }
pdev->devt = devt;
/* delay uevent until 'holders' subdir is created */
@@ -450,29 +454,27 @@ int bdev_add_partition(struct block_device *bdev, int partno,
{
struct block_device *part;
- mutex_lock(&bdev->bd_mutex);
+ mutex_lock(&bdev->bd_disk->open_mutex);
if (partition_overlaps(bdev->bd_disk, start, length, -1)) {
- mutex_unlock(&bdev->bd_mutex);
+ mutex_unlock(&bdev->bd_disk->open_mutex);
return -EBUSY;
}
part = add_partition(bdev->bd_disk, partno, start, length,
ADDPART_FLAG_NONE, NULL);
- mutex_unlock(&bdev->bd_mutex);
+ mutex_unlock(&bdev->bd_disk->open_mutex);
return PTR_ERR_OR_ZERO(part);
}
int bdev_del_partition(struct block_device *bdev, int partno)
{
- struct block_device *part;
- int ret;
+ struct block_device *part = NULL;
+ int ret = -ENXIO;
- part = bdget_disk(bdev->bd_disk, partno);
+ mutex_lock(&bdev->bd_disk->open_mutex);
+ part = xa_load(&bdev->bd_disk->part_tbl, partno);
if (!part)
- return -ENXIO;
-
- mutex_lock(&part->bd_mutex);
- mutex_lock_nested(&bdev->bd_mutex, 1);
+ goto out_unlock;
ret = -EBUSY;
if (part->bd_openers)
@@ -481,24 +483,21 @@ int bdev_del_partition(struct block_device *bdev, int partno)
delete_partition(part);
ret = 0;
out_unlock:
- mutex_unlock(&bdev->bd_mutex);
- mutex_unlock(&part->bd_mutex);
- bdput(part);
+ mutex_unlock(&bdev->bd_disk->open_mutex);
return ret;
}
int bdev_resize_partition(struct block_device *bdev, int partno,
sector_t start, sector_t length)
{
- struct block_device *part;
- int ret = 0;
+ struct block_device *part = NULL;
+ int ret = -ENXIO;
- part = bdget_disk(bdev->bd_disk, partno);
+ mutex_lock(&bdev->bd_disk->open_mutex);
+ part = xa_load(&bdev->bd_disk->part_tbl, partno);
if (!part)
- return -ENXIO;
+ goto out_unlock;
- mutex_lock(&part->bd_mutex);
- mutex_lock_nested(&bdev->bd_mutex, 1);
ret = -EINVAL;
if (start != part->bd_start_sect)
goto out_unlock;
@@ -511,9 +510,7 @@ int bdev_resize_partition(struct block_device *bdev, int partno,
ret = 0;
out_unlock:
- mutex_unlock(&part->bd_mutex);
- mutex_unlock(&bdev->bd_mutex);
- bdput(part);
+ mutex_unlock(&bdev->bd_disk->open_mutex);
return ret;
}
@@ -538,7 +535,7 @@ void blk_drop_partitions(struct gendisk *disk)
struct block_device *part;
unsigned long idx;
- lockdep_assert_held(&disk->part0->bd_mutex);
+ lockdep_assert_held(&disk->open_mutex);
xa_for_each_start(&disk->part_tbl, idx, part, 1) {
if (!bdgrab(part))
@@ -548,7 +545,7 @@ void blk_drop_partitions(struct gendisk *disk)
}
}
-static bool blk_add_partition(struct gendisk *disk, struct block_device *bdev,
+static bool blk_add_partition(struct gendisk *disk,
struct parsed_partitions *state, int p)
{
sector_t size = state->parts[p].size;
@@ -598,7 +595,7 @@ static bool blk_add_partition(struct gendisk *disk, struct block_device *bdev,
return true;
}
-int blk_add_partitions(struct gendisk *disk, struct block_device *bdev)
+static int blk_add_partitions(struct gendisk *disk)
{
struct parsed_partitions *state;
int ret = -EAGAIN, p;
@@ -606,7 +603,7 @@ int blk_add_partitions(struct gendisk *disk, struct block_device *bdev)
if (!disk_part_scan_enabled(disk))
return 0;
- state = check_partition(disk, bdev);
+ state = check_partition(disk);
if (!state)
return 0;
if (IS_ERR(state)) {
@@ -650,7 +647,7 @@ int blk_add_partitions(struct gendisk *disk, struct block_device *bdev)
kobject_uevent(&disk_to_dev(disk)->kobj, KOBJ_CHANGE);
for (p = 1; p < state->limit; p++)
- if (!blk_add_partition(disk, bdev, state, p))
+ if (!blk_add_partition(disk, state, p))
goto out_free_state;
ret = 0;
@@ -659,6 +656,58 @@ out_free_state:
return ret;
}
+int bdev_disk_changed(struct gendisk *disk, bool invalidate)
+{
+ int ret = 0;
+
+ lockdep_assert_held(&disk->open_mutex);
+
+ if (!(disk->flags & GENHD_FL_UP))
+ return -ENXIO;
+
+rescan:
+ if (disk->open_partitions)
+ return -EBUSY;
+ sync_blockdev(disk->part0);
+ invalidate_bdev(disk->part0);
+ blk_drop_partitions(disk);
+
+ clear_bit(GD_NEED_PART_SCAN, &disk->state);
+
+ /*
+ * Historically we only set the capacity to zero for devices that
+ * support partitions (independ of actually having partitions created).
+ * Doing that is rather inconsistent, but changing it broke legacy
+ * udisks polling for legacy ide-cdrom devices. Use the crude check
+ * below to get the sane behavior for most device while not breaking
+ * userspace for this particular setup.
+ */
+ if (invalidate) {
+ if (disk_part_scan_enabled(disk) ||
+ !(disk->flags & GENHD_FL_REMOVABLE))
+ set_capacity(disk, 0);
+ }
+
+ if (get_capacity(disk)) {
+ ret = blk_add_partitions(disk);
+ if (ret == -EAGAIN)
+ goto rescan;
+ } else if (invalidate) {
+ /*
+ * Tell userspace that the media / partition table may have
+ * changed.
+ */
+ kobject_uevent(&disk_to_dev(disk)->kobj, KOBJ_CHANGE);
+ }
+
+ return ret;
+}
+/*
+ * Only exported for loop and dasd for historic reasons. Don't use in new
+ * code!
+ */
+EXPORT_SYMBOL_GPL(bdev_disk_changed);
+
void *read_part_sector(struct parsed_partitions *state, sector_t n, Sector *p)
{
struct address_space *mapping = state->bdev->bd_inode->i_mapping;