Diffstat (limited to 'block')
-rw-r--r--  block/bdev.c              97
-rw-r--r--  block/bio.c                1
-rw-r--r--  block/blk-core.c          17
-rw-r--r--  block/blk-mq.c             2
-rw-r--r--  block/blk-settings.c     245
-rw-r--r--  block/blk-zoned.c          8
-rw-r--r--  block/blk.h                2
-rw-r--r--  block/bsg-lib.c            6
-rw-r--r--  block/early-lookup.c       2
-rw-r--r--  block/fops.c               4
-rw-r--r--  block/genhd.c             23
-rw-r--r--  block/ioctl.c             40
-rw-r--r--  block/partitions/core.c   20
-rw-r--r--  block/partitions/ldm.c     6
14 files changed, 138 insertions, 335 deletions
diff --git a/block/bdev.c b/block/bdev.c
index 2af3dca56f3d..353677ac49b3 100644
--- a/block/bdev.c
+++ b/block/bdev.c
@@ -43,6 +43,11 @@ static inline struct bdev_inode *BDEV_I(struct inode *inode)
return container_of(inode, struct bdev_inode, vfs_inode);
}
+static inline struct inode *BD_INODE(struct block_device *bdev)
+{
+ return &container_of(bdev, struct bdev_inode, bdev)->vfs_inode;
+}
+
struct block_device *I_BDEV(struct inode *inode)
{
return &BDEV_I(inode)->bdev;
@@ -57,7 +62,7 @@ EXPORT_SYMBOL(file_bdev);
static void bdev_write_inode(struct block_device *bdev)
{
- struct inode *inode = bdev->bd_inode;
+ struct inode *inode = BD_INODE(bdev);
int ret;
spin_lock(&inode->i_lock);
@@ -76,7 +81,7 @@ static void bdev_write_inode(struct block_device *bdev)
/* Kill _all_ buffers and pagecache , dirty or not.. */
static void kill_bdev(struct block_device *bdev)
{
- struct address_space *mapping = bdev->bd_inode->i_mapping;
+ struct address_space *mapping = bdev->bd_mapping;
if (mapping_empty(mapping))
return;
@@ -88,7 +93,7 @@ static void kill_bdev(struct block_device *bdev)
/* Invalidate clean unused buffers and pagecache. */
void invalidate_bdev(struct block_device *bdev)
{
- struct address_space *mapping = bdev->bd_inode->i_mapping;
+ struct address_space *mapping = bdev->bd_mapping;
if (mapping->nrpages) {
invalidate_bh_lrus();
@@ -116,7 +121,7 @@ int truncate_bdev_range(struct block_device *bdev, blk_mode_t mode,
goto invalidate;
}
- truncate_inode_pages_range(bdev->bd_inode->i_mapping, lstart, lend);
+ truncate_inode_pages_range(bdev->bd_mapping, lstart, lend);
if (!(mode & BLK_OPEN_EXCL))
bd_abort_claiming(bdev, truncate_bdev_range);
return 0;
@@ -126,7 +131,7 @@ invalidate:
* Someone else has handle exclusively open. Try invalidating instead.
* The 'end' argument is inclusive so the rounding is safe.
*/
- return invalidate_inode_pages2_range(bdev->bd_inode->i_mapping,
+ return invalidate_inode_pages2_range(bdev->bd_mapping,
lstart >> PAGE_SHIFT,
lend >> PAGE_SHIFT);
}
@@ -134,18 +139,21 @@ invalidate:
static void set_init_blocksize(struct block_device *bdev)
{
unsigned int bsize = bdev_logical_block_size(bdev);
- loff_t size = i_size_read(bdev->bd_inode);
+ loff_t size = i_size_read(BD_INODE(bdev));
while (bsize < PAGE_SIZE) {
if (size & bsize)
break;
bsize <<= 1;
}
- bdev->bd_inode->i_blkbits = blksize_bits(bsize);
+ BD_INODE(bdev)->i_blkbits = blksize_bits(bsize);
}
-int set_blocksize(struct block_device *bdev, int size)
+int set_blocksize(struct file *file, int size)
{
+ struct inode *inode = file->f_mapping->host;
+ struct block_device *bdev = I_BDEV(inode);
+
/* Size must be a power of two, and between 512 and PAGE_SIZE */
if (size > PAGE_SIZE || size < 512 || !is_power_of_2(size))
return -EINVAL;
@@ -154,10 +162,13 @@ int set_blocksize(struct block_device *bdev, int size)
if (size < bdev_logical_block_size(bdev))
return -EINVAL;
+ if (!file->private_data)
+ return -EINVAL;
+
/* Don't change the size if it is same as current */
- if (bdev->bd_inode->i_blkbits != blksize_bits(size)) {
+ if (inode->i_blkbits != blksize_bits(size)) {
sync_blockdev(bdev);
- bdev->bd_inode->i_blkbits = blksize_bits(size);
+ inode->i_blkbits = blksize_bits(size);
kill_bdev(bdev);
}
return 0;
@@ -167,7 +178,7 @@ EXPORT_SYMBOL(set_blocksize);
int sb_set_blocksize(struct super_block *sb, int size)
{
- if (set_blocksize(sb->s_bdev, size))
+ if (set_blocksize(sb->s_bdev_file, size))
return 0;
/* If we get here, we know size is power of two
* and it's value is between 512 and PAGE_SIZE */
@@ -192,7 +203,7 @@ int sync_blockdev_nowait(struct block_device *bdev)
{
if (!bdev)
return 0;
- return filemap_flush(bdev->bd_inode->i_mapping);
+ return filemap_flush(bdev->bd_mapping);
}
EXPORT_SYMBOL_GPL(sync_blockdev_nowait);
@@ -204,13 +215,13 @@ int sync_blockdev(struct block_device *bdev)
{
if (!bdev)
return 0;
- return filemap_write_and_wait(bdev->bd_inode->i_mapping);
+ return filemap_write_and_wait(bdev->bd_mapping);
}
EXPORT_SYMBOL(sync_blockdev);
int sync_blockdev_range(struct block_device *bdev, loff_t lstart, loff_t lend)
{
- return filemap_write_and_wait_range(bdev->bd_inode->i_mapping,
+ return filemap_write_and_wait_range(bdev->bd_mapping,
lstart, lend);
}
EXPORT_SYMBOL(sync_blockdev_range);
@@ -411,13 +422,11 @@ struct block_device *bdev_alloc(struct gendisk *disk, u8 partno)
mutex_init(&bdev->bd_fsfreeze_mutex);
spin_lock_init(&bdev->bd_size_lock);
mutex_init(&bdev->bd_holder_lock);
- bdev->bd_partno = partno;
- bdev->bd_inode = inode;
+ atomic_set(&bdev->__bd_flags, partno);
+ bdev->bd_mapping = &inode->i_data;
bdev->bd_queue = disk->queue;
- if (partno)
- bdev->bd_has_submit_bio = disk->part0->bd_has_submit_bio;
- else
- bdev->bd_has_submit_bio = false;
+ if (partno && bdev_test_flag(disk->part0, BD_HAS_SUBMIT_BIO))
+ bdev_set_flag(bdev, BD_HAS_SUBMIT_BIO);
bdev->bd_stats = alloc_percpu(struct disk_stats);
if (!bdev->bd_stats) {
iput(inode);
@@ -430,19 +439,30 @@ struct block_device *bdev_alloc(struct gendisk *disk, u8 partno)
void bdev_set_nr_sectors(struct block_device *bdev, sector_t sectors)
{
spin_lock(&bdev->bd_size_lock);
- i_size_write(bdev->bd_inode, (loff_t)sectors << SECTOR_SHIFT);
+ i_size_write(BD_INODE(bdev), (loff_t)sectors << SECTOR_SHIFT);
bdev->bd_nr_sectors = sectors;
spin_unlock(&bdev->bd_size_lock);
}
void bdev_add(struct block_device *bdev, dev_t dev)
{
+ struct inode *inode = BD_INODE(bdev);
if (bdev_stable_writes(bdev))
- mapping_set_stable_writes(bdev->bd_inode->i_mapping);
+ mapping_set_stable_writes(bdev->bd_mapping);
bdev->bd_dev = dev;
- bdev->bd_inode->i_rdev = dev;
- bdev->bd_inode->i_ino = dev;
- insert_inode_hash(bdev->bd_inode);
+ inode->i_rdev = dev;
+ inode->i_ino = dev;
+ insert_inode_hash(inode);
+}
+
+void bdev_unhash(struct block_device *bdev)
+{
+ remove_inode_hash(BD_INODE(bdev));
+}
+
+void bdev_drop(struct block_device *bdev)
+{
+ iput(BD_INODE(bdev));
}
long nr_blockdev_pages(void)
@@ -620,7 +640,7 @@ static void bd_end_claim(struct block_device *bdev, void *holder)
bdev->bd_holder = NULL;
bdev->bd_holder_ops = NULL;
mutex_unlock(&bdev->bd_holder_lock);
- if (bdev->bd_write_holder)
+ if (bdev_test_flag(bdev, BD_WRITE_HOLDER))
unblock = true;
}
if (!whole->bd_holders)
@@ -633,7 +653,7 @@ static void bd_end_claim(struct block_device *bdev, void *holder)
*/
if (unblock) {
disk_unblock_events(bdev->bd_disk);
- bdev->bd_write_holder = false;
+ bdev_clear_flag(bdev, BD_WRITE_HOLDER);
}
}
@@ -900,9 +920,10 @@ int bdev_open(struct block_device *bdev, blk_mode_t mode, void *holder,
* writeable reference is too fragile given the way @mode is
* used in blkdev_get/put().
*/
- if ((mode & BLK_OPEN_WRITE) && !bdev->bd_write_holder &&
+ if ((mode & BLK_OPEN_WRITE) &&
+ !bdev_test_flag(bdev, BD_WRITE_HOLDER) &&
(disk->event_flags & DISK_EVENT_FLAG_BLOCK_ON_EXCL_WRITE)) {
- bdev->bd_write_holder = true;
+ bdev_set_flag(bdev, BD_WRITE_HOLDER);
unblock_events = false;
}
}
@@ -917,7 +938,7 @@ int bdev_open(struct block_device *bdev, blk_mode_t mode, void *holder,
bdev_file->f_mode |= FMODE_NOWAIT;
if (mode & BLK_OPEN_RESTRICT_WRITES)
bdev_file->f_mode |= FMODE_WRITE_RESTRICTED;
- bdev_file->f_mapping = bdev->bd_inode->i_mapping;
+ bdev_file->f_mapping = bdev->bd_mapping;
bdev_file->f_wb_err = filemap_sample_wb_err(bdev_file->f_mapping);
bdev_file->private_data = holder;
@@ -979,13 +1000,13 @@ struct file *bdev_file_open_by_dev(dev_t dev, blk_mode_t mode, void *holder,
return ERR_PTR(-ENXIO);
flags = blk_to_file_flags(mode);
- bdev_file = alloc_file_pseudo_noaccount(bdev->bd_inode,
+ bdev_file = alloc_file_pseudo_noaccount(BD_INODE(bdev),
blockdev_mnt, "", flags | O_LARGEFILE, &def_blk_fops);
if (IS_ERR(bdev_file)) {
blkdev_put_no_open(bdev);
return bdev_file;
}
- ihold(bdev->bd_inode);
+ ihold(BD_INODE(bdev));
ret = bdev_open(bdev, mode, holder, hops, bdev_file);
if (ret) {
@@ -1260,6 +1281,18 @@ void bdev_statx_dioalign(struct inode *inode, struct kstat *stat)
blkdev_put_no_open(bdev);
}
+bool disk_live(struct gendisk *disk)
+{
+ return !inode_unhashed(BD_INODE(disk->part0));
+}
+EXPORT_SYMBOL_GPL(disk_live);
+
+unsigned int block_size(struct block_device *bdev)
+{
+ return 1 << BD_INODE(bdev)->i_blkbits;
+}
+EXPORT_SYMBOL_GPL(block_size);
+
static int __init setup_bdev_allow_write_mounted(char *str)
{
if (kstrtobool(str, &bdev_allow_write_mounted))
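Note on the bdev.c hunks above: they rely on new bit-flag helpers -- bdev_test_flag(), bdev_set_flag() and bdev_clear_flag() -- operating on the atomic __bd_flags word that bdev_alloc() now initialises (and which also carries the partition number). The helpers themselves are defined outside this diff; a minimal sketch of what they presumably look like, for reading the hunks:

	/* sketch only -- assumes the BD_* flags occupy bits above the partno byte */
	static inline bool bdev_test_flag(const struct block_device *bdev,
					  unsigned int flag)
	{
		return atomic_read(&bdev->__bd_flags) & flag;
	}

	static inline void bdev_set_flag(struct block_device *bdev, unsigned int flag)
	{
		atomic_or(flag, &bdev->__bd_flags);
	}

	static inline void bdev_clear_flag(struct block_device *bdev, unsigned int flag)
	{
		atomic_andnot(flag, &bdev->__bd_flags);
	}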
diff --git a/block/bio.c b/block/bio.c
index 53f608028c78..e9e809a63c59 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -1136,6 +1136,7 @@ void bio_add_folio_nofail(struct bio *bio, struct folio *folio, size_t len,
WARN_ON_ONCE(off > UINT_MAX);
__bio_add_page(bio, &folio->page, len, off);
}
+EXPORT_SYMBOL_GPL(bio_add_folio_nofail);
/**
* bio_add_folio - Attempt to add part of a folio to a bio.
diff --git a/block/blk-core.c b/block/blk-core.c
index dd29d5465af6..82c3ae22d76d 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -496,7 +496,8 @@ __setup("fail_make_request=", setup_fail_make_request);
bool should_fail_request(struct block_device *part, unsigned int bytes)
{
- return part->bd_make_it_fail && should_fail(&fail_make_request, bytes);
+ return bdev_test_flag(part, BD_MAKE_IT_FAIL) &&
+ should_fail(&fail_make_request, bytes);
}
static int __init fail_make_request_debugfs(void)
@@ -516,10 +517,11 @@ static inline void bio_check_ro(struct bio *bio)
if (op_is_flush(bio->bi_opf) && !bio_sectors(bio))
return;
- if (bio->bi_bdev->bd_ro_warned)
+ if (bdev_test_flag(bio->bi_bdev, BD_RO_WARNED))
return;
- bio->bi_bdev->bd_ro_warned = true;
+ bdev_set_flag(bio->bi_bdev, BD_RO_WARNED);
+
/*
* Use ioctl to set underlying disk of raid/dm to read-only
* will trigger this.
@@ -621,7 +623,7 @@ static void __submit_bio(struct bio *bio)
blk_start_plug(&plug);
- if (!bio->bi_bdev->bd_has_submit_bio) {
+ if (!bdev_test_flag(bio->bi_bdev, BD_HAS_SUBMIT_BIO)) {
blk_mq_submit_bio(bio);
} else if (likely(bio_queue_enter(bio) == 0)) {
struct gendisk *disk = bio->bi_bdev->bd_disk;
@@ -731,7 +733,7 @@ void submit_bio_noacct_nocheck(struct bio *bio)
*/
if (current->bio_list)
bio_list_add(&current->bio_list[0], bio);
- else if (!bio->bi_bdev->bd_has_submit_bio)
+ else if (!bdev_test_flag(bio->bi_bdev, BD_HAS_SUBMIT_BIO))
__submit_bio_noacct_mq(bio);
else
__submit_bio_noacct(bio);
@@ -767,7 +769,8 @@ void submit_bio_noacct(struct bio *bio)
if (!bio_flagged(bio, BIO_REMAPPED)) {
if (unlikely(bio_check_eod(bio)))
goto end_io;
- if (bdev->bd_partno && unlikely(blk_partition_remap(bio)))
+ if (bdev_is_partition(bdev) &&
+ unlikely(blk_partition_remap(bio)))
goto end_io;
}
@@ -992,7 +995,7 @@ again:
(end || part_in_flight(part)))
__part_stat_add(part, io_ticks, now - stamp);
- if (part->bd_partno) {
+ if (bdev_is_partition(part)) {
part = bdev_whole(part);
goto again;
}
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 579921a2f1c6..3b4df8e5ac9e 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -93,7 +93,7 @@ static bool blk_mq_check_inflight(struct request *rq, void *priv)
struct mq_inflight *mi = priv;
if (rq->part && blk_do_io_stat(rq) &&
- (!mi->part->bd_partno || rq->part == mi->part) &&
+ (!bdev_is_partition(mi->part) || rq->part == mi->part) &&
blk_mq_rq_state(rq) == MQ_RQ_IN_FLIGHT)
mi->inflight[rq_data_dir(rq)]++;
diff --git a/block/blk-settings.c b/block/blk-settings.c
index ebba05a2bc7f..a7fe8e90240a 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -283,72 +283,6 @@ int queue_limits_set(struct request_queue *q, struct queue_limits *lim)
EXPORT_SYMBOL_GPL(queue_limits_set);
/**
- * blk_queue_bounce_limit - set bounce buffer limit for queue
- * @q: the request queue for the device
- * @bounce: bounce limit to enforce
- *
- * Description:
- * Force bouncing for ISA DMA ranges or highmem.
- *
- * DEPRECATED, don't use in new code.
- **/
-void blk_queue_bounce_limit(struct request_queue *q, enum blk_bounce bounce)
-{
- q->limits.bounce = bounce;
-}
-EXPORT_SYMBOL(blk_queue_bounce_limit);
-
-/**
- * blk_queue_max_hw_sectors - set max sectors for a request for this queue
- * @q: the request queue for the device
- * @max_hw_sectors: max hardware sectors in the usual 512b unit
- *
- * Description:
- * Enables a low level driver to set a hard upper limit,
- * max_hw_sectors, on the size of requests. max_hw_sectors is set by
- * the device driver based upon the capabilities of the I/O
- * controller.
- *
- * max_dev_sectors is a hard limit imposed by the storage device for
- * READ/WRITE requests. It is set by the disk driver.
- *
- * max_sectors is a soft limit imposed by the block layer for
- * filesystem type requests. This value can be overridden on a
- * per-device basis in /sys/block/<device>/queue/max_sectors_kb.
- * The soft limit can not exceed max_hw_sectors.
- **/
-void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_hw_sectors)
-{
- struct queue_limits *limits = &q->limits;
- unsigned int max_sectors;
-
- if ((max_hw_sectors << 9) < PAGE_SIZE) {
- max_hw_sectors = 1 << (PAGE_SHIFT - 9);
- pr_info("%s: set to minimum %u\n", __func__, max_hw_sectors);
- }
-
- max_hw_sectors = round_down(max_hw_sectors,
- limits->logical_block_size >> SECTOR_SHIFT);
- limits->max_hw_sectors = max_hw_sectors;
-
- max_sectors = min_not_zero(max_hw_sectors, limits->max_dev_sectors);
-
- if (limits->max_user_sectors)
- max_sectors = min(max_sectors, limits->max_user_sectors);
- else
- max_sectors = min(max_sectors, BLK_DEF_MAX_SECTORS_CAP);
-
- max_sectors = round_down(max_sectors,
- limits->logical_block_size >> SECTOR_SHIFT);
- limits->max_sectors = max_sectors;
-
- if (!q->disk)
- return;
- q->disk->bdi->io_pages = max_sectors >> (PAGE_SHIFT - 9);
-}
-EXPORT_SYMBOL(blk_queue_max_hw_sectors);
-
-/**
* blk_queue_chunk_sectors - set size of the chunk for this queue
* @q: the request queue for the device
* @chunk_sectors: chunk sectors in the usual 512b unit
@@ -443,65 +377,6 @@ void blk_queue_max_zone_append_sectors(struct request_queue *q,
EXPORT_SYMBOL_GPL(blk_queue_max_zone_append_sectors);
/**
- * blk_queue_max_segments - set max hw segments for a request for this queue
- * @q: the request queue for the device
- * @max_segments: max number of segments
- *
- * Description:
- * Enables a low level driver to set an upper limit on the number of
- * hw data segments in a request.
- **/
-void blk_queue_max_segments(struct request_queue *q, unsigned short max_segments)
-{
- if (!max_segments) {
- max_segments = 1;
- pr_info("%s: set to minimum %u\n", __func__, max_segments);
- }
-
- q->limits.max_segments = max_segments;
-}
-EXPORT_SYMBOL(blk_queue_max_segments);
-
-/**
- * blk_queue_max_discard_segments - set max segments for discard requests
- * @q: the request queue for the device
- * @max_segments: max number of segments
- *
- * Description:
- * Enables a low level driver to set an upper limit on the number of
- * segments in a discard request.
- **/
-void blk_queue_max_discard_segments(struct request_queue *q,
- unsigned short max_segments)
-{
- q->limits.max_discard_segments = max_segments;
-}
-EXPORT_SYMBOL_GPL(blk_queue_max_discard_segments);
-
-/**
- * blk_queue_max_segment_size - set max segment size for blk_rq_map_sg
- * @q: the request queue for the device
- * @max_size: max size of segment in bytes
- *
- * Description:
- * Enables a low level driver to set an upper limit on the size of a
- * coalesced segment
- **/
-void blk_queue_max_segment_size(struct request_queue *q, unsigned int max_size)
-{
- if (max_size < PAGE_SIZE) {
- max_size = PAGE_SIZE;
- pr_info("%s: set to minimum %u\n", __func__, max_size);
- }
-
- /* see blk_queue_virt_boundary() for the explanation */
- WARN_ON_ONCE(q->limits.virt_boundary_mask);
-
- q->limits.max_segment_size = max_size;
-}
-EXPORT_SYMBOL(blk_queue_max_segment_size);
-
-/**
* blk_queue_logical_block_size - set logical block size for the queue
* @q: the request queue for the device
* @size: the logical block size, in bytes
@@ -667,29 +542,6 @@ void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt)
}
EXPORT_SYMBOL(blk_limits_io_opt);
-/**
- * blk_queue_io_opt - set optimal request size for the queue
- * @q: the request queue for the device
- * @opt: optimal request size in bytes
- *
- * Description:
- * Storage devices may report an optimal I/O size, which is the
- * device's preferred unit for sustained I/O. This is rarely reported
- * for disk drives. For RAID arrays it is usually the stripe width or
- * the internal track size. A properly aligned multiple of
- * optimal_io_size is the preferred request size for workloads where
- * sustained throughput is desired.
- */
-void blk_queue_io_opt(struct request_queue *q, unsigned int opt)
-{
- blk_limits_io_opt(&q->limits, opt);
- if (!q->disk)
- return;
- q->disk->bdi->ra_pages =
- max(queue_io_opt(q) * 2 / PAGE_SIZE, VM_READAHEAD_PAGES);
-}
-EXPORT_SYMBOL(blk_queue_io_opt);
-
static int queue_limit_alignment_offset(const struct queue_limits *lim,
sector_t sector)
{
@@ -940,81 +792,6 @@ void blk_queue_update_dma_pad(struct request_queue *q, unsigned int mask)
EXPORT_SYMBOL(blk_queue_update_dma_pad);
/**
- * blk_queue_segment_boundary - set boundary rules for segment merging
- * @q: the request queue for the device
- * @mask: the memory boundary mask
- **/
-void blk_queue_segment_boundary(struct request_queue *q, unsigned long mask)
-{
- if (mask < PAGE_SIZE - 1) {
- mask = PAGE_SIZE - 1;
- pr_info("%s: set to minimum %lx\n", __func__, mask);
- }
-
- q->limits.seg_boundary_mask = mask;
-}
-EXPORT_SYMBOL(blk_queue_segment_boundary);
-
-/**
- * blk_queue_virt_boundary - set boundary rules for bio merging
- * @q: the request queue for the device
- * @mask: the memory boundary mask
- **/
-void blk_queue_virt_boundary(struct request_queue *q, unsigned long mask)
-{
- q->limits.virt_boundary_mask = mask;
-
- /*
- * Devices that require a virtual boundary do not support scatter/gather
- * I/O natively, but instead require a descriptor list entry for each
- * page (which might not be idential to the Linux PAGE_SIZE). Because
- * of that they are not limited by our notion of "segment size".
- */
- if (mask)
- q->limits.max_segment_size = UINT_MAX;
-}
-EXPORT_SYMBOL(blk_queue_virt_boundary);
-
-/**
- * blk_queue_dma_alignment - set dma length and memory alignment
- * @q: the request queue for the device
- * @mask: alignment mask
- *
- * description:
- * set required memory and length alignment for direct dma transactions.
- * this is used when building direct io requests for the queue.
- *
- **/
-void blk_queue_dma_alignment(struct request_queue *q, int mask)
-{
- q->limits.dma_alignment = mask;
-}
-EXPORT_SYMBOL(blk_queue_dma_alignment);
-
-/**
- * blk_queue_update_dma_alignment - update dma length and memory alignment
- * @q: the request queue for the device
- * @mask: alignment mask
- *
- * description:
- * update required memory and length alignment for direct dma transactions.
- * If the requested alignment is larger than the current alignment, then
- * the current queue alignment is updated to the new value, otherwise it
- * is left alone. The design of this is to allow multiple objects
- * (driver, device, transport etc) to set their respective
- * alignments without having them interfere.
- *
- **/
-void blk_queue_update_dma_alignment(struct request_queue *q, int mask)
-{
- BUG_ON(mask > PAGE_SIZE);
-
- if (mask > q->limits.dma_alignment)
- q->limits.dma_alignment = mask;
-}
-EXPORT_SYMBOL(blk_queue_update_dma_alignment);
-
-/**
* blk_set_queue_depth - tell the block layer about the device queue depth
* @q: the request queue for the device
* @depth: queue depth
@@ -1052,28 +829,6 @@ void blk_queue_write_cache(struct request_queue *q, bool wc, bool fua)
EXPORT_SYMBOL_GPL(blk_queue_write_cache);
/**
- * blk_queue_can_use_dma_map_merging - configure queue for merging segments.
- * @q: the request queue for the device
- * @dev: the device pointer for dma
- *
- * Tell the block layer about merging the segments by dma map of @q.
- */
-bool blk_queue_can_use_dma_map_merging(struct request_queue *q,
- struct device *dev)
-{
- unsigned long boundary = dma_get_merge_boundary(dev);
-
- if (!boundary)
- return false;
-
- /* No need to update max_segment_size. see blk_queue_virt_boundary() */
- blk_queue_virt_boundary(q, boundary);
-
- return true;
-}
-EXPORT_SYMBOL_GPL(blk_queue_can_use_dma_map_merging);
-
-/**
* disk_set_zoned - inidicate a zoned device
* @disk: gendisk to configure
*/
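The setters removed above (blk_queue_bounce_limit(), blk_queue_max_hw_sectors(), blk_queue_max_segments(), blk_queue_max_segment_size(), blk_queue_io_opt(), blk_queue_segment_boundary(), blk_queue_virt_boundary(), blk_queue_dma_alignment() and friends) are superseded by the queue_limits interface, of which queue_limits_set() appears in the context above. A hedged sketch of the replacement pattern in a driver; the numeric limits are purely illustrative:

	struct queue_limits lim = q->limits;	/* start from the current limits */
	int err;

	lim.max_hw_sectors = 256;		/* illustrative value */
	lim.max_segments = 64;			/* illustrative value */
	lim.seg_boundary_mask = PAGE_SIZE - 1;
	err = queue_limits_set(q, &lim);	/* validate and apply the whole set at once */
	if (err)
		return err;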
diff --git a/block/blk-zoned.c b/block/blk-zoned.c
index 48e5e3bbb89c..03aa4eead39e 100644
--- a/block/blk-zoned.c
+++ b/block/blk-zoned.c
@@ -416,7 +416,7 @@ int blkdev_zone_mgmt_ioctl(struct block_device *bdev, blk_mode_t mode,
op = REQ_OP_ZONE_RESET;
/* Invalidate the page cache, including dirty pages. */
- filemap_invalidate_lock(bdev->bd_inode->i_mapping);
+ filemap_invalidate_lock(bdev->bd_mapping);
ret = blkdev_truncate_zone_range(bdev, mode, &zrange);
if (ret)
goto fail;
@@ -438,7 +438,7 @@ int blkdev_zone_mgmt_ioctl(struct block_device *bdev, blk_mode_t mode,
fail:
if (cmd == BLKRESETZONE)
- filemap_invalidate_unlock(bdev->bd_inode->i_mapping);
+ filemap_invalidate_unlock(bdev->bd_mapping);
return ret;
}
@@ -1257,7 +1257,7 @@ void blk_zone_write_plug_bio_endio(struct bio *bio)
* is not called. So we need to schedule execution of the next
* plugged BIO here.
*/
- if (bio->bi_bdev->bd_has_submit_bio)
+ if (bdev_test_flag(bio->bi_bdev, BD_HAS_SUBMIT_BIO))
disk_zone_wplug_unplug_bio(disk, zwplug);
/* Drop the reference we took when entering this function. */
@@ -1326,7 +1326,7 @@ static void blk_zone_wplug_bio_work(struct work_struct *work)
* path for BIO-based devices will not do that. So drop this extra
* reference here.
*/
- if (bdev->bd_has_submit_bio)
+ if (bdev_test_flag(bdev, BD_HAS_SUBMIT_BIO))
blk_queue_exit(bdev->bd_disk->queue);
put_zwplug:
diff --git a/block/blk.h b/block/blk.h
index 6e94c10af798..189bc25beb50 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -499,6 +499,8 @@ static inline int blkdev_zone_mgmt_ioctl(struct block_device *bdev,
struct block_device *bdev_alloc(struct gendisk *disk, u8 partno);
void bdev_add(struct block_device *bdev, dev_t dev);
+void bdev_unhash(struct block_device *bdev);
+void bdev_drop(struct block_device *bdev);
int blk_alloc_ext_minor(void);
void blk_free_ext_minor(unsigned int minor);
diff --git a/block/bsg-lib.c b/block/bsg-lib.c
index bcc7dee6abce..ee738d129a9f 100644
--- a/block/bsg-lib.c
+++ b/block/bsg-lib.c
@@ -354,12 +354,14 @@ static const struct blk_mq_ops bsg_mq_ops = {
* bsg_setup_queue - Create and add the bsg hooks so we can receive requests
* @dev: device to attach bsg device to
* @name: device to give bsg device
+ * @lim: queue limits for the bsg queue
* @job_fn: bsg job handler
* @timeout: timeout handler function pointer
* @dd_job_size: size of LLD data needed for each job
*/
struct request_queue *bsg_setup_queue(struct device *dev, const char *name,
- bsg_job_fn *job_fn, bsg_timeout_fn *timeout, int dd_job_size)
+ struct queue_limits *lim, bsg_job_fn *job_fn,
+ bsg_timeout_fn *timeout, int dd_job_size)
{
struct bsg_set *bset;
struct blk_mq_tag_set *set;
@@ -383,7 +385,7 @@ struct request_queue *bsg_setup_queue(struct device *dev, const char *name,
if (blk_mq_alloc_tag_set(set))
goto out_tag_set;
- q = blk_mq_alloc_queue(set, NULL, NULL);
+ q = blk_mq_alloc_queue(set, lim, NULL);
if (IS_ERR(q)) {
ret = PTR_ERR(q);
goto out_queue;
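With the new parameter, transport drivers can supply their queue limits when the bsg queue is created instead of adjusting them afterwards; passing NULL keeps the previous behaviour, since the pointer is simply forwarded to blk_mq_alloc_queue(). A hypothetical caller (the job/timeout handlers and values are illustrative, not from this diff):

	struct queue_limits lim = {
		.max_hw_sectors	= 1024,		/* illustrative transport cap */
		.max_segments	= 32,		/* illustrative */
	};
	struct request_queue *q;

	q = bsg_setup_queue(dev, dev_name(dev), &lim, example_job_fn,
			    example_timeout_fn, 0);
	if (IS_ERR(q))
		return PTR_ERR(q);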
diff --git a/block/early-lookup.c b/block/early-lookup.c
index 3effbd0d35e9..3fb57f7d2b12 100644
--- a/block/early-lookup.c
+++ b/block/early-lookup.c
@@ -78,7 +78,7 @@ static int __init devt_from_partuuid(const char *uuid_str, dev_t *devt)
* to the partition number found by UUID.
*/
*devt = part_devt(dev_to_disk(dev),
- dev_to_bdev(dev)->bd_partno + offset);
+ bdev_partno(dev_to_bdev(dev)) + offset);
} else {
*devt = dev->devt;
}
diff --git a/block/fops.c b/block/fops.c
index 7a163f7fe2d8..376265935714 100644
--- a/block/fops.c
+++ b/block/fops.c
@@ -663,8 +663,8 @@ static ssize_t blkdev_buffered_write(struct kiocb *iocb, struct iov_iter *from)
static ssize_t blkdev_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
struct file *file = iocb->ki_filp;
- struct block_device *bdev = I_BDEV(file->f_mapping->host);
- struct inode *bd_inode = bdev->bd_inode;
+ struct inode *bd_inode = bdev_file_inode(file);
+ struct block_device *bdev = I_BDEV(bd_inode);
loff_t size = bdev_nr_bytes(bdev);
size_t shorted = 0;
ssize_t ret;
diff --git a/block/genhd.c b/block/genhd.c
index 7f39fbe60753..8f1f3c6b4d67 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -411,7 +411,8 @@ int __must_check device_add_disk(struct device *parent, struct gendisk *disk,
elevator_init_mq(disk->queue);
/* Mark bdev as having a submit_bio, if needed */
- disk->part0->bd_has_submit_bio = disk->fops->submit_bio != NULL;
+ if (disk->fops->submit_bio)
+ bdev_set_flag(disk->part0, BD_HAS_SUBMIT_BIO);
/*
* If the driver provides an explicit major number it also must provide
@@ -653,7 +654,7 @@ void del_gendisk(struct gendisk *disk)
*/
mutex_lock(&disk->open_mutex);
xa_for_each(&disk->part_tbl, idx, part)
- remove_inode_hash(part->bd_inode);
+ bdev_unhash(part);
mutex_unlock(&disk->open_mutex);
/*
@@ -742,7 +743,7 @@ void invalidate_disk(struct gendisk *disk)
struct block_device *bdev = disk->part0;
invalidate_bdev(bdev);
- bdev->bd_inode->i_mapping->wb_err = 0;
+ bdev->bd_mapping->wb_err = 0;
set_capacity(disk, 0);
}
EXPORT_SYMBOL(invalidate_disk);
@@ -1064,7 +1065,8 @@ static DEVICE_ATTR(partscan, 0444, partscan_show, NULL);
ssize_t part_fail_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- return sprintf(buf, "%d\n", dev_to_bdev(dev)->bd_make_it_fail);
+ return sprintf(buf, "%d\n",
+ bdev_test_flag(dev_to_bdev(dev), BD_MAKE_IT_FAIL));
}
ssize_t part_fail_store(struct device *dev,
@@ -1073,9 +1075,12 @@ ssize_t part_fail_store(struct device *dev,
{
int i;
- if (count > 0 && sscanf(buf, "%d", &i) > 0)
- dev_to_bdev(dev)->bd_make_it_fail = i;
-
+ if (count > 0 && sscanf(buf, "%d", &i) > 0) {
+ if (i)
+ bdev_set_flag(dev_to_bdev(dev), BD_MAKE_IT_FAIL);
+ else
+ bdev_clear_flag(dev_to_bdev(dev), BD_MAKE_IT_FAIL);
+ }
return count;
}
@@ -1191,7 +1196,7 @@ static void disk_release(struct device *dev)
if (test_bit(GD_ADDED, &disk->state) && disk->fops->free_disk)
disk->fops->free_disk(disk);
- iput(disk->part0->bd_inode); /* frees the disk */
+ bdev_drop(disk->part0); /* frees the disk */
}
static int block_uevent(const struct device *dev, struct kobj_uevent_env *env)
@@ -1379,7 +1384,7 @@ out_erase_part0:
out_destroy_part_tbl:
xa_destroy(&disk->part_tbl);
disk->part0->bd_disk = NULL;
- iput(disk->part0->bd_inode);
+ bdev_drop(disk->part0);
out_free_bdi:
bdi_put(disk->bdi);
out_free_bioset:
diff --git a/block/ioctl.c b/block/ioctl.c
index c7db3bd2d653..d570e1695896 100644
--- a/block/ioctl.c
+++ b/block/ioctl.c
@@ -96,7 +96,6 @@ static int blk_ioctl_discard(struct block_device *bdev, blk_mode_t mode,
unsigned long arg)
{
unsigned int bs_mask = bdev_logical_block_size(bdev) - 1;
- struct inode *inode = bdev->bd_inode;
uint64_t range[2], start, len, end;
struct bio *prev = NULL, *bio;
sector_t sector, nr_sects;
@@ -126,7 +125,7 @@ static int blk_ioctl_discard(struct block_device *bdev, blk_mode_t mode,
end > bdev_nr_bytes(bdev))
return -EINVAL;
- filemap_invalidate_lock(inode->i_mapping);
+ filemap_invalidate_lock(bdev->bd_mapping);
err = truncate_bdev_range(bdev, mode, start, start + len - 1);
if (err)
goto fail;
@@ -157,7 +156,7 @@ static int blk_ioctl_discard(struct block_device *bdev, blk_mode_t mode,
out_unplug:
blk_finish_plug(&plug);
fail:
- filemap_invalidate_unlock(inode->i_mapping);
+ filemap_invalidate_unlock(bdev->bd_mapping);
return err;
}
@@ -182,12 +181,12 @@ static int blk_ioctl_secure_erase(struct block_device *bdev, blk_mode_t mode,
if (start + len > bdev_nr_bytes(bdev))
return -EINVAL;
- filemap_invalidate_lock(bdev->bd_inode->i_mapping);
+ filemap_invalidate_lock(bdev->bd_mapping);
err = truncate_bdev_range(bdev, mode, start, start + len - 1);
if (!err)
err = blkdev_issue_secure_erase(bdev, start >> 9, len >> 9,
GFP_KERNEL);
- filemap_invalidate_unlock(bdev->bd_inode->i_mapping);
+ filemap_invalidate_unlock(bdev->bd_mapping);
return err;
}
@@ -197,7 +196,6 @@ static int blk_ioctl_zeroout(struct block_device *bdev, blk_mode_t mode,
{
uint64_t range[2];
uint64_t start, end, len;
- struct inode *inode = bdev->bd_inode;
int err;
if (!(mode & BLK_OPEN_WRITE))
@@ -220,7 +218,7 @@ static int blk_ioctl_zeroout(struct block_device *bdev, blk_mode_t mode,
return -EINVAL;
/* Invalidate the page cache, including dirty pages */
- filemap_invalidate_lock(inode->i_mapping);
+ filemap_invalidate_lock(bdev->bd_mapping);
err = truncate_bdev_range(bdev, mode, start, end);
if (err)
goto fail;
@@ -229,7 +227,7 @@ static int blk_ioctl_zeroout(struct block_device *bdev, blk_mode_t mode,
BLKDEV_ZERO_NOUNMAP);
fail:
- filemap_invalidate_unlock(inode->i_mapping);
+ filemap_invalidate_unlock(bdev->bd_mapping);
return err;
}
@@ -433,7 +431,10 @@ static int blkdev_roset(struct block_device *bdev, unsigned cmd,
if (ret)
return ret;
}
- bdev->bd_read_only = n;
+ if (n)
+ bdev_set_flag(bdev, BD_READ_ONLY);
+ else
+ bdev_clear_flag(bdev, BD_READ_ONLY);
return 0;
}
@@ -503,11 +504,14 @@ static int compat_hdio_getgeo(struct block_device *bdev,
#endif
/* set the logical block size */
-static int blkdev_bszset(struct block_device *bdev, blk_mode_t mode,
+static int blkdev_bszset(struct file *file, blk_mode_t mode,
int __user *argp)
{
+ // this one might be file_inode(file)->i_rdev - a rare valid
+ // use of file_inode() for those.
+ dev_t dev = I_BDEV(file->f_mapping->host)->bd_dev;
+ struct file *excl_file;
int ret, n;
- struct file *file;
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
@@ -517,13 +521,13 @@ static int blkdev_bszset(struct block_device *bdev, blk_mode_t mode,
return -EFAULT;
if (mode & BLK_OPEN_EXCL)
- return set_blocksize(bdev, n);
+ return set_blocksize(file, n);
- file = bdev_file_open_by_dev(bdev->bd_dev, mode, &bdev, NULL);
- if (IS_ERR(file))
+ excl_file = bdev_file_open_by_dev(dev, mode, &dev, NULL);
+ if (IS_ERR(excl_file))
return -EBUSY;
- ret = set_blocksize(bdev, n);
- fput(file);
+ ret = set_blocksize(excl_file, n);
+ fput(excl_file);
return ret;
}
@@ -652,7 +656,7 @@ long blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg)
case BLKBSZGET: /* get block device soft block size (cf. BLKSSZGET) */
return put_int(argp, block_size(bdev));
case BLKBSZSET:
- return blkdev_bszset(bdev, mode, argp);
+ return blkdev_bszset(file, mode, argp);
case BLKGETSIZE64:
return put_u64(argp, bdev_nr_bytes(bdev));
@@ -712,7 +716,7 @@ long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg)
case BLKBSZGET_32: /* get the logical block size (cf. BLKSSZGET) */
return put_int(argp, bdev_logical_block_size(bdev));
case BLKBSZSET_32:
- return blkdev_bszset(bdev, mode, argp);
+ return blkdev_bszset(file, mode, argp);
case BLKGETSIZE64_32:
return put_u64(argp, bdev_nr_bytes(bdev));
diff --git a/block/partitions/core.c b/block/partitions/core.c
index 37b5f92d07fe..ab76e64f0f6c 100644
--- a/block/partitions/core.c
+++ b/block/partitions/core.c
@@ -173,7 +173,7 @@ static struct parsed_partitions *check_partition(struct gendisk *hd)
static ssize_t part_partition_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- return sprintf(buf, "%d\n", dev_to_bdev(dev)->bd_partno);
+ return sprintf(buf, "%d\n", bdev_partno(dev_to_bdev(dev)));
}
static ssize_t part_start_show(struct device *dev,
@@ -243,14 +243,14 @@ static const struct attribute_group *part_attr_groups[] = {
static void part_release(struct device *dev)
{
put_disk(dev_to_bdev(dev)->bd_disk);
- iput(dev_to_bdev(dev)->bd_inode);
+ bdev_drop(dev_to_bdev(dev));
}
static int part_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
const struct block_device *part = dev_to_bdev(dev);
- add_uevent_var(env, "PARTN=%u", part->bd_partno);
+ add_uevent_var(env, "PARTN=%u", bdev_partno(part));
if (part->bd_meta_info && part->bd_meta_info->volname[0])
add_uevent_var(env, "PARTNAME=%s", part->bd_meta_info->volname);
return 0;
@@ -267,7 +267,7 @@ void drop_partition(struct block_device *part)
{
lockdep_assert_held(&part->bd_disk->open_mutex);
- xa_erase(&part->bd_disk->part_tbl, part->bd_partno);
+ xa_erase(&part->bd_disk->part_tbl, bdev_partno(part));
kobject_put(part->bd_holder_dir);
device_del(&part->bd_device);
@@ -338,8 +338,8 @@ static struct block_device *add_partition(struct gendisk *disk, int partno,
pdev->parent = ddev;
/* in consecutive minor range? */
- if (bdev->bd_partno < disk->minors) {
- devt = MKDEV(disk->major, disk->first_minor + bdev->bd_partno);
+ if (bdev_partno(bdev) < disk->minors) {
+ devt = MKDEV(disk->major, disk->first_minor + bdev_partno(bdev));
} else {
err = blk_alloc_ext_minor();
if (err < 0)
@@ -404,7 +404,7 @@ static bool partition_overlaps(struct gendisk *disk, sector_t start,
rcu_read_lock();
xa_for_each_start(&disk->part_tbl, idx, part, 1) {
- if (part->bd_partno != skip_partno &&
+ if (bdev_partno(part) != skip_partno &&
start < part->bd_start_sect + bdev_nr_sectors(part) &&
start + length > part->bd_start_sect) {
overlap = true;
@@ -469,7 +469,7 @@ int bdev_del_partition(struct gendisk *disk, int partno)
* Just delete the partition and invalidate it.
*/
- remove_inode_hash(part->bd_inode);
+ bdev_unhash(part);
invalidate_bdev(part);
drop_partition(part);
ret = 0;
@@ -652,7 +652,7 @@ rescan:
* it cannot be looked up any more even when openers
* still hold references.
*/
- remove_inode_hash(part->bd_inode);
+ bdev_unhash(part);
/*
* If @disk->open_partitions isn't elevated but there's
@@ -701,7 +701,7 @@ EXPORT_SYMBOL_GPL(bdev_disk_changed);
void *read_part_sector(struct parsed_partitions *state, sector_t n, Sector *p)
{
- struct address_space *mapping = state->disk->part0->bd_inode->i_mapping;
+ struct address_space *mapping = state->disk->part0->bd_mapping;
struct folio *folio;
if (n >= get_capacity(state->disk)) {
diff --git a/block/partitions/ldm.c b/block/partitions/ldm.c
index 38e58960ae03..2bd42fedb907 100644
--- a/block/partitions/ldm.c
+++ b/block/partitions/ldm.c
@@ -131,8 +131,7 @@ static bool ldm_parse_tocblock (const u8 *data, struct tocblock *toc)
ldm_crit ("Cannot find TOCBLOCK, database may be corrupt.");
return false;
}
- strncpy (toc->bitmap1_name, data + 0x24, sizeof (toc->bitmap1_name));
- toc->bitmap1_name[sizeof (toc->bitmap1_name) - 1] = 0;
+ strscpy_pad(toc->bitmap1_name, data + 0x24, sizeof(toc->bitmap1_name));
toc->bitmap1_start = get_unaligned_be64(data + 0x2E);
toc->bitmap1_size = get_unaligned_be64(data + 0x36);
@@ -142,8 +141,7 @@ static bool ldm_parse_tocblock (const u8 *data, struct tocblock *toc)
TOC_BITMAP1, toc->bitmap1_name);
return false;
}
- strncpy (toc->bitmap2_name, data + 0x46, sizeof (toc->bitmap2_name));
- toc->bitmap2_name[sizeof (toc->bitmap2_name) - 1] = 0;
+ strscpy_pad(toc->bitmap2_name, data + 0x46, sizeof(toc->bitmap2_name));
toc->bitmap2_start = get_unaligned_be64(data + 0x50);
toc->bitmap2_size = get_unaligned_be64(data + 0x58);
if (strncmp (toc->bitmap2_name, TOC_BITMAP2,
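The two ldm.c hunks replace the strncpy() plus manual NUL-termination idiom with strscpy_pad(), which always terminates the destination and zero-fills the remaining space, so the explicit "...name[size - 1] = 0" line becomes unnecessary. A minimal sketch of the difference (buffer name and size are illustrative):

	char name[16];

	/* old idiom: strncpy() leaves the buffer unterminated when src
	 * fills it, hence the explicit terminator on the next line */
	strncpy(name, src, sizeof(name));
	name[sizeof(name) - 1] = 0;

	/* new idiom: always NUL-terminated, tail zero-padded; returns
	 * -E2BIG if src had to be truncated */
	if (strscpy_pad(name, src, sizeof(name)) < 0)
		pr_warn("name truncated\n");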