Diffstat (limited to 'block')
-rw-r--r--	block/blk-zoned.c	38
1 file changed, 36 insertions(+), 2 deletions(-)
diff --git a/block/blk-zoned.c b/block/blk-zoned.c
index 8b9f3fc5a690..c0276b42d9fb 100644
--- a/block/blk-zoned.c
+++ b/block/blk-zoned.c
@@ -318,6 +318,22 @@ int blkdev_report_zones_ioctl(struct block_device *bdev, fmode_t mode,
 	return 0;
 }
 
+static int blkdev_truncate_zone_range(struct block_device *bdev, fmode_t mode,
+				      const struct blk_zone_range *zrange)
+{
+	loff_t start, end;
+
+	if (zrange->sector + zrange->nr_sectors <= zrange->sector ||
+	    zrange->sector + zrange->nr_sectors > get_capacity(bdev->bd_disk))
+		/* Out of range */
+		return -EINVAL;
+
+	start = zrange->sector << SECTOR_SHIFT;
+	end = ((zrange->sector + zrange->nr_sectors) << SECTOR_SHIFT) - 1;
+
+	return truncate_bdev_range(bdev, mode, start, end);
+}
+
 /*
  * BLKRESETZONE, BLKOPENZONE, BLKCLOSEZONE and BLKFINISHZONE ioctl processing.
  * Called from blkdev_ioctl.
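[Aside: a minimal illustration-only sketch, not part of the commit, of the range check and sector-to-byte conversion done by blkdev_truncate_zone_range() above. Kernel sector units are 512 bytes (SECTOR_SHIFT is 9), and the `sector + nr_sectors <= sector` test rejects both an empty range and u64 wraparound in one comparison. Here capacity_sectors is an assumed stand-in for get_capacity():

#include <stdint.h>

#define SECTOR_SHIFT 9	/* kernel sectors are 512 bytes */

/* Fills [*start, *end] in bytes; returns -1 if the range is invalid. */
static int zone_byte_range(uint64_t sector, uint64_t nr_sectors,
			   uint64_t capacity_sectors,
			   uint64_t *start, uint64_t *end)
{
	/* Catches nr_sectors == 0 and u64 wraparound in a single test. */
	if (sector + nr_sectors <= sector ||
	    sector + nr_sectors > capacity_sectors)
		return -1;

	*start = sector << SECTOR_SHIFT;
	*end = ((sector + nr_sectors) << SECTOR_SHIFT) - 1;
	return 0;
}
]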
@@ -329,6 +345,7 @@ int blkdev_zone_mgmt_ioctl(struct block_device *bdev, fmode_t mode,
 	struct request_queue *q;
 	struct blk_zone_range zrange;
 	enum req_opf op;
+	int ret;
 
 	if (!argp)
 		return -EINVAL;
@@ -352,6 +369,11 @@ int blkdev_zone_mgmt_ioctl(struct block_device *bdev, fmode_t mode,
 	switch (cmd) {
 	case BLKRESETZONE:
 		op = REQ_OP_ZONE_RESET;
+
+		/* Invalidate the page cache, including dirty pages. */
+		ret = blkdev_truncate_zone_range(bdev, mode, &zrange);
+		if (ret)
+			return ret;
 		break;
 	case BLKOPENZONE:
 		op = REQ_OP_ZONE_OPEN;
@@ -366,8 +388,20 @@ int blkdev_zone_mgmt_ioctl(struct block_device *bdev, fmode_t mode,
 		return -ENOTTY;
 	}
 
-	return blkdev_zone_mgmt(bdev, op, zrange.sector, zrange.nr_sectors,
-				GFP_KERNEL);
+	ret = blkdev_zone_mgmt(bdev, op, zrange.sector, zrange.nr_sectors,
+			       GFP_KERNEL);
+
+	/*
+	 * Invalidate the page cache again for zone reset: writes can only be
+	 * direct for zoned devices so concurrent writes would not add any page
+	 * to the page cache after/during reset. The page cache may be filled
+	 * again due to concurrent reads though and dropping the pages for
+	 * these is fine.
+	 */
+	if (!ret && cmd == BLKRESETZONE)
+		ret = blkdev_truncate_zone_range(bdev, mode, &zrange);
+
+	return ret;
 }
 
 static inline unsigned long *blk_alloc_zone_bitmap(int node,
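[Aside: for context, a minimal userspace sketch, not part of the commit, of the BLKRESETZONE path this patch changes. BLKRESETZONE and struct blk_zone_range come from <linux/blkzoned.h>; the device path, zone start, and zone size below are assumptions and must match the device's actual zone layout:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/blkzoned.h>

int main(int argc, char **argv)
{
	/* Assumed 256 MiB zone starting at sector 0; adjust per device. */
	struct blk_zone_range zrange = {
		.sector = 0,
		.nr_sectors = 256 * 2048,	/* in 512-byte sectors */
	};
	int fd;

	if (argc < 2) {
		fprintf(stderr, "usage: %s /dev/<zoned-dev>\n", argv[0]);
		return 1;
	}

	fd = open(argv[1], O_RDWR);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* With this patch, the reset also drops cached pages in the range. */
	if (ioctl(fd, BLKRESETZONE, &zrange) < 0)
		perror("BLKRESETZONE");

	close(fd);
	return 0;
}

As the added comment in the patch notes, writes to zoned block devices are direct I/O only, so the second invalidation after the reset can only race with reads, for which dropping pages is harmless.]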