Diffstat (limited to 'drivers/dax/super.c')
-rw-r--r--  drivers/dax/super.c  272
1 file changed, 67 insertions(+), 205 deletions(-)
diff --git a/drivers/dax/super.c b/drivers/dax/super.c
index b882cf8106ea..e3029389d809 100644
--- a/drivers/dax/super.c
+++ b/drivers/dax/super.c
@@ -7,10 +7,8 @@
#include <linux/mount.h>
#include <linux/pseudo_fs.h>
#include <linux/magic.h>
-#include <linux/genhd.h>
#include <linux/pfn_t.h>
#include <linux/cdev.h>
-#include <linux/hash.h>
#include <linux/slab.h>
#include <linux/uio.h>
#include <linux/dax.h>
@@ -21,15 +19,12 @@
* struct dax_device - anchor object for dax services
* @inode: core vfs
* @cdev: optional character interface for "device dax"
- * @host: optional name for lookups where the device path is not available
* @private: dax driver private data
* @flags: state and boolean properties
*/
struct dax_device {
- struct hlist_node list;
struct inode inode;
struct cdev cdev;
- const char *host;
void *private;
unsigned long flags;
const struct dax_operations *ops;
@@ -42,10 +37,6 @@ static DEFINE_IDA(dax_minor_ida);
static struct kmem_cache *dax_cache __read_mostly;
static struct super_block *dax_superblock __read_mostly;
-#define DAX_HASH_SIZE (PAGE_SIZE / sizeof(struct hlist_head))
-static struct hlist_head dax_host_list[DAX_HASH_SIZE];
-static DEFINE_SPINLOCK(dax_host_lock);
-
int dax_read_lock(void)
{
return srcu_read_lock(&dax_srcu);
@@ -58,169 +49,54 @@ void dax_read_unlock(int id)
}
EXPORT_SYMBOL_GPL(dax_read_unlock);
-static int dax_host_hash(const char *host)
-{
- return hashlen_hash(hashlen_string("DAX", host)) % DAX_HASH_SIZE;
-}
-
-#ifdef CONFIG_BLOCK
+#if defined(CONFIG_BLOCK) && defined(CONFIG_FS_DAX)
#include <linux/blkdev.h>
-int bdev_dax_pgoff(struct block_device *bdev, sector_t sector, size_t size,
- pgoff_t *pgoff)
+static DEFINE_XARRAY(dax_hosts);
+
+int dax_add_host(struct dax_device *dax_dev, struct gendisk *disk)
{
- sector_t start_sect = bdev ? get_start_sect(bdev) : 0;
- phys_addr_t phys_off = (start_sect + sector) * 512;
+ return xa_insert(&dax_hosts, (unsigned long)disk, dax_dev, GFP_KERNEL);
+}
+EXPORT_SYMBOL_GPL(dax_add_host);
- if (pgoff)
- *pgoff = PHYS_PFN(phys_off);
- if (phys_off % PAGE_SIZE || size % PAGE_SIZE)
- return -EINVAL;
- return 0;
+void dax_remove_host(struct gendisk *disk)
+{
+ xa_erase(&dax_hosts, (unsigned long)disk);
}
-EXPORT_SYMBOL(bdev_dax_pgoff);
+EXPORT_SYMBOL_GPL(dax_remove_host);
-#if IS_ENABLED(CONFIG_FS_DAX)
/**
- * dax_get_by_host() - temporary lookup mechanism for filesystem-dax
- * @host: alternate name for the device registered by a dax driver
+ * fs_dax_get_by_bdev() - temporary lookup mechanism for filesystem-dax
+ * @bdev: block device to find a dax_device for
+ * @start_off: returns the byte offset into the dax_device at which @bdev starts
*/
-static struct dax_device *dax_get_by_host(const char *host)
+struct dax_device *fs_dax_get_by_bdev(struct block_device *bdev, u64 *start_off)
{
- struct dax_device *dax_dev, *found = NULL;
- int hash, id;
-
- if (!host)
- return NULL;
-
- hash = dax_host_hash(host);
-
- id = dax_read_lock();
- spin_lock(&dax_host_lock);
- hlist_for_each_entry(dax_dev, &dax_host_list[hash], list) {
- if (!dax_alive(dax_dev)
- || strcmp(host, dax_dev->host) != 0)
- continue;
-
- if (igrab(&dax_dev->inode))
- found = dax_dev;
- break;
- }
- spin_unlock(&dax_host_lock);
- dax_read_unlock(id);
-
- return found;
-}
+ struct dax_device *dax_dev;
+ u64 part_size;
+ int id;
-struct dax_device *fs_dax_get_by_bdev(struct block_device *bdev)
-{
if (!blk_queue_dax(bdev->bd_disk->queue))
return NULL;
- return dax_get_by_host(bdev->bd_disk->disk_name);
-}
-EXPORT_SYMBOL_GPL(fs_dax_get_by_bdev);
-
-bool generic_fsdax_supported(struct dax_device *dax_dev,
- struct block_device *bdev, int blocksize, sector_t start,
- sector_t sectors)
-{
- bool dax_enabled = false;
- pgoff_t pgoff, pgoff_end;
- void *kaddr, *end_kaddr;
- pfn_t pfn, end_pfn;
- sector_t last_page;
- long len, len2;
- int err, id;
-
- if (blocksize != PAGE_SIZE) {
- pr_info("%pg: error: unsupported blocksize for dax\n", bdev);
- return false;
- }
-
- if (!dax_dev) {
- pr_debug("%pg: error: dax unsupported by block device\n", bdev);
- return false;
- }
- err = bdev_dax_pgoff(bdev, start, PAGE_SIZE, &pgoff);
- if (err) {
+ *start_off = get_start_sect(bdev) * SECTOR_SIZE;
+ part_size = bdev_nr_sectors(bdev) * SECTOR_SIZE;
+ if (*start_off % PAGE_SIZE || part_size % PAGE_SIZE) {
pr_info("%pg: error: unaligned partition for dax\n", bdev);
- return false;
- }
-
- last_page = PFN_DOWN((start + sectors - 1) * 512) * PAGE_SIZE / 512;
- err = bdev_dax_pgoff(bdev, last_page, PAGE_SIZE, &pgoff_end);
- if (err) {
- pr_info("%pg: error: unaligned partition for dax\n", bdev);
- return false;
+ return NULL;
}
id = dax_read_lock();
- len = dax_direct_access(dax_dev, pgoff, 1, &kaddr, &pfn);
- len2 = dax_direct_access(dax_dev, pgoff_end, 1, &end_kaddr, &end_pfn);
-
- if (len < 1 || len2 < 1) {
- pr_info("%pg: error: dax access failed (%ld)\n",
- bdev, len < 1 ? len : len2);
- dax_read_unlock(id);
- return false;
- }
-
- if (IS_ENABLED(CONFIG_FS_DAX_LIMITED) && pfn_t_special(pfn)) {
- /*
- * An arch that has enabled the pmem api should also
- * have its drivers support pfn_t_devmap()
- *
- * This is a developer warning and should not trigger in
- * production. dax_flush() will crash since it depends
- * on being able to do (page_address(pfn_to_page())).
- */
- WARN_ON(IS_ENABLED(CONFIG_ARCH_HAS_PMEM_API));
- dax_enabled = true;
- } else if (pfn_t_devmap(pfn) && pfn_t_devmap(end_pfn)) {
- struct dev_pagemap *pgmap, *end_pgmap;
-
- pgmap = get_dev_pagemap(pfn_t_to_pfn(pfn), NULL);
- end_pgmap = get_dev_pagemap(pfn_t_to_pfn(end_pfn), NULL);
- if (pgmap && pgmap == end_pgmap && pgmap->type == MEMORY_DEVICE_FS_DAX
- && pfn_t_to_page(pfn)->pgmap == pgmap
- && pfn_t_to_page(end_pfn)->pgmap == pgmap
- && pfn_t_to_pfn(pfn) == PHYS_PFN(__pa(kaddr))
- && pfn_t_to_pfn(end_pfn) == PHYS_PFN(__pa(end_kaddr)))
- dax_enabled = true;
- put_dev_pagemap(pgmap);
- put_dev_pagemap(end_pgmap);
-
- }
+ dax_dev = xa_load(&dax_hosts, (unsigned long)bdev->bd_disk);
+ if (!dax_dev || !dax_alive(dax_dev) || !igrab(&dax_dev->inode))
+ dax_dev = NULL;
dax_read_unlock(id);
- if (!dax_enabled) {
- pr_info("%pg: error: dax support not enabled\n", bdev);
- return false;
- }
- return true;
-}
-EXPORT_SYMBOL_GPL(generic_fsdax_supported);
-
-bool dax_supported(struct dax_device *dax_dev, struct block_device *bdev,
- int blocksize, sector_t start, sector_t len)
-{
- bool ret = false;
- int id;
-
- if (!dax_dev)
- return false;
-
- id = dax_read_lock();
- if (dax_alive(dax_dev) && dax_dev->ops->dax_supported)
- ret = dax_dev->ops->dax_supported(dax_dev, bdev, blocksize,
- start, len);
- dax_read_unlock(id);
- return ret;
+ return dax_dev;
}
-EXPORT_SYMBOL_GPL(dax_supported);
-#endif /* CONFIG_FS_DAX */
-#endif /* CONFIG_BLOCK */
+EXPORT_SYMBOL_GPL(fs_dax_get_by_bdev);
+#endif /* CONFIG_BLOCK && CONFIG_FS_DAX */
enum dax_device_flags {
/* !alive + rcu grace period == no new operations / mappings */
@@ -229,6 +105,10 @@ enum dax_device_flags {
DAXDEV_WRITE_CACHE,
/* flag to check if device supports synchronous flush */
DAXDEV_SYNC,
+ /* do not leave the caches dirty after writes */
+ DAXDEV_NOCACHE,
+ /* handle CPU fetch exceptions during reads */
+ DAXDEV_NOMC,
};
/**
@@ -270,9 +150,15 @@ size_t dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
if (!dax_alive(dax_dev))
return 0;
- return dax_dev->ops->copy_from_iter(dax_dev, pgoff, addr, bytes, i);
+ /*
+ * The userspace address for the memory copy has already been validated
+ * via access_ok() in vfs_write, so use the 'no check' version to bypass
+ * the HARDENED_USERCOPY overhead.
+ */
+ if (test_bit(DAXDEV_NOCACHE, &dax_dev->flags))
+ return _copy_from_iter_flushcache(addr, bytes, i);
+ return _copy_from_iter(addr, bytes, i);
}
-EXPORT_SYMBOL_GPL(dax_copy_from_iter);
size_t dax_copy_to_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
size_t bytes, struct iov_iter *i)
@@ -280,9 +166,15 @@ size_t dax_copy_to_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
if (!dax_alive(dax_dev))
return 0;
- return dax_dev->ops->copy_to_iter(dax_dev, pgoff, addr, bytes, i);
+ /*
+ * The userspace address for the memory copy has already been validated
+ * via access_ok() in vfs_read, so use the 'no check' version to bypass
+ * the HARDENED_USERCOPY overhead.
+ */
+ if (test_bit(DAXDEV_NOMC, &dax_dev->flags))
+ return _copy_mc_to_iter(addr, bytes, i);
+ return _copy_to_iter(addr, bytes, i);
}
-EXPORT_SYMBOL_GPL(dax_copy_to_iter);
int dax_zero_page_range(struct dax_device *dax_dev, pgoff_t pgoff,
size_t nr_pages)
@@ -332,17 +224,29 @@ bool dax_write_cache_enabled(struct dax_device *dax_dev)
}
EXPORT_SYMBOL_GPL(dax_write_cache_enabled);
-bool __dax_synchronous(struct dax_device *dax_dev)
+bool dax_synchronous(struct dax_device *dax_dev)
{
return test_bit(DAXDEV_SYNC, &dax_dev->flags);
}
-EXPORT_SYMBOL_GPL(__dax_synchronous);
+EXPORT_SYMBOL_GPL(dax_synchronous);
-void __set_dax_synchronous(struct dax_device *dax_dev)
+void set_dax_synchronous(struct dax_device *dax_dev)
{
set_bit(DAXDEV_SYNC, &dax_dev->flags);
}
-EXPORT_SYMBOL_GPL(__set_dax_synchronous);
+EXPORT_SYMBOL_GPL(set_dax_synchronous);
+
+void set_dax_nocache(struct dax_device *dax_dev)
+{
+ set_bit(DAXDEV_NOCACHE, &dax_dev->flags);
+}
+EXPORT_SYMBOL_GPL(set_dax_nocache);
+
+void set_dax_nomc(struct dax_device *dax_dev)
+{
+ set_bit(DAXDEV_NOMC, &dax_dev->flags);
+}
+EXPORT_SYMBOL_GPL(set_dax_nomc);
bool dax_alive(struct dax_device *dax_dev)
{
@@ -363,12 +267,7 @@ void kill_dax(struct dax_device *dax_dev)
return;
clear_bit(DAXDEV_ALIVE, &dax_dev->flags);
-
synchronize_srcu(&dax_srcu);
-
- spin_lock(&dax_host_lock);
- hlist_del_init(&dax_dev->list);
- spin_unlock(&dax_host_lock);
}
EXPORT_SYMBOL_GPL(kill_dax);
@@ -400,8 +299,6 @@ static struct dax_device *to_dax_dev(struct inode *inode)
static void dax_free_inode(struct inode *inode)
{
struct dax_device *dax_dev = to_dax_dev(inode);
- kfree(dax_dev->host);
- dax_dev->host = NULL;
if (inode->i_rdev)
ida_simple_remove(&dax_minor_ida, iminor(inode));
kmem_cache_free(dax_cache, dax_dev);
@@ -476,65 +373,30 @@ static struct dax_device *dax_dev_get(dev_t devt)
return dax_dev;
}
-static void dax_add_host(struct dax_device *dax_dev, const char *host)
-{
- int hash;
-
- /*
- * Unconditionally init dax_dev since it's coming from a
- * non-zeroed slab cache
- */
- INIT_HLIST_NODE(&dax_dev->list);
- dax_dev->host = host;
- if (!host)
- return;
-
- hash = dax_host_hash(host);
- spin_lock(&dax_host_lock);
- hlist_add_head(&dax_dev->list, &dax_host_list[hash]);
- spin_unlock(&dax_host_lock);
-}
-
-struct dax_device *alloc_dax(void *private, const char *__host,
- const struct dax_operations *ops, unsigned long flags)
+struct dax_device *alloc_dax(void *private, const struct dax_operations *ops)
{
struct dax_device *dax_dev;
- const char *host;
dev_t devt;
int minor;
- if (ops && !ops->zero_page_range) {
- pr_debug("%s: error: device does not provide dax"
- " operation zero_page_range()\n",
- __host ? __host : "Unknown");
+ if (WARN_ON_ONCE(ops && !ops->zero_page_range))
return ERR_PTR(-EINVAL);
- }
-
- host = kstrdup(__host, GFP_KERNEL);
- if (__host && !host)
- return ERR_PTR(-ENOMEM);
minor = ida_simple_get(&dax_minor_ida, 0, MINORMASK+1, GFP_KERNEL);
if (minor < 0)
- goto err_minor;
+ return ERR_PTR(-ENOMEM);
devt = MKDEV(MAJOR(dax_devt), minor);
dax_dev = dax_dev_get(devt);
if (!dax_dev)
goto err_dev;
- dax_add_host(dax_dev, host);
dax_dev->ops = ops;
dax_dev->private = private;
- if (flags & DAXDEV_F_SYNC)
- set_dax_synchronous(dax_dev);
-
return dax_dev;
err_dev:
ida_simple_remove(&dax_minor_ida, minor);
- err_minor:
- kfree(host);
return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL_GPL(alloc_dax);
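
For context, below is a minimal sketch of the registration sequence a block
driver would use after this change, and of the filesystem-side lookup that
replaces dax_get_by_host(). All "my_"-prefixed names (my_dax_ops,
my_attach_dax, my_direct_access, my_zero_page_range) are illustrative
placeholders, not part of this patch; only alloc_dax(), the set_dax_*()
helpers, dax_add_host()/dax_remove_host() and fs_dax_get_by_bdev() come from
the API as it stands after this series.

#include <linux/blkdev.h>
#include <linux/dax.h>
#include <linux/pfn_t.h>

/* bodies elided; signatures match what struct dax_operations expects here */
static long my_direct_access(struct dax_device *dax_dev, pgoff_t pgoff,
		long nr_pages, void **kaddr, pfn_t *pfn);
static int my_zero_page_range(struct dax_device *dax_dev, pgoff_t pgoff,
		size_t nr_pages);

static const struct dax_operations my_dax_ops = {
	.direct_access		= my_direct_access,
	.zero_page_range	= my_zero_page_range,
};

static int my_attach_dax(void *drv_data, struct gendisk *disk)
{
	struct dax_device *dax_dev;
	int rc;

	/* the host string and DAXDEV_F_SYNC flag arguments are gone */
	dax_dev = alloc_dax(drv_data, &my_dax_ops);
	if (IS_ERR(dax_dev))
		return PTR_ERR(dax_dev);

	set_dax_synchronous(dax_dev);	/* replaces DAXDEV_F_SYNC */
	set_dax_nocache(dax_dev);	/* writes use _copy_from_iter_flushcache() */
	set_dax_nomc(dax_dev);		/* reads use _copy_mc_to_iter() */

	/* publish the gendisk -> dax_device mapping in the xarray */
	rc = dax_add_host(dax_dev, disk);
	if (rc) {
		kill_dax(dax_dev);
		put_dax(dax_dev);
		return rc;
	}
	return 0;
}

A filesystem then resolves its dax_device with the two-argument lookup,
e.g. dax_dev = fs_dax_get_by_bdev(bdev, &start_off); the returned start_off
is the partition's byte offset into the dax_device and replaces the per-I/O
translation that bdev_dax_pgoff() used to perform.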