Diffstat (limited to 'drivers/base')
-rw-r--r--  drivers/base/Makefile                          |    5
-rw-r--r--  drivers/base/arch_topology.c                   |   12
-rw-r--r--  drivers/base/core.c                            |   25
-rw-r--r--  drivers/base/cpu.c                             |    4
-rw-r--r--  drivers/base/dd.c                              |    3
-rw-r--r--  drivers/base/devcon.c                          |  136
-rw-r--r--  drivers/base/devtmpfs.c                        |   11
-rw-r--r--  drivers/base/firmware_loader/Makefile          |    7
-rw-r--r--  drivers/base/firmware_loader/fallback.c        |  675
-rw-r--r--  drivers/base/firmware_loader/fallback.h        |   67
-rw-r--r--  drivers/base/firmware_loader/fallback_table.c  |   55
-rw-r--r--  drivers/base/firmware_loader/firmware.h        |  115
-rw-r--r--  drivers/base/firmware_loader/main.c (renamed from drivers/base/firmware_class.c) | 833
-rw-r--r--  drivers/base/memory.c                          |   47
-rw-r--r--  drivers/base/node.c                            |   28
-rw-r--r--  drivers/base/platform.c                        |    4
-rw-r--r--  drivers/base/power/power.h                     |    1
-rw-r--r--  drivers/base/power/wakeirq.c                   |   13
-rw-r--r--  drivers/base/regmap/regmap-debugfs.c           |   20
-rw-r--r--  drivers/base/regmap/regmap-i2c.c               |    4
-rw-r--r--  drivers/base/regmap/regmap-mmio.c              |   24
-rw-r--r--  drivers/base/regmap/regmap.c                   |  306
-rw-r--r--  drivers/base/soc.c                             |    2
23 files changed, 1385 insertions, 1012 deletions
diff --git a/drivers/base/Makefile b/drivers/base/Makefile
index e32a52490051..b074f242a435 100644
--- a/drivers/base/Makefile
+++ b/drivers/base/Makefile
@@ -5,14 +5,15 @@ obj-y := component.o core.o bus.o dd.o syscore.o \
driver.o class.o platform.o \
cpu.o firmware.o init.o map.o devres.o \
attribute_container.o transport_class.o \
- topology.o container.o property.o cacheinfo.o
+ topology.o container.o property.o cacheinfo.o \
+ devcon.o
obj-$(CONFIG_DEVTMPFS) += devtmpfs.o
obj-$(CONFIG_DMA_CMA) += dma-contiguous.o
obj-y += power/
obj-$(CONFIG_HAS_DMA) += dma-mapping.o
obj-$(CONFIG_HAVE_GENERIC_DMA_COHERENT) += dma-coherent.o
obj-$(CONFIG_ISA_BUS_API) += isa.o
-obj-$(CONFIG_FW_LOADER) += firmware_class.o
+obj-y += firmware_loader/
obj-$(CONFIG_NUMA) += node.o
obj-$(CONFIG_MEMORY_HOTPLUG_SPARSE) += memory.o
ifeq ($(CONFIG_SYSFS),y)
diff --git a/drivers/base/arch_topology.c b/drivers/base/arch_topology.c
index 52ec5174bcb1..e7cb0c6ade81 100644
--- a/drivers/base/arch_topology.c
+++ b/drivers/base/arch_topology.c
@@ -169,11 +169,11 @@ bool __init topology_parse_cpu_capacity(struct device_node *cpu_node, int cpu)
}
#ifdef CONFIG_CPU_FREQ
-static cpumask_var_t cpus_to_visit __initdata;
-static void __init parsing_done_workfn(struct work_struct *work);
-static __initdata DECLARE_WORK(parsing_done_work, parsing_done_workfn);
+static cpumask_var_t cpus_to_visit;
+static void parsing_done_workfn(struct work_struct *work);
+static DECLARE_WORK(parsing_done_work, parsing_done_workfn);
-static int __init
+static int
init_cpu_capacity_callback(struct notifier_block *nb,
unsigned long val,
void *data)
@@ -209,7 +209,7 @@ init_cpu_capacity_callback(struct notifier_block *nb,
return 0;
}
-static struct notifier_block init_cpu_capacity_notifier __initdata = {
+static struct notifier_block init_cpu_capacity_notifier = {
.notifier_call = init_cpu_capacity_callback,
};
@@ -242,7 +242,7 @@ static int __init register_cpufreq_notifier(void)
}
core_initcall(register_cpufreq_notifier);
-static void __init parsing_done_workfn(struct work_struct *work)
+static void parsing_done_workfn(struct work_struct *work)
{
cpufreq_unregister_notifier(&init_cpu_capacity_notifier,
CPUFREQ_POLICY_NOTIFIER);
diff --git a/drivers/base/core.c b/drivers/base/core.c
index 5847364f25d9..b610816eb887 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -196,8 +196,10 @@ struct device_link *device_link_add(struct device *consumer,
}
list_for_each_entry(link, &supplier->links.consumers, s_node)
- if (link->consumer == consumer)
+ if (link->consumer == consumer) {
+ kref_get(&link->kref);
goto out;
+ }
link = kzalloc(sizeof(*link), GFP_KERNEL);
if (!link)
@@ -222,6 +224,7 @@ struct device_link *device_link_add(struct device *consumer,
link->consumer = consumer;
INIT_LIST_HEAD(&link->c_node);
link->flags = flags;
+ kref_init(&link->kref);
/* Determine the initial link state. */
if (flags & DL_FLAG_STATELESS) {
@@ -292,8 +295,10 @@ static void __device_link_free_srcu(struct rcu_head *rhead)
device_link_free(container_of(rhead, struct device_link, rcu_head));
}
-static void __device_link_del(struct device_link *link)
+static void __device_link_del(struct kref *kref)
{
+ struct device_link *link = container_of(kref, struct device_link, kref);
+
dev_info(link->consumer, "Dropping the link to %s\n",
dev_name(link->supplier));
@@ -305,8 +310,10 @@ static void __device_link_del(struct device_link *link)
call_srcu(&device_links_srcu, &link->rcu_head, __device_link_free_srcu);
}
#else /* !CONFIG_SRCU */
-static void __device_link_del(struct device_link *link)
+static void __device_link_del(struct kref *kref)
{
+ struct device_link *link = container_of(kref, struct device_link, kref);
+
dev_info(link->consumer, "Dropping the link to %s\n",
dev_name(link->supplier));
@@ -324,13 +331,15 @@ static void __device_link_del(struct device_link *link)
* @link: Device link to delete.
*
* The caller must ensure proper synchronization of this function with runtime
- * PM.
+ * PM. If the link was added multiple times, it needs to be deleted as often.
+ * Care is required for hotplugged devices: Their links are purged on removal
+ * and calling device_link_del() is then no longer allowed.
*/
void device_link_del(struct device_link *link)
{
device_links_write_lock();
device_pm_lock();
- __device_link_del(link);
+ kref_put(&link->kref, __device_link_del);
device_pm_unlock();
device_links_write_unlock();
}
@@ -444,7 +453,7 @@ static void __device_links_no_driver(struct device *dev)
continue;
if (link->flags & DL_FLAG_AUTOREMOVE)
- __device_link_del(link);
+ kref_put(&link->kref, __device_link_del);
else if (link->status != DL_STATE_SUPPLIER_UNBIND)
WRITE_ONCE(link->status, DL_STATE_AVAILABLE);
}
@@ -597,13 +606,13 @@ static void device_links_purge(struct device *dev)
list_for_each_entry_safe_reverse(link, ln, &dev->links.suppliers, c_node) {
WARN_ON(link->status == DL_STATE_ACTIVE);
- __device_link_del(link);
+ __device_link_del(&link->kref);
}
list_for_each_entry_safe_reverse(link, ln, &dev->links.consumers, s_node) {
WARN_ON(link->status != DL_STATE_DORMANT &&
link->status != DL_STATE_NONE);
- __device_link_del(link);
+ __device_link_del(&link->kref);
}
device_links_write_unlock();
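
A consumer-side sketch of the reference-counted behaviour introduced above (hypothetical driver code, not part of this diff): device_link_add() for an already existing consumer/supplier pair now only takes an extra kref, so the link must be dropped as many times as it was added.

/* Illustrative sketch only -- hypothetical consumer driver. */
#include <linux/device.h>

static int example_consumer_bind(struct device *consumer, struct device *supplier)
{
	struct device_link *link;

	/* First call creates the link and initializes its kref to 1. */
	link = device_link_add(consumer, supplier, DL_FLAG_STATELESS);
	if (!link)
		return -ENODEV;

	/* A second call for the same pair just bumps the kref... */
	device_link_add(consumer, supplier, DL_FLAG_STATELESS);

	/* ...so the link has to be deleted as often as it was added. */
	device_link_del(link);
	device_link_del(link);

	return 0;
}
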
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
index d21a2d913107..2da998baa75c 100644
--- a/drivers/base/cpu.c
+++ b/drivers/base/cpu.c
@@ -382,8 +382,10 @@ int register_cpu(struct cpu *cpu, int num)
if (cpu->hotpluggable)
cpu->dev.groups = hotplugable_cpu_attr_groups;
error = device_register(&cpu->dev);
- if (error)
+ if (error) {
+ put_device(&cpu->dev);
return error;
+ }
per_cpu(cpu_sys_devices, num) = &cpu->dev;
register_cpu_under_node(num, cpu_to_node(num));
diff --git a/drivers/base/dd.c b/drivers/base/dd.c
index de6fd092bf2f..c9f54089429b 100644
--- a/drivers/base/dd.c
+++ b/drivers/base/dd.c
@@ -292,8 +292,7 @@ static ssize_t coredump_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
device_lock(dev);
- if (dev->driver->coredump)
- dev->driver->coredump(dev);
+ dev->driver->coredump(dev);
device_unlock(dev);
return count;
diff --git a/drivers/base/devcon.c b/drivers/base/devcon.c
new file mode 100644
index 000000000000..d427e806cd73
--- /dev/null
+++ b/drivers/base/devcon.c
@@ -0,0 +1,136 @@
+// SPDX-License-Identifier: GPL-2.0
+/**
+ * Device connections
+ *
+ * Copyright (C) 2018 Intel Corporation
+ * Author: Heikki Krogerus <heikki.krogerus@linux.intel.com>
+ */
+
+#include <linux/device.h>
+
+static DEFINE_MUTEX(devcon_lock);
+static LIST_HEAD(devcon_list);
+
+/**
+ * device_connection_find_match - Find physical connection to a device
+ * @dev: Device with the connection
+ * @con_id: Identifier for the connection
+ * @data: Data for the match function
+ * @match: Function to check and convert the connection description
+ *
+ * Find a connection with unique identifier @con_id between @dev and another
+ * device. @match will be used to convert the connection description to data the
+ * caller is expecting to be returned.
+ */
+void *device_connection_find_match(struct device *dev, const char *con_id,
+ void *data,
+ void *(*match)(struct device_connection *con,
+ int ep, void *data))
+{
+ const char *devname = dev_name(dev);
+ struct device_connection *con;
+ void *ret = NULL;
+ int ep;
+
+ if (!match)
+ return NULL;
+
+ mutex_lock(&devcon_lock);
+
+ list_for_each_entry(con, &devcon_list, list) {
+ ep = match_string(con->endpoint, 2, devname);
+ if (ep < 0)
+ continue;
+
+ if (con_id && strcmp(con->id, con_id))
+ continue;
+
+ ret = match(con, !ep, data);
+ if (ret)
+ break;
+ }
+
+ mutex_unlock(&devcon_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(device_connection_find_match);
+
+extern struct bus_type platform_bus_type;
+extern struct bus_type pci_bus_type;
+extern struct bus_type i2c_bus_type;
+extern struct bus_type spi_bus_type;
+
+static struct bus_type *generic_match_buses[] = {
+ &platform_bus_type,
+#ifdef CONFIG_PCI
+ &pci_bus_type,
+#endif
+#ifdef CONFIG_I2C
+ &i2c_bus_type,
+#endif
+#ifdef CONFIG_SPI_MASTER
+ &spi_bus_type,
+#endif
+ NULL,
+};
+
+/* This tries to find the device from the most common bus types by name. */
+static void *generic_match(struct device_connection *con, int ep, void *data)
+{
+ struct bus_type *bus;
+ struct device *dev;
+
+ for (bus = generic_match_buses[0]; bus; bus++) {
+ dev = bus_find_device_by_name(bus, NULL, con->endpoint[ep]);
+ if (dev)
+ return dev;
+ }
+
+ /*
+ * We only get called if a connection was found, tell the caller to
+ * wait for the other device to show up.
+ */
+ return ERR_PTR(-EPROBE_DEFER);
+}
+
+/**
+ * device_connection_find - Find two devices connected together
+ * @dev: Device with the connection
+ * @con_id: Identifier for the connection
+ *
+ * Find a connection with unique identifier @con_id between @dev and
+ * another device. On success returns handle to the device that is connected
+ * to @dev, with the reference count for the found device incremented. Returns
+ * NULL if no matching connection was found, or ERR_PTR(-EPROBE_DEFER) when a
+ * connection was found but the other device has not been enumerated yet.
+ */
+struct device *device_connection_find(struct device *dev, const char *con_id)
+{
+ return device_connection_find_match(dev, con_id, NULL, generic_match);
+}
+EXPORT_SYMBOL_GPL(device_connection_find);
+
+/**
+ * device_connection_add - Register a connection description
+ * @con: The connection description to be registered
+ */
+void device_connection_add(struct device_connection *con)
+{
+ mutex_lock(&devcon_lock);
+ list_add_tail(&con->list, &devcon_list);
+ mutex_unlock(&devcon_lock);
+}
+EXPORT_SYMBOL_GPL(device_connection_add);
+
+/**
+ * device_connections_remove - Unregister connection description
+ * @con: The connection description to be unregistered
+ */
+void device_connection_remove(struct device_connection *con)
+{
+ mutex_lock(&devcon_lock);
+ list_del(&con->list);
+ mutex_unlock(&devcon_lock);
+}
+EXPORT_SYMBOL_GPL(device_connection_remove);
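
A rough usage sketch of the devcon API added above (hypothetical endpoint names and driver code, not part of this diff; the struct device_connection declaration is assumed to come from the matching <linux/device.h> change):

/* Illustrative sketch only -- endpoint names and the connection id are made up. */
#include <linux/device.h>
#include <linux/err.h>

static struct device_connection example_con = {
	.endpoint = { "i2c-example-mux", "usb-example-port" },	/* dev_name() of both ends */
	.id	  = "example-switch",				/* connection identifier */
};

static void example_board_setup(void)
{
	device_connection_add(&example_con);
}

static int example_driver_probe(struct device *dev)
{
	struct device *peer;

	/* Returns the peer with a reference held, NULL, or ERR_PTR(-EPROBE_DEFER). */
	peer = device_connection_find(dev, "example-switch");
	if (IS_ERR(peer))
		return PTR_ERR(peer);
	if (!peer)
		return -ENODEV;

	/* ... talk to the peer device ... */
	put_device(peer);
	return 0;
}
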
diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c
index 50025d7959cb..f7768077e817 100644
--- a/drivers/base/devtmpfs.c
+++ b/drivers/base/devtmpfs.c
@@ -356,7 +356,8 @@ int devtmpfs_mount(const char *mntdir)
if (!thread)
return 0;
- err = sys_mount("devtmpfs", (char *)mntdir, "devtmpfs", MS_SILENT, NULL);
+ err = ksys_mount("devtmpfs", (char *)mntdir, "devtmpfs", MS_SILENT,
+ NULL);
if (err)
printk(KERN_INFO "devtmpfs: error mounting %i\n", err);
else
@@ -379,14 +380,14 @@ static int devtmpfsd(void *p)
{
char options[] = "mode=0755";
int *err = p;
- *err = sys_unshare(CLONE_NEWNS);
+ *err = ksys_unshare(CLONE_NEWNS);
if (*err)
goto out;
- *err = sys_mount("devtmpfs", "/", "devtmpfs", MS_SILENT, options);
+ *err = ksys_mount("devtmpfs", "/", "devtmpfs", MS_SILENT, options);
if (*err)
goto out;
- sys_chdir("/.."); /* will traverse into overmounted root */
- sys_chroot(".");
+ ksys_chdir("/.."); /* will traverse into overmounted root */
+ ksys_chroot(".");
complete(&setup_done);
while (1) {
spin_lock(&req_lock);
diff --git a/drivers/base/firmware_loader/Makefile b/drivers/base/firmware_loader/Makefile
new file mode 100644
index 000000000000..a97eeb0be1d8
--- /dev/null
+++ b/drivers/base/firmware_loader/Makefile
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: GPL-2.0
+# Makefile for the Linux firmware loader
+
+obj-y := fallback_table.o
+obj-$(CONFIG_FW_LOADER) += firmware_class.o
+firmware_class-objs := main.o
+firmware_class-$(CONFIG_FW_LOADER_USER_HELPER) += fallback.o
diff --git a/drivers/base/firmware_loader/fallback.c b/drivers/base/firmware_loader/fallback.c
new file mode 100644
index 000000000000..31b5015b59fe
--- /dev/null
+++ b/drivers/base/firmware_loader/fallback.c
@@ -0,0 +1,675 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/types.h>
+#include <linux/kconfig.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/security.h>
+#include <linux/highmem.h>
+#include <linux/umh.h>
+#include <linux/sysctl.h>
+#include <linux/vmalloc.h>
+
+#include "fallback.h"
+#include "firmware.h"
+
+/*
+ * firmware fallback mechanism
+ */
+
+extern struct firmware_fallback_config fw_fallback_config;
+
+/* These getters are vetted to use int properly */
+static inline int __firmware_loading_timeout(void)
+{
+ return fw_fallback_config.loading_timeout;
+}
+
+/* These setters are vetted to use int properly */
+static void __fw_fallback_set_timeout(int timeout)
+{
+ fw_fallback_config.loading_timeout = timeout;
+}
+
+/*
+ * Use a small loading timeout for caching devices' firmware because all these
+ * firmware images have been loaded successfully at least once, and the system
+ * is ready to complete firmware loading now. The maximum size of firmware in
+ * current distributions is about 2M bytes, so 10 secs should be enough.
+ */
+void fw_fallback_set_cache_timeout(void)
+{
+ fw_fallback_config.old_timeout = __firmware_loading_timeout();
+ __fw_fallback_set_timeout(10);
+}
+
+/* Restores the timeout to the value last configured during normal operation */
+void fw_fallback_set_default_timeout(void)
+{
+ __fw_fallback_set_timeout(fw_fallback_config.old_timeout);
+}
+
+static long firmware_loading_timeout(void)
+{
+ return __firmware_loading_timeout() > 0 ?
+ __firmware_loading_timeout() * HZ : MAX_JIFFY_OFFSET;
+}
+
+static inline bool fw_sysfs_done(struct fw_priv *fw_priv)
+{
+ return __fw_state_check(fw_priv, FW_STATUS_DONE);
+}
+
+static inline bool fw_sysfs_loading(struct fw_priv *fw_priv)
+{
+ return __fw_state_check(fw_priv, FW_STATUS_LOADING);
+}
+
+static inline int fw_sysfs_wait_timeout(struct fw_priv *fw_priv, long timeout)
+{
+ return __fw_state_wait_common(fw_priv, timeout);
+}
+
+struct fw_sysfs {
+ bool nowait;
+ struct device dev;
+ struct fw_priv *fw_priv;
+ struct firmware *fw;
+};
+
+static struct fw_sysfs *to_fw_sysfs(struct device *dev)
+{
+ return container_of(dev, struct fw_sysfs, dev);
+}
+
+static void __fw_load_abort(struct fw_priv *fw_priv)
+{
+ /*
+ * There is a small window in which user can write to 'loading'
+ * between loading done and disappearance of 'loading'
+ */
+ if (fw_sysfs_done(fw_priv))
+ return;
+
+ list_del_init(&fw_priv->pending_list);
+ fw_state_aborted(fw_priv);
+}
+
+static void fw_load_abort(struct fw_sysfs *fw_sysfs)
+{
+ struct fw_priv *fw_priv = fw_sysfs->fw_priv;
+
+ __fw_load_abort(fw_priv);
+}
+
+static LIST_HEAD(pending_fw_head);
+
+void kill_pending_fw_fallback_reqs(bool only_kill_custom)
+{
+ struct fw_priv *fw_priv;
+ struct fw_priv *next;
+
+ mutex_lock(&fw_lock);
+ list_for_each_entry_safe(fw_priv, next, &pending_fw_head,
+ pending_list) {
+ if (!fw_priv->need_uevent || !only_kill_custom)
+ __fw_load_abort(fw_priv);
+ }
+ mutex_unlock(&fw_lock);
+}
+
+static ssize_t timeout_show(struct class *class, struct class_attribute *attr,
+ char *buf)
+{
+ return sprintf(buf, "%d\n", __firmware_loading_timeout());
+}
+
+/**
+ * firmware_timeout_store - set number of seconds to wait for firmware
+ * @class: device class pointer
+ * @attr: device attribute pointer
+ * @buf: buffer to scan for timeout value
+ * @count: number of bytes in @buf
+ *
+ * Sets the number of seconds to wait for the firmware. Once
+ * this expires an error will be returned to the driver and no
+ * firmware will be provided.
+ *
+ * Note: zero means 'wait forever'.
+ **/
+static ssize_t timeout_store(struct class *class, struct class_attribute *attr,
+ const char *buf, size_t count)
+{
+ int tmp_loading_timeout = simple_strtol(buf, NULL, 10);
+
+ if (tmp_loading_timeout < 0)
+ tmp_loading_timeout = 0;
+
+ __fw_fallback_set_timeout(tmp_loading_timeout);
+
+ return count;
+}
+static CLASS_ATTR_RW(timeout);
+
+static struct attribute *firmware_class_attrs[] = {
+ &class_attr_timeout.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(firmware_class);
+
+static void fw_dev_release(struct device *dev)
+{
+ struct fw_sysfs *fw_sysfs = to_fw_sysfs(dev);
+
+ kfree(fw_sysfs);
+}
+
+static int do_firmware_uevent(struct fw_sysfs *fw_sysfs, struct kobj_uevent_env *env)
+{
+ if (add_uevent_var(env, "FIRMWARE=%s", fw_sysfs->fw_priv->fw_name))
+ return -ENOMEM;
+ if (add_uevent_var(env, "TIMEOUT=%i", __firmware_loading_timeout()))
+ return -ENOMEM;
+ if (add_uevent_var(env, "ASYNC=%d", fw_sysfs->nowait))
+ return -ENOMEM;
+
+ return 0;
+}
+
+static int firmware_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+ struct fw_sysfs *fw_sysfs = to_fw_sysfs(dev);
+ int err = 0;
+
+ mutex_lock(&fw_lock);
+ if (fw_sysfs->fw_priv)
+ err = do_firmware_uevent(fw_sysfs, env);
+ mutex_unlock(&fw_lock);
+ return err;
+}
+
+static struct class firmware_class = {
+ .name = "firmware",
+ .class_groups = firmware_class_groups,
+ .dev_uevent = firmware_uevent,
+ .dev_release = fw_dev_release,
+};
+
+int register_sysfs_loader(void)
+{
+ return class_register(&firmware_class);
+}
+
+void unregister_sysfs_loader(void)
+{
+ class_unregister(&firmware_class);
+}
+
+static ssize_t firmware_loading_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct fw_sysfs *fw_sysfs = to_fw_sysfs(dev);
+ int loading = 0;
+
+ mutex_lock(&fw_lock);
+ if (fw_sysfs->fw_priv)
+ loading = fw_sysfs_loading(fw_sysfs->fw_priv);
+ mutex_unlock(&fw_lock);
+
+ return sprintf(buf, "%d\n", loading);
+}
+
+/* Some architectures don't have PAGE_KERNEL_RO */
+#ifndef PAGE_KERNEL_RO
+#define PAGE_KERNEL_RO PAGE_KERNEL
+#endif
+
+/* a paged firmware buffer should be mapped/unmapped only once */
+static int map_fw_priv_pages(struct fw_priv *fw_priv)
+{
+ if (!fw_priv->is_paged_buf)
+ return 0;
+
+ vunmap(fw_priv->data);
+ fw_priv->data = vmap(fw_priv->pages, fw_priv->nr_pages, 0,
+ PAGE_KERNEL_RO);
+ if (!fw_priv->data)
+ return -ENOMEM;
+ return 0;
+}
+
+/**
+ * firmware_loading_store - set value in the 'loading' control file
+ * @dev: device pointer
+ * @attr: device attribute pointer
+ * @buf: buffer to scan for loading control value
+ * @count: number of bytes in @buf
+ *
+ * The relevant values are:
+ *
+ * 1: Start a load, discarding any previous partial load.
+ * 0: Conclude the load and hand the data to the driver code.
+ * -1: Conclude the load with an error and discard any written data.
+ **/
+static ssize_t firmware_loading_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct fw_sysfs *fw_sysfs = to_fw_sysfs(dev);
+ struct fw_priv *fw_priv;
+ ssize_t written = count;
+ int loading = simple_strtol(buf, NULL, 10);
+ int i;
+
+ mutex_lock(&fw_lock);
+ fw_priv = fw_sysfs->fw_priv;
+ if (fw_state_is_aborted(fw_priv))
+ goto out;
+
+ switch (loading) {
+ case 1:
+ /* discarding any previous partial load */
+ if (!fw_sysfs_done(fw_priv)) {
+ for (i = 0; i < fw_priv->nr_pages; i++)
+ __free_page(fw_priv->pages[i]);
+ vfree(fw_priv->pages);
+ fw_priv->pages = NULL;
+ fw_priv->page_array_size = 0;
+ fw_priv->nr_pages = 0;
+ fw_state_start(fw_priv);
+ }
+ break;
+ case 0:
+ if (fw_sysfs_loading(fw_priv)) {
+ int rc;
+
+ /*
+ * Several loading requests may be pending on
+ * the same firmware buf, so let all requests
+ * see the mapped 'buf->data' once the loading
+ * is completed.
+ */
+ rc = map_fw_priv_pages(fw_priv);
+ if (rc)
+ dev_err(dev, "%s: map pages failed\n",
+ __func__);
+ else
+ rc = security_kernel_post_read_file(NULL,
+ fw_priv->data, fw_priv->size,
+ READING_FIRMWARE);
+
+ /*
+ * Same logic as fw_load_abort, only the DONE bit
+ * is ignored and we set ABORT only on failure.
+ */
+ list_del_init(&fw_priv->pending_list);
+ if (rc) {
+ fw_state_aborted(fw_priv);
+ written = rc;
+ } else {
+ fw_state_done(fw_priv);
+ }
+ break;
+ }
+ /* fallthrough */
+ default:
+ dev_err(dev, "%s: unexpected value (%d)\n", __func__, loading);
+ /* fallthrough */
+ case -1:
+ fw_load_abort(fw_sysfs);
+ break;
+ }
+out:
+ mutex_unlock(&fw_lock);
+ return written;
+}
+
+static DEVICE_ATTR(loading, 0644, firmware_loading_show, firmware_loading_store);
+
+static void firmware_rw_data(struct fw_priv *fw_priv, char *buffer,
+ loff_t offset, size_t count, bool read)
+{
+ if (read)
+ memcpy(buffer, fw_priv->data + offset, count);
+ else
+ memcpy(fw_priv->data + offset, buffer, count);
+}
+
+static void firmware_rw(struct fw_priv *fw_priv, char *buffer,
+ loff_t offset, size_t count, bool read)
+{
+ while (count) {
+ void *page_data;
+ int page_nr = offset >> PAGE_SHIFT;
+ int page_ofs = offset & (PAGE_SIZE-1);
+ int page_cnt = min_t(size_t, PAGE_SIZE - page_ofs, count);
+
+ page_data = kmap(fw_priv->pages[page_nr]);
+
+ if (read)
+ memcpy(buffer, page_data + page_ofs, page_cnt);
+ else
+ memcpy(page_data + page_ofs, buffer, page_cnt);
+
+ kunmap(fw_priv->pages[page_nr]);
+ buffer += page_cnt;
+ offset += page_cnt;
+ count -= page_cnt;
+ }
+}
+
+static ssize_t firmware_data_read(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buffer, loff_t offset, size_t count)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct fw_sysfs *fw_sysfs = to_fw_sysfs(dev);
+ struct fw_priv *fw_priv;
+ ssize_t ret_count;
+
+ mutex_lock(&fw_lock);
+ fw_priv = fw_sysfs->fw_priv;
+ if (!fw_priv || fw_sysfs_done(fw_priv)) {
+ ret_count = -ENODEV;
+ goto out;
+ }
+ if (offset > fw_priv->size) {
+ ret_count = 0;
+ goto out;
+ }
+ if (count > fw_priv->size - offset)
+ count = fw_priv->size - offset;
+
+ ret_count = count;
+
+ if (fw_priv->data)
+ firmware_rw_data(fw_priv, buffer, offset, count, true);
+ else
+ firmware_rw(fw_priv, buffer, offset, count, true);
+
+out:
+ mutex_unlock(&fw_lock);
+ return ret_count;
+}
+
+static int fw_realloc_pages(struct fw_sysfs *fw_sysfs, int min_size)
+{
+ struct fw_priv *fw_priv= fw_sysfs->fw_priv;
+ int pages_needed = PAGE_ALIGN(min_size) >> PAGE_SHIFT;
+
+ /* If the array of pages is too small, grow it... */
+ if (fw_priv->page_array_size < pages_needed) {
+ int new_array_size = max(pages_needed,
+ fw_priv->page_array_size * 2);
+ struct page **new_pages;
+
+ new_pages = vmalloc(new_array_size * sizeof(void *));
+ if (!new_pages) {
+ fw_load_abort(fw_sysfs);
+ return -ENOMEM;
+ }
+ memcpy(new_pages, fw_priv->pages,
+ fw_priv->page_array_size * sizeof(void *));
+ memset(&new_pages[fw_priv->page_array_size], 0, sizeof(void *) *
+ (new_array_size - fw_priv->page_array_size));
+ vfree(fw_priv->pages);
+ fw_priv->pages = new_pages;
+ fw_priv->page_array_size = new_array_size;
+ }
+
+ while (fw_priv->nr_pages < pages_needed) {
+ fw_priv->pages[fw_priv->nr_pages] =
+ alloc_page(GFP_KERNEL | __GFP_HIGHMEM);
+
+ if (!fw_priv->pages[fw_priv->nr_pages]) {
+ fw_load_abort(fw_sysfs);
+ return -ENOMEM;
+ }
+ fw_priv->nr_pages++;
+ }
+ return 0;
+}
+
+/**
+ * firmware_data_write - write method for firmware
+ * @filp: open sysfs file
+ * @kobj: kobject for the device
+ * @bin_attr: bin_attr structure
+ * @buffer: buffer being written
+ * @offset: buffer offset for write in total data store area
+ * @count: buffer size
+ *
+ * Data written to the 'data' attribute will be later handed to
+ * the driver as a firmware image.
+ **/
+static ssize_t firmware_data_write(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buffer, loff_t offset, size_t count)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct fw_sysfs *fw_sysfs = to_fw_sysfs(dev);
+ struct fw_priv *fw_priv;
+ ssize_t retval;
+
+ if (!capable(CAP_SYS_RAWIO))
+ return -EPERM;
+
+ mutex_lock(&fw_lock);
+ fw_priv = fw_sysfs->fw_priv;
+ if (!fw_priv || fw_sysfs_done(fw_priv)) {
+ retval = -ENODEV;
+ goto out;
+ }
+
+ if (fw_priv->data) {
+ if (offset + count > fw_priv->allocated_size) {
+ retval = -ENOMEM;
+ goto out;
+ }
+ firmware_rw_data(fw_priv, buffer, offset, count, false);
+ retval = count;
+ } else {
+ retval = fw_realloc_pages(fw_sysfs, offset + count);
+ if (retval)
+ goto out;
+
+ retval = count;
+ firmware_rw(fw_priv, buffer, offset, count, false);
+ }
+
+ fw_priv->size = max_t(size_t, offset + count, fw_priv->size);
+out:
+ mutex_unlock(&fw_lock);
+ return retval;
+}
+
+static struct bin_attribute firmware_attr_data = {
+ .attr = { .name = "data", .mode = 0644 },
+ .size = 0,
+ .read = firmware_data_read,
+ .write = firmware_data_write,
+};
+
+static struct attribute *fw_dev_attrs[] = {
+ &dev_attr_loading.attr,
+ NULL
+};
+
+static struct bin_attribute *fw_dev_bin_attrs[] = {
+ &firmware_attr_data,
+ NULL
+};
+
+static const struct attribute_group fw_dev_attr_group = {
+ .attrs = fw_dev_attrs,
+ .bin_attrs = fw_dev_bin_attrs,
+};
+
+static const struct attribute_group *fw_dev_attr_groups[] = {
+ &fw_dev_attr_group,
+ NULL
+};
+
+static struct fw_sysfs *
+fw_create_instance(struct firmware *firmware, const char *fw_name,
+ struct device *device, unsigned int opt_flags)
+{
+ struct fw_sysfs *fw_sysfs;
+ struct device *f_dev;
+
+ fw_sysfs = kzalloc(sizeof(*fw_sysfs), GFP_KERNEL);
+ if (!fw_sysfs) {
+ fw_sysfs = ERR_PTR(-ENOMEM);
+ goto exit;
+ }
+
+ fw_sysfs->nowait = !!(opt_flags & FW_OPT_NOWAIT);
+ fw_sysfs->fw = firmware;
+ f_dev = &fw_sysfs->dev;
+
+ device_initialize(f_dev);
+ dev_set_name(f_dev, "%s", fw_name);
+ f_dev->parent = device;
+ f_dev->class = &firmware_class;
+ f_dev->groups = fw_dev_attr_groups;
+exit:
+ return fw_sysfs;
+}
+
+/**
+ * fw_load_sysfs_fallback - load a firmware via the sysfs fallback mechanism
+ * @fw_sysfs: firmware sysfs information for the firmware to load
+ * @opt_flags: flags of options, FW_OPT_*
+ * @timeout: timeout to wait for the load
+ *
+ * In charge of constructing a sysfs fallback interface for firmware loading.
+ **/
+static int fw_load_sysfs_fallback(struct fw_sysfs *fw_sysfs,
+ unsigned int opt_flags, long timeout)
+{
+ int retval = 0;
+ struct device *f_dev = &fw_sysfs->dev;
+ struct fw_priv *fw_priv = fw_sysfs->fw_priv;
+
+ /* fall back on userspace loading */
+ if (!fw_priv->data)
+ fw_priv->is_paged_buf = true;
+
+ dev_set_uevent_suppress(f_dev, true);
+
+ retval = device_add(f_dev);
+ if (retval) {
+ dev_err(f_dev, "%s: device_register failed\n", __func__);
+ goto err_put_dev;
+ }
+
+ mutex_lock(&fw_lock);
+ list_add(&fw_priv->pending_list, &pending_fw_head);
+ mutex_unlock(&fw_lock);
+
+ if (opt_flags & FW_OPT_UEVENT) {
+ fw_priv->need_uevent = true;
+ dev_set_uevent_suppress(f_dev, false);
+ dev_dbg(f_dev, "firmware: requesting %s\n", fw_priv->fw_name);
+ kobject_uevent(&fw_sysfs->dev.kobj, KOBJ_ADD);
+ } else {
+ timeout = MAX_JIFFY_OFFSET;
+ }
+
+ retval = fw_sysfs_wait_timeout(fw_priv, timeout);
+ if (retval < 0) {
+ mutex_lock(&fw_lock);
+ fw_load_abort(fw_sysfs);
+ mutex_unlock(&fw_lock);
+ }
+
+ if (fw_state_is_aborted(fw_priv)) {
+ if (retval == -ERESTARTSYS)
+ retval = -EINTR;
+ else
+ retval = -EAGAIN;
+ } else if (fw_priv->is_paged_buf && !fw_priv->data)
+ retval = -ENOMEM;
+
+ device_del(f_dev);
+err_put_dev:
+ put_device(f_dev);
+ return retval;
+}
+
+static int fw_load_from_user_helper(struct firmware *firmware,
+ const char *name, struct device *device,
+ unsigned int opt_flags)
+{
+ struct fw_sysfs *fw_sysfs;
+ long timeout;
+ int ret;
+
+ timeout = firmware_loading_timeout();
+ if (opt_flags & FW_OPT_NOWAIT) {
+ timeout = usermodehelper_read_lock_wait(timeout);
+ if (!timeout) {
+ dev_dbg(device, "firmware: %s loading timed out\n",
+ name);
+ return -EBUSY;
+ }
+ } else {
+ ret = usermodehelper_read_trylock();
+ if (WARN_ON(ret)) {
+ dev_err(device, "firmware: %s will not be loaded\n",
+ name);
+ return ret;
+ }
+ }
+
+ fw_sysfs = fw_create_instance(firmware, name, device, opt_flags);
+ if (IS_ERR(fw_sysfs)) {
+ ret = PTR_ERR(fw_sysfs);
+ goto out_unlock;
+ }
+
+ fw_sysfs->fw_priv = firmware->priv;
+ ret = fw_load_sysfs_fallback(fw_sysfs, opt_flags, timeout);
+
+ if (!ret)
+ ret = assign_fw(firmware, device, opt_flags);
+
+out_unlock:
+ usermodehelper_read_unlock();
+
+ return ret;
+}
+
+static bool fw_force_sysfs_fallback(unsigned int opt_flags)
+{
+ if (fw_fallback_config.force_sysfs_fallback)
+ return true;
+ if (!(opt_flags & FW_OPT_USERHELPER))
+ return false;
+ return true;
+}
+
+static bool fw_run_sysfs_fallback(unsigned int opt_flags)
+{
+ if (fw_fallback_config.ignore_sysfs_fallback) {
+ pr_info_once("Ignoring firmware sysfs fallback due to sysctl knob\n");
+ return false;
+ }
+
+ if ((opt_flags & FW_OPT_NOFALLBACK))
+ return false;
+
+ return fw_force_sysfs_fallback(opt_flags);
+}
+
+int fw_sysfs_fallback(struct firmware *fw, const char *name,
+ struct device *device,
+ unsigned int opt_flags,
+ int ret)
+{
+ if (!fw_run_sysfs_fallback(opt_flags))
+ return ret;
+
+ dev_warn(device, "Falling back to user helper\n");
+ return fw_load_from_user_helper(fw, name, device, opt_flags);
+}
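
The 'loading'/'data' protocol implemented by firmware_loading_store() and firmware_data_write() above is driven from userspace roughly as follows (an illustrative userspace sketch with minimal error handling; the sysfs directory of the pending firmware device is assumed, not taken from this diff):

/* Illustrative userspace sketch only. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static void write_str(const char *path, const char *s)
{
	int fd = open(path, O_WRONLY);

	if (fd >= 0) {
		(void)write(fd, s, strlen(s));
		close(fd);
	}
}

/* devdir: sysfs directory of the pending firmware device (assumed layout). */
static int load_firmware(const char *devdir, const char *image_path)
{
	char loading[256], data[256], buf[4096];
	ssize_t n;
	int in, out;

	snprintf(loading, sizeof(loading), "%s/loading", devdir);
	snprintf(data, sizeof(data), "%s/data", devdir);

	in = open(image_path, O_RDONLY);
	out = open(data, O_WRONLY);
	if (in < 0 || out < 0)
		return -1;

	write_str(loading, "1");		/* 1: start, discard any partial load */
	while ((n = read(in, buf, sizeof(buf))) > 0)
		(void)write(out, buf, n);	/* stream the image into 'data' */
	write_str(loading, "0");		/* 0: conclude and hand the data to the driver */
	/* writing "-1" to 'loading' would abort the load instead */

	close(in);
	close(out);
	return 0;
}
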
diff --git a/drivers/base/firmware_loader/fallback.h b/drivers/base/firmware_loader/fallback.h
new file mode 100644
index 000000000000..dfebc644ed35
--- /dev/null
+++ b/drivers/base/firmware_loader/fallback.h
@@ -0,0 +1,67 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __FIRMWARE_FALLBACK_H
+#define __FIRMWARE_FALLBACK_H
+
+#include <linux/firmware.h>
+#include <linux/device.h>
+
+/**
+ * struct firmware_fallback_config - firmware fallback configuration settings
+ *
+ * Helps describe and fine tune the fallback mechanism.
+ *
+ * @force_sysfs_fallback: force the sysfs fallback mechanism to be used
+ * as if one had enabled CONFIG_FW_LOADER_USER_HELPER_FALLBACK=y.
+ * Useful to help debug a CONFIG_FW_LOADER_USER_HELPER_FALLBACK=y
+ * functionality on a kernel where that config entry has been disabled.
+ * @ignore_sysfs_fallback: force the sysfs fallback mechanism to be disabled.
+ * This emulates the behaviour as if we had set the kernel
+ * config CONFIG_FW_LOADER_USER_HELPER=n.
+ * @old_timeout: for internal use
+ * @loading_timeout: the timeout to wait for the fallback mechanism before
+ * giving up, in seconds.
+ */
+struct firmware_fallback_config {
+ unsigned int force_sysfs_fallback;
+ unsigned int ignore_sysfs_fallback;
+ int old_timeout;
+ int loading_timeout;
+};
+
+#ifdef CONFIG_FW_LOADER_USER_HELPER
+int fw_sysfs_fallback(struct firmware *fw, const char *name,
+ struct device *device,
+ unsigned int opt_flags,
+ int ret);
+void kill_pending_fw_fallback_reqs(bool only_kill_custom);
+
+void fw_fallback_set_cache_timeout(void);
+void fw_fallback_set_default_timeout(void);
+
+int register_sysfs_loader(void);
+void unregister_sysfs_loader(void);
+#else /* CONFIG_FW_LOADER_USER_HELPER */
+static inline int fw_sysfs_fallback(struct firmware *fw, const char *name,
+ struct device *device,
+ unsigned int opt_flags,
+ int ret)
+{
+ /* Keep carrying over the same error */
+ return ret;
+}
+
+static inline void kill_pending_fw_fallback_reqs(bool only_kill_custom) { }
+static inline void fw_fallback_set_cache_timeout(void) { }
+static inline void fw_fallback_set_default_timeout(void) { }
+
+static inline int register_sysfs_loader(void)
+{
+ return 0;
+}
+
+static inline void unregister_sysfs_loader(void)
+{
+}
+#endif /* CONFIG_FW_LOADER_USER_HELPER */
+
+#endif /* __FIRMWARE_FALLBACK_H */
diff --git a/drivers/base/firmware_loader/fallback_table.c b/drivers/base/firmware_loader/fallback_table.c
new file mode 100644
index 000000000000..7428659d8df9
--- /dev/null
+++ b/drivers/base/firmware_loader/fallback_table.c
@@ -0,0 +1,55 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/types.h>
+#include <linux/kconfig.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/security.h>
+#include <linux/highmem.h>
+#include <linux/umh.h>
+#include <linux/sysctl.h>
+
+#include "fallback.h"
+#include "firmware.h"
+
+/*
+ * firmware fallback configuration table
+ */
+
+/* Module or built-in */
+#ifdef CONFIG_FW_LOADER_USER_HELPER
+
+static unsigned int zero;
+static unsigned int one = 1;
+
+struct firmware_fallback_config fw_fallback_config = {
+ .force_sysfs_fallback = IS_ENABLED(CONFIG_FW_LOADER_USER_HELPER_FALLBACK),
+ .loading_timeout = 60,
+ .old_timeout = 60,
+};
+EXPORT_SYMBOL_GPL(fw_fallback_config);
+
+struct ctl_table firmware_config_table[] = {
+ {
+ .procname = "force_sysfs_fallback",
+ .data = &fw_fallback_config.force_sysfs_fallback,
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = proc_douintvec_minmax,
+ .extra1 = &zero,
+ .extra2 = &one,
+ },
+ {
+ .procname = "ignore_sysfs_fallback",
+ .data = &fw_fallback_config.ignore_sysfs_fallback,
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = proc_douintvec_minmax,
+ .extra1 = &zero,
+ .extra2 = &one,
+ },
+ { }
+};
+EXPORT_SYMBOL_GPL(firmware_config_table);
+
+#endif
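
The two knobs registered above surface as sysctls; a minimal userspace sketch (the /proc/sys/kernel/firmware_config/ path is an assumption based on the related sysctl hookup, which is not part of this diff):

/* Illustrative userspace sketch only -- the sysctl path is assumed. */
#include <stdio.h>

static int set_fw_knob(const char *knob, int val)
{
	char path[128];
	FILE *f;

	snprintf(path, sizeof(path),
		 "/proc/sys/kernel/firmware_config/%s", knob);
	f = fopen(path, "w");
	if (!f)
		return -1;
	fprintf(f, "%d\n", val);
	fclose(f);
	return 0;
}

int main(void)
{
	/* Behave as if CONFIG_FW_LOADER_USER_HELPER_FALLBACK=y... */
	set_fw_knob("force_sysfs_fallback", 1);
	/* ...or disable the sysfs fallback entirely:
	 * set_fw_knob("ignore_sysfs_fallback", 1);
	 */
	return 0;
}
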
diff --git a/drivers/base/firmware_loader/firmware.h b/drivers/base/firmware_loader/firmware.h
new file mode 100644
index 000000000000..64acbb1a392c
--- /dev/null
+++ b/drivers/base/firmware_loader/firmware.h
@@ -0,0 +1,115 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __FIRMWARE_LOADER_H
+#define __FIRMWARE_LOADER_H
+
+#include <linux/firmware.h>
+#include <linux/types.h>
+#include <linux/kref.h>
+#include <linux/list.h>
+#include <linux/completion.h>
+
+#include <generated/utsrelease.h>
+
+/* firmware behavior options */
+#define FW_OPT_UEVENT (1U << 0)
+#define FW_OPT_NOWAIT (1U << 1)
+#define FW_OPT_USERHELPER (1U << 2)
+#define FW_OPT_NO_WARN (1U << 3)
+#define FW_OPT_NOCACHE (1U << 4)
+#define FW_OPT_NOFALLBACK (1U << 5)
+
+enum fw_status {
+ FW_STATUS_UNKNOWN,
+ FW_STATUS_LOADING,
+ FW_STATUS_DONE,
+ FW_STATUS_ABORTED,
+};
+
+/*
+ * Concurrent request_firmware() calls for the same firmware need to be
+ * serialized. struct fw_state is a simple state machine which holds the
+ * state of the firmware loading.
+ */
+struct fw_state {
+ struct completion completion;
+ enum fw_status status;
+};
+
+struct fw_priv {
+ struct kref ref;
+ struct list_head list;
+ struct firmware_cache *fwc;
+ struct fw_state fw_st;
+ void *data;
+ size_t size;
+ size_t allocated_size;
+#ifdef CONFIG_FW_LOADER_USER_HELPER
+ bool is_paged_buf;
+ bool need_uevent;
+ struct page **pages;
+ int nr_pages;
+ int page_array_size;
+ struct list_head pending_list;
+#endif
+ const char *fw_name;
+};
+
+extern struct mutex fw_lock;
+
+static inline bool __fw_state_check(struct fw_priv *fw_priv,
+ enum fw_status status)
+{
+ struct fw_state *fw_st = &fw_priv->fw_st;
+
+ return fw_st->status == status;
+}
+
+static inline int __fw_state_wait_common(struct fw_priv *fw_priv, long timeout)
+{
+ struct fw_state *fw_st = &fw_priv->fw_st;
+ long ret;
+
+ ret = wait_for_completion_killable_timeout(&fw_st->completion, timeout);
+ if (ret != 0 && fw_st->status == FW_STATUS_ABORTED)
+ return -ENOENT;
+ if (!ret)
+ return -ETIMEDOUT;
+
+ return ret < 0 ? ret : 0;
+}
+
+static inline void __fw_state_set(struct fw_priv *fw_priv,
+ enum fw_status status)
+{
+ struct fw_state *fw_st = &fw_priv->fw_st;
+
+ WRITE_ONCE(fw_st->status, status);
+
+ if (status == FW_STATUS_DONE || status == FW_STATUS_ABORTED)
+ complete_all(&fw_st->completion);
+}
+
+static inline void fw_state_aborted(struct fw_priv *fw_priv)
+{
+ __fw_state_set(fw_priv, FW_STATUS_ABORTED);
+}
+
+static inline bool fw_state_is_aborted(struct fw_priv *fw_priv)
+{
+ return __fw_state_check(fw_priv, FW_STATUS_ABORTED);
+}
+
+static inline void fw_state_start(struct fw_priv *fw_priv)
+{
+ __fw_state_set(fw_priv, FW_STATUS_LOADING);
+}
+
+static inline void fw_state_done(struct fw_priv *fw_priv)
+{
+ __fw_state_set(fw_priv, FW_STATUS_DONE);
+}
+
+int assign_fw(struct firmware *fw, struct device *device,
+ unsigned int opt_flags);
+
+#endif /* __FIRMWARE_LOADER_H */
diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_loader/main.c
index 7dd36ace6152..eb34089e4299 100644
--- a/drivers/base/firmware_class.c
+++ b/drivers/base/firmware_loader/main.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * firmware_class.c - Multi purpose firmware loading support
+ * main.c - Multi purpose firmware loading support
*
* Copyright (c) 2003 Manuel Estrada Sainz
*
@@ -36,37 +36,14 @@
#include <generated/utsrelease.h>
-#include "base.h"
+#include "../base.h"
+#include "firmware.h"
+#include "fallback.h"
MODULE_AUTHOR("Manuel Estrada Sainz");
MODULE_DESCRIPTION("Multi purpose firmware loading support");
MODULE_LICENSE("GPL");
-enum fw_status {
- FW_STATUS_UNKNOWN,
- FW_STATUS_LOADING,
- FW_STATUS_DONE,
- FW_STATUS_ABORTED,
-};
-
-/*
- * Concurrent request_firmware() for the same firmware need to be
- * serialized. struct fw_state is simple state machine which hold the
- * state of the firmware loading.
- */
-struct fw_state {
- struct completion completion;
- enum fw_status status;
-};
-
-/* firmware behavior options */
-#define FW_OPT_UEVENT (1U << 0)
-#define FW_OPT_NOWAIT (1U << 1)
-#define FW_OPT_USERHELPER (1U << 2)
-#define FW_OPT_NO_WARN (1U << 3)
-#define FW_OPT_NOCACHE (1U << 4)
-#define FW_OPT_NOFALLBACK (1U << 5)
-
struct firmware_cache {
/* firmware_buf instance will be added into the below list */
spinlock_t lock;
@@ -89,25 +66,6 @@ struct firmware_cache {
#endif
};
-struct fw_priv {
- struct kref ref;
- struct list_head list;
- struct firmware_cache *fwc;
- struct fw_state fw_st;
- void *data;
- size_t size;
- size_t allocated_size;
-#ifdef CONFIG_FW_LOADER_USER_HELPER
- bool is_paged_buf;
- bool need_uevent;
- struct page **pages;
- int nr_pages;
- int page_array_size;
- struct list_head pending_list;
-#endif
- const char *fw_name;
-};
-
struct fw_cache_entry {
struct list_head list;
const char *name;
@@ -128,7 +86,7 @@ static inline struct fw_priv *to_fw_priv(struct kref *ref)
/* fw_lock could be moved to 'struct fw_sysfs' but since it is just
* guarding for corner cases a global lock should be OK */
-static DEFINE_MUTEX(fw_lock);
+DEFINE_MUTEX(fw_lock);
static struct firmware_cache fw_cache;
@@ -191,13 +149,6 @@ static inline bool fw_is_builtin_firmware(const struct firmware *fw)
}
#endif
-static int loading_timeout = 60; /* In seconds */
-
-static inline long firmware_loading_timeout(void)
-{
- return loading_timeout > 0 ? loading_timeout * HZ : MAX_JIFFY_OFFSET;
-}
-
static void fw_state_init(struct fw_priv *fw_priv)
{
struct fw_state *fw_st = &fw_priv->fw_st;
@@ -206,83 +157,11 @@ static void fw_state_init(struct fw_priv *fw_priv)
fw_st->status = FW_STATUS_UNKNOWN;
}
-static int __fw_state_wait_common(struct fw_priv *fw_priv, long timeout)
-{
- struct fw_state *fw_st = &fw_priv->fw_st;
- long ret;
-
- ret = wait_for_completion_killable_timeout(&fw_st->completion, timeout);
- if (ret != 0 && fw_st->status == FW_STATUS_ABORTED)
- return -ENOENT;
- if (!ret)
- return -ETIMEDOUT;
-
- return ret < 0 ? ret : 0;
-}
-
-static void __fw_state_set(struct fw_priv *fw_priv,
- enum fw_status status)
-{
- struct fw_state *fw_st = &fw_priv->fw_st;
-
- WRITE_ONCE(fw_st->status, status);
-
- if (status == FW_STATUS_DONE || status == FW_STATUS_ABORTED)
- complete_all(&fw_st->completion);
-}
-
-static inline void fw_state_start(struct fw_priv *fw_priv)
-{
- __fw_state_set(fw_priv, FW_STATUS_LOADING);
-}
-
-static inline void fw_state_done(struct fw_priv *fw_priv)
-{
- __fw_state_set(fw_priv, FW_STATUS_DONE);
-}
-
-static inline void fw_state_aborted(struct fw_priv *fw_priv)
-{
- __fw_state_set(fw_priv, FW_STATUS_ABORTED);
-}
-
static inline int fw_state_wait(struct fw_priv *fw_priv)
{
return __fw_state_wait_common(fw_priv, MAX_SCHEDULE_TIMEOUT);
}
-static bool __fw_state_check(struct fw_priv *fw_priv,
- enum fw_status status)
-{
- struct fw_state *fw_st = &fw_priv->fw_st;
-
- return fw_st->status == status;
-}
-
-static inline bool fw_state_is_aborted(struct fw_priv *fw_priv)
-{
- return __fw_state_check(fw_priv, FW_STATUS_ABORTED);
-}
-
-#ifdef CONFIG_FW_LOADER_USER_HELPER
-
-static inline bool fw_sysfs_done(struct fw_priv *fw_priv)
-{
- return __fw_state_check(fw_priv, FW_STATUS_DONE);
-}
-
-static inline bool fw_sysfs_loading(struct fw_priv *fw_priv)
-{
- return __fw_state_check(fw_priv, FW_STATUS_LOADING);
-}
-
-static inline int fw_sysfs_wait_timeout(struct fw_priv *fw_priv, long timeout)
-{
- return __fw_state_wait_common(fw_priv, timeout);
-}
-
-#endif /* CONFIG_FW_LOADER_USER_HELPER */
-
static int fw_cache_piggyback_on_request(const char *name);
static struct fw_priv *__allocate_fw_priv(const char *fw_name,
@@ -517,14 +396,24 @@ static struct fw_name_devm *fw_find_devm_name(struct device *dev,
return fwn;
}
-/* add firmware name into devres list */
-static int fw_add_devm_name(struct device *dev, const char *name)
+static bool fw_cache_is_setup(struct device *dev, const char *name)
{
struct fw_name_devm *fwn;
fwn = fw_find_devm_name(dev, name);
if (fwn)
- return 1;
+ return true;
+
+ return false;
+}
+
+/* add firmware name into devres list */
+static int fw_add_devm_name(struct device *dev, const char *name)
+{
+ struct fw_name_devm *fwn;
+
+ if (fw_cache_is_setup(dev, name))
+ return 0;
fwn = devres_alloc(fw_name_devm_release, sizeof(struct fw_name_devm),
GFP_KERNEL);
@@ -542,16 +431,22 @@ static int fw_add_devm_name(struct device *dev, const char *name)
return 0;
}
#else
+static bool fw_cache_is_setup(struct device *dev, const char *name)
+{
+ return false;
+}
+
static int fw_add_devm_name(struct device *dev, const char *name)
{
return 0;
}
#endif
-static int assign_fw(struct firmware *fw, struct device *device,
- unsigned int opt_flags)
+int assign_fw(struct firmware *fw, struct device *device,
+ unsigned int opt_flags)
{
struct fw_priv *fw_priv = fw->priv;
+ int ret;
mutex_lock(&fw_lock);
if (!fw_priv->size || fw_state_is_aborted(fw_priv)) {
@@ -568,8 +463,13 @@ static int assign_fw(struct firmware *fw, struct device *device,
*/
/* don't cache firmware handled without uevent */
if (device && (opt_flags & FW_OPT_UEVENT) &&
- !(opt_flags & FW_OPT_NOCACHE))
- fw_add_devm_name(device, fw_priv->fw_name);
+ !(opt_flags & FW_OPT_NOCACHE)) {
+ ret = fw_add_devm_name(device, fw_priv->fw_name);
+ if (ret) {
+ mutex_unlock(&fw_lock);
+ return ret;
+ }
+ }
/*
* After caching firmware image is started, let it piggyback
@@ -587,626 +487,6 @@ static int assign_fw(struct firmware *fw, struct device *device,
return 0;
}
-/*
- * user-mode helper code
- */
-#ifdef CONFIG_FW_LOADER_USER_HELPER
-struct fw_sysfs {
- bool nowait;
- struct device dev;
- struct fw_priv *fw_priv;
- struct firmware *fw;
-};
-
-static struct fw_sysfs *to_fw_sysfs(struct device *dev)
-{
- return container_of(dev, struct fw_sysfs, dev);
-}
-
-static void __fw_load_abort(struct fw_priv *fw_priv)
-{
- /*
- * There is a small window in which user can write to 'loading'
- * between loading done and disappearance of 'loading'
- */
- if (fw_sysfs_done(fw_priv))
- return;
-
- list_del_init(&fw_priv->pending_list);
- fw_state_aborted(fw_priv);
-}
-
-static void fw_load_abort(struct fw_sysfs *fw_sysfs)
-{
- struct fw_priv *fw_priv = fw_sysfs->fw_priv;
-
- __fw_load_abort(fw_priv);
-}
-
-static LIST_HEAD(pending_fw_head);
-
-static void kill_pending_fw_fallback_reqs(bool only_kill_custom)
-{
- struct fw_priv *fw_priv;
- struct fw_priv *next;
-
- mutex_lock(&fw_lock);
- list_for_each_entry_safe(fw_priv, next, &pending_fw_head,
- pending_list) {
- if (!fw_priv->need_uevent || !only_kill_custom)
- __fw_load_abort(fw_priv);
- }
- mutex_unlock(&fw_lock);
-}
-
-static ssize_t timeout_show(struct class *class, struct class_attribute *attr,
- char *buf)
-{
- return sprintf(buf, "%d\n", loading_timeout);
-}
-
-/**
- * firmware_timeout_store - set number of seconds to wait for firmware
- * @class: device class pointer
- * @attr: device attribute pointer
- * @buf: buffer to scan for timeout value
- * @count: number of bytes in @buf
- *
- * Sets the number of seconds to wait for the firmware. Once
- * this expires an error will be returned to the driver and no
- * firmware will be provided.
- *
- * Note: zero means 'wait forever'.
- **/
-static ssize_t timeout_store(struct class *class, struct class_attribute *attr,
- const char *buf, size_t count)
-{
- loading_timeout = simple_strtol(buf, NULL, 10);
- if (loading_timeout < 0)
- loading_timeout = 0;
-
- return count;
-}
-static CLASS_ATTR_RW(timeout);
-
-static struct attribute *firmware_class_attrs[] = {
- &class_attr_timeout.attr,
- NULL,
-};
-ATTRIBUTE_GROUPS(firmware_class);
-
-static void fw_dev_release(struct device *dev)
-{
- struct fw_sysfs *fw_sysfs = to_fw_sysfs(dev);
-
- kfree(fw_sysfs);
-}
-
-static int do_firmware_uevent(struct fw_sysfs *fw_sysfs, struct kobj_uevent_env *env)
-{
- if (add_uevent_var(env, "FIRMWARE=%s", fw_sysfs->fw_priv->fw_name))
- return -ENOMEM;
- if (add_uevent_var(env, "TIMEOUT=%i", loading_timeout))
- return -ENOMEM;
- if (add_uevent_var(env, "ASYNC=%d", fw_sysfs->nowait))
- return -ENOMEM;
-
- return 0;
-}
-
-static int firmware_uevent(struct device *dev, struct kobj_uevent_env *env)
-{
- struct fw_sysfs *fw_sysfs = to_fw_sysfs(dev);
- int err = 0;
-
- mutex_lock(&fw_lock);
- if (fw_sysfs->fw_priv)
- err = do_firmware_uevent(fw_sysfs, env);
- mutex_unlock(&fw_lock);
- return err;
-}
-
-static struct class firmware_class = {
- .name = "firmware",
- .class_groups = firmware_class_groups,
- .dev_uevent = firmware_uevent,
- .dev_release = fw_dev_release,
-};
-
-static inline int register_sysfs_loader(void)
-{
- return class_register(&firmware_class);
-}
-
-static inline void unregister_sysfs_loader(void)
-{
- class_unregister(&firmware_class);
-}
-
-static ssize_t firmware_loading_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct fw_sysfs *fw_sysfs = to_fw_sysfs(dev);
- int loading = 0;
-
- mutex_lock(&fw_lock);
- if (fw_sysfs->fw_priv)
- loading = fw_sysfs_loading(fw_sysfs->fw_priv);
- mutex_unlock(&fw_lock);
-
- return sprintf(buf, "%d\n", loading);
-}
-
-/* Some architectures don't have PAGE_KERNEL_RO */
-#ifndef PAGE_KERNEL_RO
-#define PAGE_KERNEL_RO PAGE_KERNEL
-#endif
-
-/* one pages buffer should be mapped/unmapped only once */
-static int map_fw_priv_pages(struct fw_priv *fw_priv)
-{
- if (!fw_priv->is_paged_buf)
- return 0;
-
- vunmap(fw_priv->data);
- fw_priv->data = vmap(fw_priv->pages, fw_priv->nr_pages, 0,
- PAGE_KERNEL_RO);
- if (!fw_priv->data)
- return -ENOMEM;
- return 0;
-}
-
-/**
- * firmware_loading_store - set value in the 'loading' control file
- * @dev: device pointer
- * @attr: device attribute pointer
- * @buf: buffer to scan for loading control value
- * @count: number of bytes in @buf
- *
- * The relevant values are:
- *
- * 1: Start a load, discarding any previous partial load.
- * 0: Conclude the load and hand the data to the driver code.
- * -1: Conclude the load with an error and discard any written data.
- **/
-static ssize_t firmware_loading_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct fw_sysfs *fw_sysfs = to_fw_sysfs(dev);
- struct fw_priv *fw_priv;
- ssize_t written = count;
- int loading = simple_strtol(buf, NULL, 10);
- int i;
-
- mutex_lock(&fw_lock);
- fw_priv = fw_sysfs->fw_priv;
- if (fw_state_is_aborted(fw_priv))
- goto out;
-
- switch (loading) {
- case 1:
- /* discarding any previous partial load */
- if (!fw_sysfs_done(fw_priv)) {
- for (i = 0; i < fw_priv->nr_pages; i++)
- __free_page(fw_priv->pages[i]);
- vfree(fw_priv->pages);
- fw_priv->pages = NULL;
- fw_priv->page_array_size = 0;
- fw_priv->nr_pages = 0;
- fw_state_start(fw_priv);
- }
- break;
- case 0:
- if (fw_sysfs_loading(fw_priv)) {
- int rc;
-
- /*
- * Several loading requests may be pending on
- * one same firmware buf, so let all requests
- * see the mapped 'buf->data' once the loading
- * is completed.
- * */
- rc = map_fw_priv_pages(fw_priv);
- if (rc)
- dev_err(dev, "%s: map pages failed\n",
- __func__);
- else
- rc = security_kernel_post_read_file(NULL,
- fw_priv->data, fw_priv->size,
- READING_FIRMWARE);
-
- /*
- * Same logic as fw_load_abort, only the DONE bit
- * is ignored and we set ABORT only on failure.
- */
- list_del_init(&fw_priv->pending_list);
- if (rc) {
- fw_state_aborted(fw_priv);
- written = rc;
- } else {
- fw_state_done(fw_priv);
- }
- break;
- }
- /* fallthrough */
- default:
- dev_err(dev, "%s: unexpected value (%d)\n", __func__, loading);
- /* fallthrough */
- case -1:
- fw_load_abort(fw_sysfs);
- break;
- }
-out:
- mutex_unlock(&fw_lock);
- return written;
-}
-
-static DEVICE_ATTR(loading, 0644, firmware_loading_show, firmware_loading_store);
-
-static void firmware_rw_data(struct fw_priv *fw_priv, char *buffer,
- loff_t offset, size_t count, bool read)
-{
- if (read)
- memcpy(buffer, fw_priv->data + offset, count);
- else
- memcpy(fw_priv->data + offset, buffer, count);
-}
-
-static void firmware_rw(struct fw_priv *fw_priv, char *buffer,
- loff_t offset, size_t count, bool read)
-{
- while (count) {
- void *page_data;
- int page_nr = offset >> PAGE_SHIFT;
- int page_ofs = offset & (PAGE_SIZE-1);
- int page_cnt = min_t(size_t, PAGE_SIZE - page_ofs, count);
-
- page_data = kmap(fw_priv->pages[page_nr]);
-
- if (read)
- memcpy(buffer, page_data + page_ofs, page_cnt);
- else
- memcpy(page_data + page_ofs, buffer, page_cnt);
-
- kunmap(fw_priv->pages[page_nr]);
- buffer += page_cnt;
- offset += page_cnt;
- count -= page_cnt;
- }
-}
-
-static ssize_t firmware_data_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
- char *buffer, loff_t offset, size_t count)
-{
- struct device *dev = kobj_to_dev(kobj);
- struct fw_sysfs *fw_sysfs = to_fw_sysfs(dev);
- struct fw_priv *fw_priv;
- ssize_t ret_count;
-
- mutex_lock(&fw_lock);
- fw_priv = fw_sysfs->fw_priv;
- if (!fw_priv || fw_sysfs_done(fw_priv)) {
- ret_count = -ENODEV;
- goto out;
- }
- if (offset > fw_priv->size) {
- ret_count = 0;
- goto out;
- }
- if (count > fw_priv->size - offset)
- count = fw_priv->size - offset;
-
- ret_count = count;
-
- if (fw_priv->data)
- firmware_rw_data(fw_priv, buffer, offset, count, true);
- else
- firmware_rw(fw_priv, buffer, offset, count, true);
-
-out:
- mutex_unlock(&fw_lock);
- return ret_count;
-}
-
-static int fw_realloc_pages(struct fw_sysfs *fw_sysfs, int min_size)
-{
- struct fw_priv *fw_priv= fw_sysfs->fw_priv;
- int pages_needed = PAGE_ALIGN(min_size) >> PAGE_SHIFT;
-
- /* If the array of pages is too small, grow it... */
- if (fw_priv->page_array_size < pages_needed) {
- int new_array_size = max(pages_needed,
- fw_priv->page_array_size * 2);
- struct page **new_pages;
-
- new_pages = vmalloc(new_array_size * sizeof(void *));
- if (!new_pages) {
- fw_load_abort(fw_sysfs);
- return -ENOMEM;
- }
- memcpy(new_pages, fw_priv->pages,
- fw_priv->page_array_size * sizeof(void *));
- memset(&new_pages[fw_priv->page_array_size], 0, sizeof(void *) *
- (new_array_size - fw_priv->page_array_size));
- vfree(fw_priv->pages);
- fw_priv->pages = new_pages;
- fw_priv->page_array_size = new_array_size;
- }
-
- while (fw_priv->nr_pages < pages_needed) {
- fw_priv->pages[fw_priv->nr_pages] =
- alloc_page(GFP_KERNEL | __GFP_HIGHMEM);
-
- if (!fw_priv->pages[fw_priv->nr_pages]) {
- fw_load_abort(fw_sysfs);
- return -ENOMEM;
- }
- fw_priv->nr_pages++;
- }
- return 0;
-}
-
-/**
- * firmware_data_write - write method for firmware
- * @filp: open sysfs file
- * @kobj: kobject for the device
- * @bin_attr: bin_attr structure
- * @buffer: buffer being written
- * @offset: buffer offset for write in total data store area
- * @count: buffer size
- *
- * Data written to the 'data' attribute will be later handed to
- * the driver as a firmware image.
- **/
-static ssize_t firmware_data_write(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
- char *buffer, loff_t offset, size_t count)
-{
- struct device *dev = kobj_to_dev(kobj);
- struct fw_sysfs *fw_sysfs = to_fw_sysfs(dev);
- struct fw_priv *fw_priv;
- ssize_t retval;
-
- if (!capable(CAP_SYS_RAWIO))
- return -EPERM;
-
- mutex_lock(&fw_lock);
- fw_priv = fw_sysfs->fw_priv;
- if (!fw_priv || fw_sysfs_done(fw_priv)) {
- retval = -ENODEV;
- goto out;
- }
-
- if (fw_priv->data) {
- if (offset + count > fw_priv->allocated_size) {
- retval = -ENOMEM;
- goto out;
- }
- firmware_rw_data(fw_priv, buffer, offset, count, false);
- retval = count;
- } else {
- retval = fw_realloc_pages(fw_sysfs, offset + count);
- if (retval)
- goto out;
-
- retval = count;
- firmware_rw(fw_priv, buffer, offset, count, false);
- }
-
- fw_priv->size = max_t(size_t, offset + count, fw_priv->size);
-out:
- mutex_unlock(&fw_lock);
- return retval;
-}
-
-static struct bin_attribute firmware_attr_data = {
- .attr = { .name = "data", .mode = 0644 },
- .size = 0,
- .read = firmware_data_read,
- .write = firmware_data_write,
-};
-
-static struct attribute *fw_dev_attrs[] = {
- &dev_attr_loading.attr,
- NULL
-};
-
-static struct bin_attribute *fw_dev_bin_attrs[] = {
- &firmware_attr_data,
- NULL
-};
-
-static const struct attribute_group fw_dev_attr_group = {
- .attrs = fw_dev_attrs,
- .bin_attrs = fw_dev_bin_attrs,
-};
-
-static const struct attribute_group *fw_dev_attr_groups[] = {
- &fw_dev_attr_group,
- NULL
-};
-
-static struct fw_sysfs *
-fw_create_instance(struct firmware *firmware, const char *fw_name,
- struct device *device, unsigned int opt_flags)
-{
- struct fw_sysfs *fw_sysfs;
- struct device *f_dev;
-
- fw_sysfs = kzalloc(sizeof(*fw_sysfs), GFP_KERNEL);
- if (!fw_sysfs) {
- fw_sysfs = ERR_PTR(-ENOMEM);
- goto exit;
- }
-
- fw_sysfs->nowait = !!(opt_flags & FW_OPT_NOWAIT);
- fw_sysfs->fw = firmware;
- f_dev = &fw_sysfs->dev;
-
- device_initialize(f_dev);
- dev_set_name(f_dev, "%s", fw_name);
- f_dev->parent = device;
- f_dev->class = &firmware_class;
- f_dev->groups = fw_dev_attr_groups;
-exit:
- return fw_sysfs;
-}
-
-/* load a firmware via user helper */
-static int _request_firmware_load(struct fw_sysfs *fw_sysfs,
- unsigned int opt_flags, long timeout)
-{
- int retval = 0;
- struct device *f_dev = &fw_sysfs->dev;
- struct fw_priv *fw_priv = fw_sysfs->fw_priv;
-
- /* fall back on userspace loading */
- if (!fw_priv->data)
- fw_priv->is_paged_buf = true;
-
- dev_set_uevent_suppress(f_dev, true);
-
- retval = device_add(f_dev);
- if (retval) {
- dev_err(f_dev, "%s: device_register failed\n", __func__);
- goto err_put_dev;
- }
-
- mutex_lock(&fw_lock);
- list_add(&fw_priv->pending_list, &pending_fw_head);
- mutex_unlock(&fw_lock);
-
- if (opt_flags & FW_OPT_UEVENT) {
- fw_priv->need_uevent = true;
- dev_set_uevent_suppress(f_dev, false);
- dev_dbg(f_dev, "firmware: requesting %s\n", fw_priv->fw_name);
- kobject_uevent(&fw_sysfs->dev.kobj, KOBJ_ADD);
- } else {
- timeout = MAX_JIFFY_OFFSET;
- }
-
- retval = fw_sysfs_wait_timeout(fw_priv, timeout);
- if (retval < 0) {
- mutex_lock(&fw_lock);
- fw_load_abort(fw_sysfs);
- mutex_unlock(&fw_lock);
- }
-
- if (fw_state_is_aborted(fw_priv)) {
- if (retval == -ERESTARTSYS)
- retval = -EINTR;
- else
- retval = -EAGAIN;
- } else if (fw_priv->is_paged_buf && !fw_priv->data)
- retval = -ENOMEM;
-
- device_del(f_dev);
-err_put_dev:
- put_device(f_dev);
- return retval;
-}
-
-static int fw_load_from_user_helper(struct firmware *firmware,
- const char *name, struct device *device,
- unsigned int opt_flags)
-{
- struct fw_sysfs *fw_sysfs;
- long timeout;
- int ret;
-
- timeout = firmware_loading_timeout();
- if (opt_flags & FW_OPT_NOWAIT) {
- timeout = usermodehelper_read_lock_wait(timeout);
- if (!timeout) {
- dev_dbg(device, "firmware: %s loading timed out\n",
- name);
- return -EBUSY;
- }
- } else {
- ret = usermodehelper_read_trylock();
- if (WARN_ON(ret)) {
- dev_err(device, "firmware: %s will not be loaded\n",
- name);
- return ret;
- }
- }
-
- fw_sysfs = fw_create_instance(firmware, name, device, opt_flags);
- if (IS_ERR(fw_sysfs)) {
- ret = PTR_ERR(fw_sysfs);
- goto out_unlock;
- }
-
- fw_sysfs->fw_priv = firmware->priv;
- ret = _request_firmware_load(fw_sysfs, opt_flags, timeout);
-
- if (!ret)
- ret = assign_fw(firmware, device, opt_flags);
-
-out_unlock:
- usermodehelper_read_unlock();
-
- return ret;
-}
-
-#ifdef CONFIG_FW_LOADER_USER_HELPER_FALLBACK
-static bool fw_force_sysfs_fallback(unsigned int opt_flags)
-{
- return true;
-}
-#else
-static bool fw_force_sysfs_fallback(unsigned int opt_flags)
-{
- if (!(opt_flags & FW_OPT_USERHELPER))
- return false;
- return true;
-}
-#endif
-
-static bool fw_run_sysfs_fallback(unsigned int opt_flags)
-{
- if ((opt_flags & FW_OPT_NOFALLBACK))
- return false;
-
- return fw_force_sysfs_fallback(opt_flags);
-}
-
-static int fw_sysfs_fallback(struct firmware *fw, const char *name,
- struct device *device,
- unsigned int opt_flags,
- int ret)
-{
- if (!fw_run_sysfs_fallback(opt_flags))
- return ret;
-
- dev_warn(device, "Falling back to user helper\n");
- return fw_load_from_user_helper(fw, name, device, opt_flags);
-}
-#else /* CONFIG_FW_LOADER_USER_HELPER */
-static int fw_sysfs_fallback(struct firmware *fw, const char *name,
- struct device *device,
- unsigned int opt_flags,
- int ret)
-{
- /* Keep carrying over the same error */
- return ret;
-}
-
-static inline void kill_pending_fw_fallback_reqs(bool only_kill_custom) { }
-
-static inline int register_sysfs_loader(void)
-{
- return 0;
-}
-
-static inline void unregister_sysfs_loader(void)
-{
-}
-
-#endif /* CONFIG_FW_LOADER_USER_HELPER */
-
/* prepare firmware and firmware_buf structs;
* return 0 if a firmware is already assigned, 1 if need to load one,
* or a negative error code
@@ -1377,6 +657,30 @@ int request_firmware_direct(const struct firmware **firmware_p,
EXPORT_SYMBOL_GPL(request_firmware_direct);
/**
+ * firmware_request_cache() - cache firmware for suspend so resume can use it
+ * @name: name of firmware file
+ * @device: device for which the firmware should be cached
+ *
+ * Some devices have an optimization that lets them avoid reloading firmware on
+ * system reboot, but they may still need the firmware to be present when
+ * resuming from suspend. This routine can be used to ensure the firmware is
+ * cached and therefore available on resume from suspend in these situations.
+ * This helper is not compatible with drivers that use
+ * request_firmware_into_buf() or request_firmware_nowait() with no uevent set.
+ **/
+int firmware_request_cache(struct device *device, const char *name)
+{
+ int ret;
+
+ mutex_lock(&fw_lock);
+ ret = fw_add_devm_name(device, name);
+ mutex_unlock(&fw_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(firmware_request_cache);
+
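A minimal sketch of how a driver might use the new helper, assuming a hypothetical foo driver and firmware name (only firmware_request_cache() itself is introduced by this patch):

#include <linux/device.h>
#include <linux/firmware.h>

#define FOO_FW_NAME "foo/foo-fw.bin"	/* hypothetical image name */

static int foo_probe(struct device *dev)
{
	int ret;

	/*
	 * The hardware keeps its firmware across reboots, so the driver
	 * never calls request_firmware() at probe time; still ask the core
	 * to cache the image so it is available on resume from suspend.
	 */
	ret = firmware_request_cache(dev, FOO_FW_NAME);
	if (ret)
		dev_warn(dev, "failed to cache %s: %d\n", FOO_FW_NAME, ret);

	return 0;
}
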
+/**
* request_firmware_into_buf - load firmware into a previously allocated buffer
* @firmware_p: pointer to firmware image
* @name: name of firmware file
@@ -1397,6 +701,9 @@ request_firmware_into_buf(const struct firmware **firmware_p, const char *name,
{
int ret;
+ if (fw_cache_is_setup(device, name))
+ return -EOPNOTSUPP;
+
__module_get(THIS_MODULE);
ret = _request_firmware(firmware_p, name, device, buf, size,
FW_OPT_UEVENT | FW_OPT_NOCACHE);
@@ -1494,6 +801,12 @@ request_firmware_nowait(
fw_work->opt_flags = FW_OPT_NOWAIT |
(uevent ? FW_OPT_UEVENT : FW_OPT_USERHELPER);
+ if (!uevent && fw_cache_is_setup(device, name)) {
+ kfree_const(fw_work->name);
+ kfree(fw_work);
+ return -EOPNOTSUPP;
+ }
+
if (!try_module_get(module)) {
kfree_const(fw_work->name);
kfree(fw_work);
@@ -1741,7 +1054,6 @@ static void __device_uncache_fw_images(void)
static void device_cache_fw_images(void)
{
struct firmware_cache *fwc = &fw_cache;
- int old_timeout;
DEFINE_WAIT(wait);
pr_debug("%s\n", __func__);
@@ -1749,16 +1061,7 @@ static void device_cache_fw_images(void)
/* cancel uncache work */
cancel_delayed_work_sync(&fwc->work);
- /*
- * use small loading timeout for caching devices' firmware
- * because all these firmware images have been loaded
- * successfully at lease once, also system is ready for
- * completing firmware loading now. The maximum size of
- * firmware in current distributions is about 2M bytes,
- * so 10 secs should be enough.
- */
- old_timeout = loading_timeout;
- loading_timeout = 10;
+ fw_fallback_set_cache_timeout();
mutex_lock(&fw_lock);
fwc->state = FW_LOADER_START_CACHE;
@@ -1768,7 +1071,7 @@ static void device_cache_fw_images(void)
/* wait for completion of caching firmware for all devices */
async_synchronize_full_domain(&fw_cache_domain);
- loading_timeout = old_timeout;
+ fw_fallback_set_default_timeout();
}
/**
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index fe4b24f05f6a..bffe8616bd55 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -187,13 +187,14 @@ int memory_isolate_notify(unsigned long val, void *v)
}
/*
- * The probe routines leave the pages reserved, just as the bootmem code does.
- * Make sure they're still that way.
+ * The probe routines leave the pages uninitialized, just as the bootmem code
+ * does. Make sure we do not access them, but instead use only information from
+ * within sections.
*/
-static bool pages_correctly_reserved(unsigned long start_pfn)
+static bool pages_correctly_probed(unsigned long start_pfn)
{
- int i, j;
- struct page *page;
+ unsigned long section_nr = pfn_to_section_nr(start_pfn);
+ unsigned long section_nr_end = section_nr + sections_per_block;
unsigned long pfn = start_pfn;
/*
@@ -201,21 +202,24 @@ static bool pages_correctly_reserved(unsigned long start_pfn)
* SPARSEMEM_VMEMMAP. We lookup the page once per section
* and assume memmap is contiguous within each section
*/
- for (i = 0; i < sections_per_block; i++, pfn += PAGES_PER_SECTION) {
+ for (; section_nr < section_nr_end; section_nr++) {
if (WARN_ON_ONCE(!pfn_valid(pfn)))
return false;
- page = pfn_to_page(pfn);
-
- for (j = 0; j < PAGES_PER_SECTION; j++) {
- if (PageReserved(page + j))
- continue;
-
- printk(KERN_WARNING "section number %ld page number %d "
- "not reserved, was it already online?\n",
- pfn_to_section_nr(pfn), j);
+ if (!present_section_nr(section_nr)) {
+ pr_warn("section %ld pfn[%lx, %lx) not present",
+ section_nr, pfn, pfn + PAGES_PER_SECTION);
+ return false;
+ } else if (!valid_section_nr(section_nr)) {
+ pr_warn("section %ld pfn[%lx, %lx) no valid memmap",
+ section_nr, pfn, pfn + PAGES_PER_SECTION);
+ return false;
+ } else if (online_section_nr(section_nr)) {
+ pr_warn("section %ld pfn[%lx, %lx) is already online",
+ section_nr, pfn, pfn + PAGES_PER_SECTION);
return false;
}
+ pfn += PAGES_PER_SECTION;
}
return true;
@@ -237,7 +241,7 @@ memory_block_action(unsigned long phys_index, unsigned long action, int online_t
switch (action) {
case MEM_ONLINE:
- if (!pages_correctly_reserved(start_pfn))
+ if (!pages_correctly_probed(start_pfn))
return -EBUSY;
ret = online_pages(start_pfn, nr_pages, online_type);
@@ -708,7 +712,7 @@ static int add_memory_block(int base_section_nr)
* need an interface for the VM to add new memory regions,
* but without onlining it.
*/
-int register_new_memory(int nid, struct mem_section *section)
+int hotplug_memory_register(int nid, struct mem_section *section)
{
int ret = 0;
struct memory_block *mem;
@@ -727,7 +731,7 @@ int register_new_memory(int nid, struct mem_section *section)
}
if (mem->section_count == sections_per_block)
- ret = register_mem_sect_under_node(mem, nid);
+ ret = register_mem_sect_under_node(mem, nid, false);
out:
mutex_unlock(&mem_sysfs_mutex);
return ret;
@@ -833,11 +837,8 @@ int __init memory_dev_init(void)
* during boot and have been initialized
*/
mutex_lock(&mem_sysfs_mutex);
- for (i = 0; i < NR_MEM_SECTIONS; i += sections_per_block) {
- /* Don't iterate over sections we know are !present: */
- if (i > __highest_present_section_nr)
- break;
-
+ for (i = 0; i <= __highest_present_section_nr;
+ i += sections_per_block) {
err = add_memory_block(i);
if (!ret)
ret = err;
diff --git a/drivers/base/node.c b/drivers/base/node.c
index ee090ab9171c..7a3a580821e0 100644
--- a/drivers/base/node.c
+++ b/drivers/base/node.c
@@ -315,7 +315,9 @@ static int register_node(struct node *node, int num)
node->dev.groups = node_dev_groups;
error = device_register(&node->dev);
- if (!error){
+ if (error)
+ put_device(&node->dev);
+ else {
hugetlb_register_node(node);
compaction_register_node(node);
@@ -397,13 +399,16 @@ static int __ref get_nid_for_pfn(unsigned long pfn)
}
/* register memory section under specified node if it spans that node */
-int register_mem_sect_under_node(struct memory_block *mem_blk, int nid)
+int register_mem_sect_under_node(struct memory_block *mem_blk, int nid,
+ bool check_nid)
{
int ret;
unsigned long pfn, sect_start_pfn, sect_end_pfn;
if (!mem_blk)
return -EFAULT;
+
+ mem_blk->nid = nid;
if (!node_online(nid))
return 0;
@@ -423,11 +428,18 @@ int register_mem_sect_under_node(struct memory_block *mem_blk, int nid)
continue;
}
- page_nid = get_nid_for_pfn(pfn);
- if (page_nid < 0)
- continue;
- if (page_nid != nid)
- continue;
+ /*
+ * We need to check whether the page belongs to nid only in the
+ * boot case; during hotplug we know that all pages in the memory
+ * block belong to the same node.
+ */
+ if (check_nid) {
+ page_nid = get_nid_for_pfn(pfn);
+ if (page_nid < 0)
+ continue;
+ if (page_nid != nid)
+ continue;
+ }
ret = sysfs_create_link_nowarn(&node_devices[nid]->dev.kobj,
&mem_blk->dev.kobj,
kobject_name(&mem_blk->dev.kobj));
@@ -502,7 +514,7 @@ int link_mem_sections(int nid, unsigned long start_pfn, unsigned long nr_pages)
mem_blk = find_memory_block_hinted(mem_sect, mem_blk);
- ret = register_mem_sect_under_node(mem_blk, nid);
+ ret = register_mem_sect_under_node(mem_blk, nid, true);
if (!err)
err = ret;
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index f1bf7b38d91c..8075ddc70a17 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -1153,8 +1153,10 @@ int __init platform_bus_init(void)
early_platform_cleanup();
error = device_register(&platform_bus);
- if (error)
+ if (error) {
+ put_device(&platform_bus);
return error;
+ }
error = bus_register(&platform_bus_type);
if (error)
device_unregister(&platform_bus);
diff --git a/drivers/base/power/power.h b/drivers/base/power/power.h
index 21244c53e377..86e67e70b509 100644
--- a/drivers/base/power/power.h
+++ b/drivers/base/power/power.h
@@ -31,6 +31,7 @@ struct wake_irq {
struct device *dev;
unsigned int status;
int irq;
+ const char *name;
};
extern void dev_pm_arm_wake_irq(struct wake_irq *wirq);
diff --git a/drivers/base/power/wakeirq.c b/drivers/base/power/wakeirq.c
index 6637fc319269..b8fa5c0f2d13 100644
--- a/drivers/base/power/wakeirq.c
+++ b/drivers/base/power/wakeirq.c
@@ -112,6 +112,7 @@ void dev_pm_clear_wake_irq(struct device *dev)
free_irq(wirq->irq, wirq);
wirq->status &= ~WAKE_IRQ_DEDICATED_MASK;
}
+ kfree(wirq->name);
kfree(wirq);
}
EXPORT_SYMBOL_GPL(dev_pm_clear_wake_irq);
@@ -184,6 +185,12 @@ int dev_pm_set_dedicated_wake_irq(struct device *dev, int irq)
if (!wirq)
return -ENOMEM;
+ wirq->name = kasprintf(GFP_KERNEL, "%s:wakeup", dev_name(dev));
+ if (!wirq->name) {
+ err = -ENOMEM;
+ goto err_free;
+ }
+
wirq->dev = dev;
wirq->irq = irq;
irq_set_status_flags(irq, IRQ_NOAUTOEN);
@@ -196,9 +203,9 @@ int dev_pm_set_dedicated_wake_irq(struct device *dev, int irq)
* so we use a threaded irq.
*/
err = request_threaded_irq(irq, NULL, handle_threaded_wake_irq,
- IRQF_ONESHOT, dev_name(dev), wirq);
+ IRQF_ONESHOT, wirq->name, wirq);
if (err)
- goto err_free;
+ goto err_free_name;
err = dev_pm_attach_wake_irq(dev, irq, wirq);
if (err)
@@ -210,6 +217,8 @@ int dev_pm_set_dedicated_wake_irq(struct device *dev, int irq)
err_free_irq:
free_irq(irq, wirq);
+err_free_name:
+ kfree(wirq->name);
err_free:
kfree(wirq);
diff --git a/drivers/base/regmap/regmap-debugfs.c b/drivers/base/regmap/regmap-debugfs.c
index f3266334063e..87b562e49a43 100644
--- a/drivers/base/regmap/regmap-debugfs.c
+++ b/drivers/base/regmap/regmap-debugfs.c
@@ -25,6 +25,7 @@ struct regmap_debugfs_node {
struct list_head link;
};
+static unsigned int dummy_index;
static struct dentry *regmap_debugfs_root;
static LIST_HEAD(regmap_debugfs_early_list);
static DEFINE_MUTEX(regmap_debugfs_early_lock);
@@ -40,6 +41,7 @@ static ssize_t regmap_name_read_file(struct file *file,
loff_t *ppos)
{
struct regmap *map = file->private_data;
+ const char *name = "nodev";
int ret;
char *buf;
@@ -47,7 +49,10 @@ static ssize_t regmap_name_read_file(struct file *file,
if (!buf)
return -ENOMEM;
- ret = snprintf(buf, PAGE_SIZE, "%s\n", map->dev->driver->name);
+ if (map->dev && map->dev->driver)
+ name = map->dev->driver->name;
+
+ ret = snprintf(buf, PAGE_SIZE, "%s\n", name);
if (ret < 0) {
kfree(buf);
return ret;
@@ -569,9 +574,20 @@ void regmap_debugfs_init(struct regmap *map, const char *name)
name = devname;
}
+ if (!strcmp(name, "dummy")) {
+ map->debugfs_name = kasprintf(GFP_KERNEL, "dummy%d",
+ dummy_index);
+ name = map->debugfs_name;
+ dummy_index++;
+ }
+
map->debugfs = debugfs_create_dir(name, regmap_debugfs_root);
if (!map->debugfs) {
- dev_warn(map->dev, "Failed to create debugfs directory\n");
+ dev_warn(map->dev,
+ "Failed to create %s debugfs directory\n", name);
+
+ kfree(map->debugfs_name);
+ map->debugfs_name = NULL;
return;
}
diff --git a/drivers/base/regmap/regmap-i2c.c b/drivers/base/regmap/regmap-i2c.c
index 4735318f4268..056acde5e7d3 100644
--- a/drivers/base/regmap/regmap-i2c.c
+++ b/drivers/base/regmap/regmap-i2c.c
@@ -217,8 +217,6 @@ static int regmap_i2c_smbus_i2c_write(void *context, const void *data,
if (count < 1)
return -EINVAL;
- if (count >= I2C_SMBUS_BLOCK_MAX)
- return -E2BIG;
--count;
return i2c_smbus_write_i2c_block_data(i2c, ((u8 *)data)[0], count,
@@ -235,8 +233,6 @@ static int regmap_i2c_smbus_i2c_read(void *context, const void *reg,
if (reg_size != 1 || val_size < 1)
return -EINVAL;
- if (val_size >= I2C_SMBUS_BLOCK_MAX)
- return -E2BIG;
ret = i2c_smbus_read_i2c_block_data(i2c, ((u8 *)reg)[0], val_size, val);
if (ret == val_size)
diff --git a/drivers/base/regmap/regmap-mmio.c b/drivers/base/regmap/regmap-mmio.c
index 5189fd6182f6..5cadfd3394d8 100644
--- a/drivers/base/regmap/regmap-mmio.c
+++ b/drivers/base/regmap/regmap-mmio.c
@@ -28,6 +28,8 @@
struct regmap_mmio_context {
void __iomem *regs;
unsigned val_bytes;
+
+ bool attached_clk;
struct clk *clk;
void (*reg_write)(struct regmap_mmio_context *ctx,
@@ -363,4 +365,26 @@ struct regmap *__devm_regmap_init_mmio_clk(struct device *dev,
}
EXPORT_SYMBOL_GPL(__devm_regmap_init_mmio_clk);
+int regmap_mmio_attach_clk(struct regmap *map, struct clk *clk)
+{
+ struct regmap_mmio_context *ctx = map->bus_context;
+
+ ctx->clk = clk;
+ ctx->attached_clk = true;
+
+ return clk_prepare(ctx->clk);
+}
+EXPORT_SYMBOL_GPL(regmap_mmio_attach_clk);
+
+void regmap_mmio_detach_clk(struct regmap *map)
+{
+ struct regmap_mmio_context *ctx = map->bus_context;
+
+ clk_unprepare(ctx->clk);
+
+ ctx->attached_clk = false;
+ ctx->clk = NULL;
+}
+EXPORT_SYMBOL_GPL(regmap_mmio_detach_clk);
+
MODULE_LICENSE("GPL v2");
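The two new regmap-mmio exports let a driver attach a clock it already owns to an MMIO regmap after creation, instead of having regmap_init_mmio_clk() look the clock up by id. A rough sketch of the expected call pattern, with a made-up device and register layout (the helpers are assumed to be declared in <linux/regmap.h> alongside the rest of this series):

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/regmap.h>

/* foo_* names below are hypothetical; only the attach/detach helpers are new. */
static const struct regmap_config foo_regmap_cfg = {
	.reg_bits	= 32,
	.val_bits	= 32,
	.reg_stride	= 4,
};

static int foo_setup(struct device *dev, void __iomem *base, struct clk *clk)
{
	struct regmap *map;
	int ret;

	/* Create the MMIO regmap without naming a clock id... */
	map = devm_regmap_init_mmio(dev, base, &foo_regmap_cfg);
	if (IS_ERR(map))
		return PTR_ERR(map);

	/* ...then hand over the clock we already hold; this prepares it. */
	ret = regmap_mmio_attach_clk(map, clk);
	if (ret)
		return ret;

	/* On teardown, regmap_mmio_detach_clk(map) unprepares and drops it. */
	return 0;
}
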
diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
index ee302ccdfbc8..3bc84885eb91 100644
--- a/drivers/base/regmap/regmap.c
+++ b/drivers/base/regmap/regmap.c
@@ -99,7 +99,7 @@ bool regmap_cached(struct regmap *map, unsigned int reg)
int ret;
unsigned int val;
- if (map->cache == REGCACHE_NONE)
+ if (map->cache_type == REGCACHE_NONE)
return false;
if (!map->cache_ops)
@@ -174,7 +174,7 @@ static bool regmap_volatile_range(struct regmap *map, unsigned int reg,
unsigned int i;
for (i = 0; i < num; i++)
- if (!regmap_volatile(map, reg + i))
+ if (!regmap_volatile(map, reg + regmap_get_offset(map, i)))
return false;
return true;
@@ -1116,6 +1116,8 @@ skip_format_initialization:
ret = regmap_attach_dev(dev, map, config);
if (ret != 0)
goto err_regcache;
+ } else {
+ regmap_debugfs_init(map, config->name);
}
return map;
@@ -1438,8 +1440,8 @@ static void regmap_set_work_buf_flag_mask(struct regmap *map, int max_bytes,
buf[i] |= (mask >> (8 * i)) & 0xff;
}
-int _regmap_raw_write(struct regmap *map, unsigned int reg,
- const void *val, size_t val_len)
+static int _regmap_raw_write_impl(struct regmap *map, unsigned int reg,
+ const void *val, size_t val_len)
{
struct regmap_range_node *range;
unsigned long flags;
@@ -1490,8 +1492,9 @@ int _regmap_raw_write(struct regmap *map, unsigned int reg,
while (val_num > win_residue) {
dev_dbg(map->dev, "Writing window %d/%zu\n",
win_residue, val_len / map->format.val_bytes);
- ret = _regmap_raw_write(map, reg, val, win_residue *
- map->format.val_bytes);
+ ret = _regmap_raw_write_impl(map, reg, val,
+ win_residue *
+ map->format.val_bytes);
if (ret != 0)
return ret;
@@ -1707,11 +1710,11 @@ static int _regmap_bus_raw_write(void *context, unsigned int reg,
map->format.format_val(map->work_buf + map->format.reg_bytes
+ map->format.pad_bytes, val, 0);
- return _regmap_raw_write(map, reg,
- map->work_buf +
- map->format.reg_bytes +
- map->format.pad_bytes,
- map->format.val_bytes);
+ return _regmap_raw_write_impl(map, reg,
+ map->work_buf +
+ map->format.reg_bytes +
+ map->format.pad_bytes,
+ map->format.val_bytes);
}
static inline void *_regmap_map_get_context(struct regmap *map)
@@ -1806,6 +1809,44 @@ int regmap_write_async(struct regmap *map, unsigned int reg, unsigned int val)
}
EXPORT_SYMBOL_GPL(regmap_write_async);
+int _regmap_raw_write(struct regmap *map, unsigned int reg,
+ const void *val, size_t val_len)
+{
+ size_t val_bytes = map->format.val_bytes;
+ size_t val_count = val_len / val_bytes;
+ size_t chunk_count, chunk_bytes;
+ size_t chunk_regs = val_count;
+ int ret, i;
+
+ if (!val_count)
+ return -EINVAL;
+
+ if (map->use_single_write)
+ chunk_regs = 1;
+ else if (map->max_raw_write && val_len > map->max_raw_write)
+ chunk_regs = map->max_raw_write / val_bytes;
+
+ chunk_count = val_count / chunk_regs;
+ chunk_bytes = chunk_regs * val_bytes;
+
+ /* Write as many whole chunks of chunk_bytes as possible */
+ for (i = 0; i < chunk_count; i++) {
+ ret = _regmap_raw_write_impl(map, reg, val, chunk_bytes);
+ if (ret)
+ return ret;
+
+ reg += regmap_get_offset(map, chunk_regs);
+ val += chunk_bytes;
+ val_len -= chunk_bytes;
+ }
+
+ /* Write remaining bytes */
+ if (val_len)
+ ret = _regmap_raw_write_impl(map, reg, val, val_len);
+
+ return ret;
+}
+
/**
* regmap_raw_write() - Write raw values to one or more registers
*
@@ -1831,8 +1872,6 @@ int regmap_raw_write(struct regmap *map, unsigned int reg,
return -EINVAL;
if (val_len % map->format.val_bytes)
return -EINVAL;
- if (map->max_raw_write && map->max_raw_write > val_len)
- return -E2BIG;
map->lock(map->lock_arg);
@@ -1923,23 +1962,15 @@ int regmap_bulk_write(struct regmap *map, unsigned int reg, const void *val,
{
int ret = 0, i;
size_t val_bytes = map->format.val_bytes;
- size_t total_size = val_bytes * val_count;
if (!IS_ALIGNED(reg, map->reg_stride))
return -EINVAL;
/*
- * Some devices don't support bulk write, for
- * them we have a series of single write operations in the first two if
- * blocks.
- *
- * The first if block is used for memory mapped io. It does not allow
- * val_bytes of 3 for example.
- * The second one is for busses that do not provide raw I/O.
- * The third one is used for busses which do not have these limitations
- * and can write arbitrary value lengths.
+ * Some devices don't support bulk write; for them we fall back to a
+ * series of single write operations.
*/
- if (!map->bus) {
+ if (!map->bus || !map->format.parse_inplace) {
map->lock(map->lock_arg);
for (i = 0; i < val_count; i++) {
unsigned int ival;
@@ -1972,81 +2003,17 @@ int regmap_bulk_write(struct regmap *map, unsigned int reg, const void *val,
}
out:
map->unlock(map->lock_arg);
- } else if (map->bus && !map->format.parse_inplace) {
- const u8 *u8 = val;
- const u16 *u16 = val;
- const u32 *u32 = val;
- unsigned int ival;
-
- for (i = 0; i < val_count; i++) {
- switch (map->format.val_bytes) {
- case 4:
- ival = u32[i];
- break;
- case 2:
- ival = u16[i];
- break;
- case 1:
- ival = u8[i];
- break;
- default:
- return -EINVAL;
- }
-
- ret = regmap_write(map, reg + (i * map->reg_stride),
- ival);
- if (ret)
- return ret;
- }
- } else if (map->use_single_write ||
- (map->max_raw_write && map->max_raw_write < total_size)) {
- int chunk_stride = map->reg_stride;
- size_t chunk_size = val_bytes;
- size_t chunk_count = val_count;
-
- if (!map->use_single_write) {
- chunk_size = map->max_raw_write;
- if (chunk_size % val_bytes)
- chunk_size -= chunk_size % val_bytes;
- chunk_count = total_size / chunk_size;
- chunk_stride *= chunk_size / val_bytes;
- }
-
- map->lock(map->lock_arg);
- /* Write as many bytes as possible with chunk_size */
- for (i = 0; i < chunk_count; i++) {
- ret = _regmap_raw_write(map,
- reg + (i * chunk_stride),
- val + (i * chunk_size),
- chunk_size);
- if (ret)
- break;
- }
-
- /* Write remaining bytes */
- if (!ret && chunk_size * i < total_size) {
- ret = _regmap_raw_write(map, reg + (i * chunk_stride),
- val + (i * chunk_size),
- total_size - i * chunk_size);
- }
- map->unlock(map->lock_arg);
} else {
void *wval;
- if (!val_count)
- return -EINVAL;
-
wval = kmemdup(val, val_count * val_bytes, map->alloc_flags);
- if (!wval) {
- dev_err(map->dev, "Error in memory allocation\n");
+ if (!wval)
return -ENOMEM;
- }
+
for (i = 0; i < val_count * val_bytes; i += val_bytes)
map->format.parse_inplace(wval + i);
- map->lock(map->lock_arg);
- ret = _regmap_raw_write(map, reg, wval, val_bytes * val_count);
- map->unlock(map->lock_arg);
+ ret = regmap_raw_write(map, reg, wval, val_bytes * val_count);
kfree(wval);
}
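With the open-coded chunking removed, regmap_bulk_write() either walks the registers one at a time (no bus or no parse_inplace) or defers to regmap_raw_write(), which now performs the max_raw_write splitting itself. Callers are unaffected either way; a short illustrative sketch (the register address, values, and foo_load_coeffs() itself are invented):

#include <linux/kernel.h>
#include <linux/regmap.h>

/* Load four consecutive 16-bit coefficient registers in one call. */
static int foo_load_coeffs(struct regmap *map)
{
	static const u16 coeffs[4] = { 0x0001, 0x0203, 0x0405, 0x0607 };

	/* Any splitting into single or chunked writes happens internally. */
	return regmap_bulk_write(map, 0x40, coeffs, ARRAY_SIZE(coeffs));
}
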
@@ -2542,18 +2509,39 @@ int regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
if (regmap_volatile_range(map, reg, val_count) || map->cache_bypass ||
map->cache_type == REGCACHE_NONE) {
+ size_t chunk_count, chunk_bytes;
+ size_t chunk_regs = val_count;
+
if (!map->bus->read) {
ret = -ENOTSUPP;
goto out;
}
- if (map->max_raw_read && map->max_raw_read < val_len) {
- ret = -E2BIG;
- goto out;
- }
- /* Physical block read if there's no cache involved */
- ret = _regmap_raw_read(map, reg, val, val_len);
+ if (map->use_single_read)
+ chunk_regs = 1;
+ else if (map->max_raw_read && val_len > map->max_raw_read)
+ chunk_regs = map->max_raw_read / val_bytes;
+
+ chunk_count = val_count / chunk_regs;
+ chunk_bytes = chunk_regs * val_bytes;
+ /* Read bytes that fit into whole chunks */
+ for (i = 0; i < chunk_count; i++) {
+ ret = _regmap_raw_read(map, reg, val, chunk_bytes);
+ if (ret != 0)
+ goto out;
+
+ reg += regmap_get_offset(map, chunk_regs);
+ val += chunk_bytes;
+ val_len -= chunk_bytes;
+ }
+
+ /* Read remaining bytes */
+ if (val_len) {
+ ret = _regmap_raw_read(map, reg, val, val_len);
+ if (ret != 0)
+ goto out;
+ }
} else {
/* Otherwise go word by word for the cache; should be low
* cost as we expect to hit the cache.
@@ -2653,108 +2641,60 @@ int regmap_bulk_read(struct regmap *map, unsigned int reg, void *val,
if (!IS_ALIGNED(reg, map->reg_stride))
return -EINVAL;
+ if (val_count == 0)
+ return -EINVAL;
if (map->bus && map->format.parse_inplace && (vol || map->cache_type == REGCACHE_NONE)) {
- /*
- * Some devices does not support bulk read, for
- * them we have a series of single read operations.
- */
- size_t total_size = val_bytes * val_count;
-
- if (!map->use_single_read &&
- (!map->max_raw_read || map->max_raw_read > total_size)) {
- ret = regmap_raw_read(map, reg, val,
- val_bytes * val_count);
- if (ret != 0)
- return ret;
- } else {
- /*
- * Some devices do not support bulk read or do not
- * support large bulk reads, for them we have a series
- * of read operations.
- */
- int chunk_stride = map->reg_stride;
- size_t chunk_size = val_bytes;
- size_t chunk_count = val_count;
-
- if (!map->use_single_read) {
- chunk_size = map->max_raw_read;
- if (chunk_size % val_bytes)
- chunk_size -= chunk_size % val_bytes;
- chunk_count = total_size / chunk_size;
- chunk_stride *= chunk_size / val_bytes;
- }
-
- /* Read bytes that fit into a multiple of chunk_size */
- for (i = 0; i < chunk_count; i++) {
- ret = regmap_raw_read(map,
- reg + (i * chunk_stride),
- val + (i * chunk_size),
- chunk_size);
- if (ret != 0)
- return ret;
- }
-
- /* Read remaining bytes */
- if (chunk_size * i < total_size) {
- ret = regmap_raw_read(map,
- reg + (i * chunk_stride),
- val + (i * chunk_size),
- total_size - i * chunk_size);
- if (ret != 0)
- return ret;
- }
- }
+ ret = regmap_raw_read(map, reg, val, val_bytes * val_count);
+ if (ret != 0)
+ return ret;
for (i = 0; i < val_count * val_bytes; i += val_bytes)
map->format.parse_inplace(val + i);
} else {
+#ifdef CONFIG_64BIT
+ u64 *u64 = val;
+#endif
+ u32 *u32 = val;
+ u16 *u16 = val;
+ u8 *u8 = val;
+
+ map->lock(map->lock_arg);
+
for (i = 0; i < val_count; i++) {
unsigned int ival;
- ret = regmap_read(map, reg + regmap_get_offset(map, i),
- &ival);
- if (ret != 0)
- return ret;
- if (map->format.format_val) {
- map->format.format_val(val + (i * val_bytes), ival, 0);
- } else {
- /* Devices providing read and write
- * operations can use the bulk I/O
- * functions if they define a val_bytes,
- * we assume that the values are native
- * endian.
- */
-#ifdef CONFIG_64BIT
- u64 *u64 = val;
-#endif
- u32 *u32 = val;
- u16 *u16 = val;
- u8 *u8 = val;
+ ret = _regmap_read(map, reg + regmap_get_offset(map, i),
+ &ival);
+ if (ret != 0)
+ goto out;
- switch (map->format.val_bytes) {
+ switch (map->format.val_bytes) {
#ifdef CONFIG_64BIT
- case 8:
- u64[i] = ival;
- break;
+ case 8:
+ u64[i] = ival;
+ break;
#endif
- case 4:
- u32[i] = ival;
- break;
- case 2:
- u16[i] = ival;
- break;
- case 1:
- u8[i] = ival;
- break;
- default:
- return -EINVAL;
- }
+ case 4:
+ u32[i] = ival;
+ break;
+ case 2:
+ u16[i] = ival;
+ break;
+ case 1:
+ u8[i] = ival;
+ break;
+ default:
+ ret = -EINVAL;
+ goto out;
}
}
+
+out:
+ map->unlock(map->lock_arg);
}
- return 0;
+ return ret;
}
EXPORT_SYMBOL_GPL(regmap_bulk_read);
diff --git a/drivers/base/soc.c b/drivers/base/soc.c
index 4e80f48ad5d6..10b280f30217 100644
--- a/drivers/base/soc.c
+++ b/drivers/base/soc.c
@@ -150,6 +150,8 @@ struct soc_device *soc_device_register(struct soc_device_attribute *soc_dev_attr
out3:
ida_simple_remove(&soc_ida, soc_dev->soc_dev_num);
+ put_device(&soc_dev->dev);
+ soc_dev = NULL;
out2:
kfree(soc_dev);
out1: