Diffstat (limited to 'drivers/base')
-rw-r--r--  drivers/base/core.c                   |   6
-rw-r--r--  drivers/base/devtmpfs.c               |  15
-rw-r--r--  drivers/base/isa.c                    |   2
-rw-r--r--  drivers/base/platform.c               |   2
-rw-r--r--  drivers/base/power/clock_ops.c        | 223
-rw-r--r--  drivers/base/power/domain.c           |  87
-rw-r--r--  drivers/base/power/domain_governor.c  | 102
-rw-r--r--  drivers/base/power/main.c             |   9
-rw-r--r--  drivers/base/property.c               |  15
-rw-r--r--  drivers/base/regmap/regcache.c        |   2
-rw-r--r--  drivers/base/swnode.c                 | 294
11 files changed, 643 insertions, 114 deletions
diff --git a/drivers/base/core.c b/drivers/base/core.c
index afc6f9ce6235..f29839382f81 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -28,6 +28,7 @@
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/sysfs.h>
+#include <linux/dma-map-ops.h> /* for dma_default_coherent */
#include "base.h"
#include "power/power.h"
@@ -2730,6 +2731,11 @@ void device_initialize(struct device *dev)
INIT_LIST_HEAD(&dev->links.suppliers);
INIT_LIST_HEAD(&dev->links.defer_sync);
dev->links.status = DL_DEV_NO_DRIVER;
+#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \
+ defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \
+ defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL)
+ dev->dma_coherent = dma_default_coherent;
+#endif
}
EXPORT_SYMBOL_GPL(device_initialize);
diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c
index eac184e6d657..653c8c6ac7a7 100644
--- a/drivers/base/devtmpfs.c
+++ b/drivers/base/devtmpfs.c
@@ -162,7 +162,7 @@ static int dev_mkdir(const char *name, umode_t mode)
if (IS_ERR(dentry))
return PTR_ERR(dentry);
- err = vfs_mkdir(d_inode(path.dentry), dentry, mode);
+ err = vfs_mkdir(&init_user_ns, d_inode(path.dentry), dentry, mode);
if (!err)
/* mark as kernel-created inode */
d_inode(dentry)->i_private = &thread;
@@ -212,7 +212,8 @@ static int handle_create(const char *nodename, umode_t mode, kuid_t uid,
if (IS_ERR(dentry))
return PTR_ERR(dentry);
- err = vfs_mknod(d_inode(path.dentry), dentry, mode, dev->devt);
+ err = vfs_mknod(&init_user_ns, d_inode(path.dentry), dentry, mode,
+ dev->devt);
if (!err) {
struct iattr newattrs;
@@ -221,7 +222,7 @@ static int handle_create(const char *nodename, umode_t mode, kuid_t uid,
newattrs.ia_gid = gid;
newattrs.ia_valid = ATTR_MODE|ATTR_UID|ATTR_GID;
inode_lock(d_inode(dentry));
- notify_change(dentry, &newattrs, NULL);
+ notify_change(&init_user_ns, dentry, &newattrs, NULL);
inode_unlock(d_inode(dentry));
/* mark as kernel-created inode */
@@ -242,7 +243,8 @@ static int dev_rmdir(const char *name)
return PTR_ERR(dentry);
if (d_really_is_positive(dentry)) {
if (d_inode(dentry)->i_private == &thread)
- err = vfs_rmdir(d_inode(parent.dentry), dentry);
+ err = vfs_rmdir(&init_user_ns, d_inode(parent.dentry),
+ dentry);
else
err = -EPERM;
} else {
@@ -328,9 +330,10 @@ static int handle_remove(const char *nodename, struct device *dev)
newattrs.ia_valid =
ATTR_UID|ATTR_GID|ATTR_MODE;
inode_lock(d_inode(dentry));
- notify_change(dentry, &newattrs, NULL);
+ notify_change(&init_user_ns, dentry, &newattrs, NULL);
inode_unlock(d_inode(dentry));
- err = vfs_unlink(d_inode(parent.dentry), dentry, NULL);
+ err = vfs_unlink(&init_user_ns, d_inode(parent.dentry),
+ dentry, NULL);
if (!err || err == -ENOENT)
deleted = 1;
}
diff --git a/drivers/base/isa.c b/drivers/base/isa.c
index 2772f5d1948a..aa4737667026 100644
--- a/drivers/base/isa.c
+++ b/drivers/base/isa.c
@@ -51,7 +51,7 @@ static int isa_bus_remove(struct device *dev)
struct isa_driver *isa_driver = dev->platform_data;
if (isa_driver && isa_driver->remove)
- return isa_driver->remove(dev, to_isa_dev(dev)->id);
+ isa_driver->remove(dev, to_isa_dev(dev)->id);
return 0;
}
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index e8e0a9c3991c..6e1f8e0b661c 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -573,7 +573,7 @@ static void platform_device_release(struct device *dev)
struct platform_object *pa = container_of(dev, struct platform_object,
pdev.dev);
- of_device_node_put(&pa->pdev.dev);
+ of_node_put(pa->pdev.dev.of_node);
kfree(pa->pdev.dev.platform_data);
kfree(pa->pdev.mfd_cell);
kfree(pa->pdev.resource);
diff --git a/drivers/base/power/clock_ops.c b/drivers/base/power/clock_ops.c
index ced6863a16a5..84d5acb6301b 100644
--- a/drivers/base/power/clock_ops.c
+++ b/drivers/base/power/clock_ops.c
@@ -23,6 +23,7 @@
enum pce_status {
PCE_STATUS_NONE = 0,
PCE_STATUS_ACQUIRED,
+ PCE_STATUS_PREPARED,
PCE_STATUS_ENABLED,
PCE_STATUS_ERROR,
};
@@ -32,9 +33,113 @@ struct pm_clock_entry {
char *con_id;
struct clk *clk;
enum pce_status status;
+ bool enabled_when_prepared;
};
/**
+ * pm_clk_list_lock - ensure exclusive access for modifying the PM clock
+ * entry list.
+ * @psd: pm_subsys_data instance corresponding to the PM clock entry list
+ * and clock_op_might_sleep count to be modified.
+ *
+ * Get exclusive access before modifying the PM clock entry list and the
+ * clock_op_might_sleep count to guard against concurrent modifications.
+ * This also protects against concurrent clock_op_might_sleep and PM clock
+ * entry list usage in pm_clk_suspend()/pm_clk_resume(), which may or may
+ * not run in atomic context; hence both the mutex and the spinlock must
+ * be taken here.
+ */
+static void pm_clk_list_lock(struct pm_subsys_data *psd)
+ __acquires(&psd->lock)
+{
+ mutex_lock(&psd->clock_mutex);
+ spin_lock_irq(&psd->lock);
+}
+
+/**
+ * pm_clk_list_unlock - counterpart to pm_clk_list_lock().
+ * @psd: the same pm_subsys_data instance previously passed to
+ * pm_clk_list_lock().
+ */
+static void pm_clk_list_unlock(struct pm_subsys_data *psd)
+ __releases(&psd->lock)
+{
+ spin_unlock_irq(&psd->lock);
+ mutex_unlock(&psd->clock_mutex);
+}
+
+/**
+ * pm_clk_op_lock - ensure exclusive access for performing clock operations.
+ * @psd: pm_subsys_data instance corresponding to the PM clock entry list
+ * and clock_op_might_sleep count being used.
+ * @flags: stored irq flags.
+ * @fn: string for the caller function's name.
+ *
+ * This is used by pm_clk_suspend() and pm_clk_resume() to guard
+ * against concurrent modifications to the clock entry list and the
+ * clock_op_might_sleep count. If clock_op_might_sleep is != 0 then
+ * only the mutex can be locked and those functions can only be used in
+ * non-atomic context. If clock_op_might_sleep == 0 then these functions
+ * may be used in any context and only the spinlock can be locked.
+ * Returns -EPERM if called in atomic context when clock ops might sleep.
+ */
+static int pm_clk_op_lock(struct pm_subsys_data *psd, unsigned long *flags,
+ const char *fn)
+ /* sparse annotations don't work here as exit state isn't static */
+{
+ bool atomic_context = in_atomic() || irqs_disabled();
+
+try_again:
+ spin_lock_irqsave(&psd->lock, *flags);
+ if (!psd->clock_op_might_sleep) {
+ /* the __release is there to work around sparse limitations */
+ __release(&psd->lock);
+ return 0;
+ }
+
+ /* bail out if in atomic context */
+ if (atomic_context) {
+ pr_err("%s: atomic context with clock_op_might_sleep = %d\n",
+ fn, psd->clock_op_might_sleep);
+ spin_unlock_irqrestore(&psd->lock, *flags);
+ might_sleep();
+ return -EPERM;
+ }
+
+ /* we must switch to the mutex */
+ spin_unlock_irqrestore(&psd->lock, *flags);
+ mutex_lock(&psd->clock_mutex);
+
+ /*
+ * There was a possibility for psd->clock_op_might_sleep
+ * to become 0 above. Keep the mutex only if not the case.
+ */
+ if (likely(psd->clock_op_might_sleep))
+ return 0;
+
+ mutex_unlock(&psd->clock_mutex);
+ goto try_again;
+}
+
+/**
+ * pm_clk_op_unlock - counterpart to pm_clk_op_lock().
+ * @psd: the same pm_subsys_data instance previously passed to
+ * pm_clk_op_lock().
+ * @flags: irq flags provided by pm_clk_op_lock().
+ */
+static void pm_clk_op_unlock(struct pm_subsys_data *psd, unsigned long *flags)
+ /* sparse annotations don't work here as entry state isn't static */
+{
+ if (psd->clock_op_might_sleep) {
+ mutex_unlock(&psd->clock_mutex);
+ } else {
+ /* the __acquire is there to work around sparse limitations */
+ __acquire(&psd->lock);
+ spin_unlock_irqrestore(&psd->lock, *flags);
+ }
+}
+
+/**
* pm_clk_enable - Enable a clock, reporting any errors
* @dev: The device for the given clock
* @ce: PM clock entry corresponding to the clock.
@@ -43,14 +148,21 @@ static inline void __pm_clk_enable(struct device *dev, struct pm_clock_entry *ce
{
int ret;
- if (ce->status < PCE_STATUS_ERROR) {
+ switch (ce->status) {
+ case PCE_STATUS_ACQUIRED:
+ ret = clk_prepare_enable(ce->clk);
+ break;
+ case PCE_STATUS_PREPARED:
ret = clk_enable(ce->clk);
- if (!ret)
- ce->status = PCE_STATUS_ENABLED;
- else
- dev_err(dev, "%s: failed to enable clk %p, error %d\n",
- __func__, ce->clk, ret);
+ break;
+ default:
+ return;
}
+ if (!ret)
+ ce->status = PCE_STATUS_ENABLED;
+ else
+ dev_err(dev, "%s: failed to enable clk %p, error %d\n",
+ __func__, ce->clk, ret);
}
/**
@@ -64,17 +176,20 @@ static void pm_clk_acquire(struct device *dev, struct pm_clock_entry *ce)
ce->clk = clk_get(dev, ce->con_id);
if (IS_ERR(ce->clk)) {
ce->status = PCE_STATUS_ERROR;
+ return;
+ } else if (clk_is_enabled_when_prepared(ce->clk)) {
+ /* we defer preparing the clock in that case */
+ ce->status = PCE_STATUS_ACQUIRED;
+ ce->enabled_when_prepared = true;
+ } else if (clk_prepare(ce->clk)) {
+ ce->status = PCE_STATUS_ERROR;
+ dev_err(dev, "clk_prepare() failed\n");
+ return;
} else {
- if (clk_prepare(ce->clk)) {
- ce->status = PCE_STATUS_ERROR;
- dev_err(dev, "clk_prepare() failed\n");
- } else {
- ce->status = PCE_STATUS_ACQUIRED;
- dev_dbg(dev,
- "Clock %pC con_id %s managed by runtime PM.\n",
- ce->clk, ce->con_id);
- }
+ ce->status = PCE_STATUS_PREPARED;
}
+ dev_dbg(dev, "Clock %pC con_id %s managed by runtime PM.\n",
+ ce->clk, ce->con_id);
}
static int __pm_clk_add(struct device *dev, const char *con_id,
@@ -106,9 +221,11 @@ static int __pm_clk_add(struct device *dev, const char *con_id,
pm_clk_acquire(dev, ce);
- spin_lock_irq(&psd->lock);
+ pm_clk_list_lock(psd);
list_add_tail(&ce->node, &psd->clock_list);
- spin_unlock_irq(&psd->lock);
+ if (ce->enabled_when_prepared)
+ psd->clock_op_might_sleep++;
+ pm_clk_list_unlock(psd);
return 0;
}
@@ -239,14 +356,20 @@ static void __pm_clk_remove(struct pm_clock_entry *ce)
if (!ce)
return;
- if (ce->status < PCE_STATUS_ERROR) {
- if (ce->status == PCE_STATUS_ENABLED)
- clk_disable(ce->clk);
-
- if (ce->status >= PCE_STATUS_ACQUIRED) {
- clk_unprepare(ce->clk);
+ switch (ce->status) {
+ case PCE_STATUS_ENABLED:
+ clk_disable(ce->clk);
+ fallthrough;
+ case PCE_STATUS_PREPARED:
+ clk_unprepare(ce->clk);
+ fallthrough;
+ case PCE_STATUS_ACQUIRED:
+ case PCE_STATUS_ERROR:
+ if (!IS_ERR(ce->clk))
clk_put(ce->clk);
- }
+ break;
+ default:
+ break;
}
kfree(ce->con_id);
@@ -269,7 +392,7 @@ void pm_clk_remove(struct device *dev, const char *con_id)
if (!psd)
return;
- spin_lock_irq(&psd->lock);
+ pm_clk_list_lock(psd);
list_for_each_entry(ce, &psd->clock_list, node) {
if (!con_id && !ce->con_id)
@@ -280,12 +403,14 @@ void pm_clk_remove(struct device *dev, const char *con_id)
goto remove;
}
- spin_unlock_irq(&psd->lock);
+ pm_clk_list_unlock(psd);
return;
remove:
list_del(&ce->node);
- spin_unlock_irq(&psd->lock);
+ if (ce->enabled_when_prepared)
+ psd->clock_op_might_sleep--;
+ pm_clk_list_unlock(psd);
__pm_clk_remove(ce);
}
@@ -307,19 +432,21 @@ void pm_clk_remove_clk(struct device *dev, struct clk *clk)
if (!psd || !clk)
return;
- spin_lock_irq(&psd->lock);
+ pm_clk_list_lock(psd);
list_for_each_entry(ce, &psd->clock_list, node) {
if (clk == ce->clk)
goto remove;
}
- spin_unlock_irq(&psd->lock);
+ pm_clk_list_unlock(psd);
return;
remove:
list_del(&ce->node);
- spin_unlock_irq(&psd->lock);
+ if (ce->enabled_when_prepared)
+ psd->clock_op_might_sleep--;
+ pm_clk_list_unlock(psd);
__pm_clk_remove(ce);
}
@@ -330,13 +457,16 @@ EXPORT_SYMBOL_GPL(pm_clk_remove_clk);
* @dev: Device to initialize the list of PM clocks for.
*
* Initialize the lock and clock_list members of the device's pm_subsys_data
- * object.
+ * object, and set the count of clocks that might sleep to 0.
*/
void pm_clk_init(struct device *dev)
{
struct pm_subsys_data *psd = dev_to_psd(dev);
- if (psd)
+ if (psd) {
INIT_LIST_HEAD(&psd->clock_list);
+ mutex_init(&psd->clock_mutex);
+ psd->clock_op_might_sleep = 0;
+ }
}
EXPORT_SYMBOL_GPL(pm_clk_init);
@@ -372,12 +502,13 @@ void pm_clk_destroy(struct device *dev)
INIT_LIST_HEAD(&list);
- spin_lock_irq(&psd->lock);
+ pm_clk_list_lock(psd);
list_for_each_entry_safe_reverse(ce, c, &psd->clock_list, node)
list_move(&ce->node, &list);
+ psd->clock_op_might_sleep = 0;
- spin_unlock_irq(&psd->lock);
+ pm_clk_list_unlock(psd);
dev_pm_put_subsys_data(dev);
@@ -397,23 +528,30 @@ int pm_clk_suspend(struct device *dev)
struct pm_subsys_data *psd = dev_to_psd(dev);
struct pm_clock_entry *ce;
unsigned long flags;
+ int ret;
dev_dbg(dev, "%s()\n", __func__);
if (!psd)
return 0;
- spin_lock_irqsave(&psd->lock, flags);
+ ret = pm_clk_op_lock(psd, &flags, __func__);
+ if (ret)
+ return ret;
list_for_each_entry_reverse(ce, &psd->clock_list, node) {
- if (ce->status < PCE_STATUS_ERROR) {
- if (ce->status == PCE_STATUS_ENABLED)
+ if (ce->status == PCE_STATUS_ENABLED) {
+ if (ce->enabled_when_prepared) {
+ clk_disable_unprepare(ce->clk);
+ ce->status = PCE_STATUS_ACQUIRED;
+ } else {
clk_disable(ce->clk);
- ce->status = PCE_STATUS_ACQUIRED;
+ ce->status = PCE_STATUS_PREPARED;
+ }
}
}
- spin_unlock_irqrestore(&psd->lock, flags);
+ pm_clk_op_unlock(psd, &flags);
return 0;
}
@@ -428,18 +566,21 @@ int pm_clk_resume(struct device *dev)
struct pm_subsys_data *psd = dev_to_psd(dev);
struct pm_clock_entry *ce;
unsigned long flags;
+ int ret;
dev_dbg(dev, "%s()\n", __func__);
if (!psd)
return 0;
- spin_lock_irqsave(&psd->lock, flags);
+ ret = pm_clk_op_lock(psd, &flags, __func__);
+ if (ret)
+ return ret;
list_for_each_entry(ce, &psd->clock_list, node)
__pm_clk_enable(dev, ce);
- spin_unlock_irqrestore(&psd->lock, flags);
+ pm_clk_op_unlock(psd, &flags);
return 0;
}
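
Consumer-side, the effect of the clock_ops.c rework above is that pm_clk_suspend() and pm_clk_resume() may now sleep whenever an attached clock must stay enabled while prepared. A minimal consumer sketch under that assumption (hypothetical foo driver, not part of this patch):

    #include <linux/module.h>
    #include <linux/platform_device.h>
    #include <linux/pm_clock.h>
    #include <linux/pm_runtime.h>

    static int foo_probe(struct platform_device *pdev)
    {
    	int ret;

    	ret = pm_clk_create(&pdev->dev);
    	if (ret)
    		return ret;

    	/*
    	 * Acquire the device's clocks. If any of them reports
    	 * clk_is_enabled_when_prepared(), clk_prepare() is deferred and
    	 * clock_op_might_sleep is bumped, so pm_clk_suspend() and
    	 * pm_clk_resume() must not run in atomic context; in particular,
    	 * such a device must not use pm_runtime_irq_safe().
    	 */
    	ret = pm_clk_add(&pdev->dev, NULL);
    	if (ret) {
    		pm_clk_destroy(&pdev->dev);
    		return ret;
    	}

    	pm_runtime_enable(&pdev->dev);
    	return 0;
    }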
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index 6ac52a038bb9..78c310d3179d 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -297,6 +297,18 @@ static int _genpd_reeval_performance_state(struct generic_pm_domain *genpd,
return state;
}
+static int genpd_xlate_performance_state(struct generic_pm_domain *genpd,
+ struct generic_pm_domain *parent,
+ unsigned int pstate)
+{
+ if (!parent->set_performance_state)
+ return pstate;
+
+ return dev_pm_opp_xlate_performance_state(genpd->opp_table,
+ parent->opp_table,
+ pstate);
+}
+
static int _genpd_set_performance_state(struct generic_pm_domain *genpd,
unsigned int state, int depth)
{
@@ -311,13 +323,8 @@ static int _genpd_set_performance_state(struct generic_pm_domain *genpd,
list_for_each_entry(link, &genpd->child_links, child_node) {
parent = link->parent;
- if (!parent->set_performance_state)
- continue;
-
/* Find parent's performance state */
- ret = dev_pm_opp_xlate_performance_state(genpd->opp_table,
- parent->opp_table,
- state);
+ ret = genpd_xlate_performance_state(genpd, parent, state);
if (unlikely(ret < 0))
goto err;
@@ -339,9 +346,11 @@ static int _genpd_set_performance_state(struct generic_pm_domain *genpd,
goto err;
}
- ret = genpd->set_performance_state(genpd, state);
- if (ret)
- goto err;
+ if (genpd->set_performance_state) {
+ ret = genpd->set_performance_state(genpd, state);
+ if (ret)
+ goto err;
+ }
genpd->performance_state = state;
return 0;
@@ -352,9 +361,6 @@ err:
child_node) {
parent = link->parent;
- if (!parent->set_performance_state)
- continue;
-
genpd_lock_nested(parent, depth + 1);
parent_state = link->prev_performance_state;
@@ -399,9 +405,6 @@ int dev_pm_genpd_set_performance_state(struct device *dev, unsigned int state)
if (!genpd)
return -ENODEV;
- if (unlikely(!genpd->set_performance_state))
- return -EINVAL;
-
if (WARN_ON(!dev->power.subsys_data ||
!dev->power.subsys_data->domain_data))
return -EINVAL;
@@ -423,6 +426,35 @@ int dev_pm_genpd_set_performance_state(struct device *dev, unsigned int state)
}
EXPORT_SYMBOL_GPL(dev_pm_genpd_set_performance_state);
+/**
+ * dev_pm_genpd_set_next_wakeup - Notify PM framework of an impending wakeup.
+ * @dev: Device to handle
+ * @next: impending interrupt/wakeup for the device
+ *
+ * Allow devices to inform the PM framework of their next wakeup. It is
+ * assumed that the users guarantee that the genpd won't be detached while
+ * this routine is running, and that @dev isn't runtime suspended
+ * (RPM_SUSPENDED).
+ * Although devices are expected to update @next at the end of their use
+ * case as well, they may not always be able to do so; a stale @next is
+ * therefore ignored when powering off the domain.
+ */
+void dev_pm_genpd_set_next_wakeup(struct device *dev, ktime_t next)
+{
+ struct generic_pm_domain_data *gpd_data;
+ struct generic_pm_domain *genpd;
+
+ genpd = dev_to_genpd_safe(dev);
+ if (!genpd)
+ return;
+
+ gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
+ gpd_data->next_wakeup = next;
+}
+EXPORT_SYMBOL_GPL(dev_pm_genpd_set_next_wakeup);
+
static int _genpd_power_on(struct generic_pm_domain *genpd, bool timed)
{
unsigned int state_idx = genpd->state_idx;
@@ -934,8 +966,7 @@ static int genpd_runtime_resume(struct device *dev)
err_stop:
genpd_stop_dev(genpd, dev);
err_poweroff:
- if (!pm_runtime_is_irq_safe(dev) ||
- (pm_runtime_is_irq_safe(dev) && genpd_is_irq_safe(genpd))) {
+ if (!pm_runtime_is_irq_safe(dev) || genpd_is_irq_safe(genpd)) {
genpd_lock(genpd);
genpd_power_off(genpd, true, 0);
genpd_unlock(genpd);
@@ -1465,6 +1496,7 @@ static struct generic_pm_domain_data *genpd_alloc_dev_data(struct device *dev)
gpd_data->td.constraint_changed = true;
gpd_data->td.effective_constraint_ns = PM_QOS_RESUME_LATENCY_NO_CONSTRAINT_NS;
gpd_data->nb.notifier_call = genpd_dev_pm_qos_notifier;
+ gpd_data->next_wakeup = KTIME_MAX;
spin_lock_irq(&dev->power.lock);
@@ -2465,7 +2497,7 @@ int of_genpd_add_subdomain(struct of_phandle_args *parent_spec,
out:
mutex_unlock(&gpd_list_lock);
- return ret;
+ return ret == -ENOENT ? -EPROBE_DEFER : ret;
}
EXPORT_SYMBOL_GPL(of_genpd_add_subdomain);
@@ -2954,7 +2986,15 @@ static void rtpm_status_str(struct seq_file *s, struct device *dev)
else
WARN_ON(1);
- seq_puts(s, p);
+ seq_printf(s, "%-25s ", p);
+}
+
+static void perf_status_str(struct seq_file *s, struct device *dev)
+{
+ struct generic_pm_domain_data *gpd_data;
+
+ gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
+ seq_put_decimal_ull(s, "", gpd_data->performance_state);
}
static int genpd_summary_one(struct seq_file *s,
@@ -2982,7 +3022,7 @@ static int genpd_summary_one(struct seq_file *s,
else
snprintf(state, sizeof(state), "%s",
status_lookup[genpd->status]);
- seq_printf(s, "%-30s %-15s ", genpd->name, state);
+ seq_printf(s, "%-30s %-50s %u", genpd->name, state, genpd->performance_state);
/*
* Modifications on the list require holding locks on both
@@ -2990,6 +3030,8 @@ static int genpd_summary_one(struct seq_file *s,
* Also genpd->name is immutable.
*/
list_for_each_entry(link, &genpd->parent_links, parent_node) {
+ if (list_is_first(&link->parent_node, &genpd->parent_links))
+ seq_printf(s, "\n%48s", " ");
seq_printf(s, "%s", link->child->name);
if (!list_is_last(&link->parent_node, &genpd->parent_links))
seq_puts(s, ", ");
@@ -3004,6 +3046,7 @@ static int genpd_summary_one(struct seq_file *s,
seq_printf(s, "\n %-50s ", kobj_path);
rtpm_status_str(s, pm_data->dev);
+ perf_status_str(s, pm_data->dev);
kfree(kobj_path);
}
@@ -3019,9 +3062,9 @@ static int summary_show(struct seq_file *s, void *data)
struct generic_pm_domain *genpd;
int ret = 0;
- seq_puts(s, "domain status children\n");
+ seq_puts(s, "domain status children performance\n");
seq_puts(s, " /device runtime status\n");
- seq_puts(s, "----------------------------------------------------------------------\n");
+ seq_puts(s, "----------------------------------------------------------------------------------------------\n");
ret = mutex_lock_interruptible(&gpd_list_lock);
if (ret)
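
A minimal sketch of how a driver might use the new dev_pm_genpd_set_next_wakeup() hook (hypothetical foo helper; assumes the device is attached to a genpd and is not runtime suspended when called):

    #include <linux/ktime.h>
    #include <linux/pm_domain.h>

    /* Called when the driver knows its next interrupt is period_us away. */
    static void foo_note_next_event(struct device *dev, u32 period_us)
    {
    	ktime_t next = ktime_add_us(ktime_get(), period_us);

    	/* A stale value is simply ignored by the governor at power-off. */
    	dev_pm_genpd_set_next_wakeup(dev, next);
    }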
diff --git a/drivers/base/power/domain_governor.c b/drivers/base/power/domain_governor.c
index 490ed7deb99a..c6c218758f0b 100644
--- a/drivers/base/power/domain_governor.c
+++ b/drivers/base/power/domain_governor.c
@@ -117,6 +117,55 @@ static bool default_suspend_ok(struct device *dev)
return td->cached_suspend_ok;
}
+static void update_domain_next_wakeup(struct generic_pm_domain *genpd, ktime_t now)
+{
+ ktime_t domain_wakeup = KTIME_MAX;
+ ktime_t next_wakeup;
+ struct pm_domain_data *pdd;
+ struct gpd_link *link;
+
+ if (!(genpd->flags & GENPD_FLAG_MIN_RESIDENCY))
+ return;
+
+ /*
+ * Devices that have a predictable wakeup pattern may specify
+ * their next wakeup. Let's find the next wakeup from all the
+ * devices attached to this domain and from all the sub-domains.
+ * A device's next wakeup may have become stale by the time we
+ * read it here; stale values are ignored so that the domain can
+ * still enter its optimal idle state.
+ */
+ list_for_each_entry(pdd, &genpd->dev_list, list_node) {
+ next_wakeup = to_gpd_data(pdd)->next_wakeup;
+ if (next_wakeup != KTIME_MAX && !ktime_before(next_wakeup, now))
+ if (ktime_before(next_wakeup, domain_wakeup))
+ domain_wakeup = next_wakeup;
+ }
+
+ list_for_each_entry(link, &genpd->parent_links, parent_node) {
+ next_wakeup = link->child->next_wakeup;
+ if (next_wakeup != KTIME_MAX && !ktime_before(next_wakeup, now))
+ if (ktime_before(next_wakeup, domain_wakeup))
+ domain_wakeup = next_wakeup;
+ }
+
+ genpd->next_wakeup = domain_wakeup;
+}
+
+static bool next_wakeup_allows_state(struct generic_pm_domain *genpd,
+ unsigned int state, ktime_t now)
+{
+ ktime_t domain_wakeup = genpd->next_wakeup;
+ s64 idle_time_ns, min_sleep_ns;
+
+ min_sleep_ns = genpd->states[state].power_off_latency_ns +
+ genpd->states[state].residency_ns;
+
+ idle_time_ns = ktime_to_ns(ktime_sub(domain_wakeup, now));
+
+ return idle_time_ns >= min_sleep_ns;
+}
+
static bool __default_power_down_ok(struct dev_pm_domain *pd,
unsigned int state)
{
@@ -201,16 +250,41 @@ static bool __default_power_down_ok(struct dev_pm_domain *pd,
}
/**
- * default_power_down_ok - Default generic PM domain power off governor routine.
+ * _default_power_down_ok - Default generic PM domain power off governor routine.
* @pd: PM domain to check.
*
* This routine must be executed under the PM domain's lock.
*/
-static bool default_power_down_ok(struct dev_pm_domain *pd)
+static bool _default_power_down_ok(struct dev_pm_domain *pd, ktime_t now)
{
struct generic_pm_domain *genpd = pd_to_genpd(pd);
+ int state_idx = genpd->state_count - 1;
struct gpd_link *link;
+ /*
+ * Find the next wakeup from the devices that can determine their own
+ * wakeup, for this domain and for every domain down the hierarchy, to
+ * work out when the domain would wake up. It is not worthwhile to
+ * sleep if the state's residency cannot be met.
+ */
+ update_domain_next_wakeup(genpd, now);
+ if ((genpd->flags & GENPD_FLAG_MIN_RESIDENCY) && (genpd->next_wakeup != KTIME_MAX)) {
+ /* Find the deepest domain idle state that the next wakeup allows */
+ while (state_idx >= 0) {
+ if (next_wakeup_allows_state(genpd, state_idx, now)) {
+ genpd->max_off_time_changed = true;
+ break;
+ }
+ state_idx--;
+ }
+
+ if (state_idx < 0) {
+ state_idx = 0;
+ genpd->cached_power_down_ok = false;
+ goto done;
+ }
+ }
+
if (!genpd->max_off_time_changed) {
genpd->state_idx = genpd->cached_power_down_state_idx;
return genpd->cached_power_down_ok;
@@ -228,21 +302,30 @@ static bool default_power_down_ok(struct dev_pm_domain *pd)
genpd->max_off_time_ns = -1;
genpd->max_off_time_changed = false;
genpd->cached_power_down_ok = true;
- genpd->state_idx = genpd->state_count - 1;
- /* Find a state to power down to, starting from the deepest. */
- while (!__default_power_down_ok(pd, genpd->state_idx)) {
- if (genpd->state_idx == 0) {
+ /*
+ * Find a state to power down to, starting from the state
+ * determined by the next wakeup.
+ */
+ while (!__default_power_down_ok(pd, state_idx)) {
+ if (state_idx == 0) {
genpd->cached_power_down_ok = false;
break;
}
- genpd->state_idx--;
+ state_idx--;
}
+done:
+ genpd->state_idx = state_idx;
genpd->cached_power_down_state_idx = genpd->state_idx;
return genpd->cached_power_down_ok;
}
+static bool default_power_down_ok(struct dev_pm_domain *pd)
+{
+ return _default_power_down_ok(pd, ktime_get());
+}
+
static bool always_on_power_down_ok(struct dev_pm_domain *domain)
{
return false;
@@ -254,11 +337,12 @@ static bool cpu_power_down_ok(struct dev_pm_domain *pd)
struct generic_pm_domain *genpd = pd_to_genpd(pd);
struct cpuidle_device *dev;
ktime_t domain_wakeup, next_hrtimer;
+ ktime_t now = ktime_get();
s64 idle_duration_ns;
int cpu, i;
/* Validate dev PM QoS constraints. */
- if (!default_power_down_ok(pd))
+ if (!_default_power_down_ok(pd, now))
return false;
if (!(genpd->flags & GENPD_FLAG_CPU_DOMAIN))
@@ -280,7 +364,7 @@ static bool cpu_power_down_ok(struct dev_pm_domain *pd)
}
/* The minimum idle duration is from now - until the next wakeup. */
- idle_duration_ns = ktime_to_ns(ktime_sub(domain_wakeup, ktime_get()));
+ idle_duration_ns = ktime_to_ns(ktime_sub(domain_wakeup, now));
if (idle_duration_ns <= 0)
return false;
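
The check in next_wakeup_allows_state() above boils down to idle_time >= power_off_latency + residency. A self-contained illustration with made-up numbers (the 0.5 ms latency and 2 ms residency are examples, not values from any real domain):

    #include <linux/ktime.h>

    static bool residency_example(void)
    {
    	s64 power_off_latency_ns = 500 * NSEC_PER_USEC;	/* 0.5 ms */
    	s64 residency_ns = 2 * NSEC_PER_MSEC;		/* 2 ms */
    	ktime_t now = ktime_get();
    	ktime_t domain_wakeup = ktime_add_ms(now, 3);	/* wakeup in 3 ms */
    	s64 idle_time_ns = ktime_to_ns(ktime_sub(domain_wakeup, now));

    	/* 3 ms >= 0.5 ms + 2 ms, so this state would be allowed. */
    	return idle_time_ns >= power_off_latency_ns + residency_ns;
    }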
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index 46793276598d..f893c3c5af07 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -16,6 +16,7 @@
*/
#define pr_fmt(fmt) "PM: " fmt
+#define dev_fmt pr_fmt
#include <linux/device.h>
#include <linux/export.h>
@@ -449,8 +450,8 @@ static void pm_dev_dbg(struct device *dev, pm_message_t state, const char *info)
static void pm_dev_err(struct device *dev, pm_message_t state, const char *info,
int error)
{
- pr_err("Device %s failed to %s%s: error %d\n",
- dev_name(dev), pm_verb(state.event), info, error);
+ dev_err(dev, "failed to %s%s: error %d\n", pm_verb(state.event), info,
+ error);
}
static void dpm_show_time(ktime_t starttime, pm_message_t state, int error,
@@ -1897,8 +1898,8 @@ int dpm_prepare(pm_message_t state)
error = 0;
continue;
}
- pr_info("Device %s not prepared for power transition: code %d\n",
- dev_name(dev), error);
+ dev_info(dev, "not prepared for power transition: code %d\n",
+ error);
put_device(dev);
break;
}
diff --git a/drivers/base/property.c b/drivers/base/property.c
index 35b95c6ac0c6..1421e9548857 100644
--- a/drivers/base/property.c
+++ b/drivers/base/property.c
@@ -837,9 +837,15 @@ EXPORT_SYMBOL_GPL(fwnode_handle_put);
/**
* fwnode_device_is_available - check if a device is available for use
* @fwnode: Pointer to the fwnode of the device.
+ *
+ * For fwnode types that don't implement the .device_is_available()
+ * operation, this function returns true.
*/
bool fwnode_device_is_available(const struct fwnode_handle *fwnode)
{
+ if (!fwnode_has_op(fwnode, device_is_available))
+ return true;
+
return fwnode_call_bool_op(fwnode, device_is_available);
}
EXPORT_SYMBOL_GPL(fwnode_device_is_available);
@@ -1209,7 +1215,14 @@ fwnode_graph_get_endpoint_by_id(const struct fwnode_handle *fwnode,
best_ep_id = fwnode_ep.id;
}
- return best_ep;
+ if (best_ep)
+ return best_ep;
+
+ if (fwnode && !IS_ERR_OR_NULL(fwnode->secondary))
+ return fwnode_graph_get_endpoint_by_id(fwnode->secondary, port,
+ endpoint, flags);
+
+ return NULL;
}
EXPORT_SYMBOL_GPL(fwnode_graph_get_endpoint_by_id);
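
With the property.c change above, graph lookups transparently fall back to a device's secondary (software) node. A usage sketch (hypothetical foo helper):

    #include <linux/property.h>

    static struct fwnode_handle *foo_get_ep0(struct device *dev)
    {
    	/*
    	 * If port 0/endpoint 0 is not described by the primary fwnode
    	 * (e.g. ACPI), the secondary software node is now searched too.
    	 */
    	return fwnode_graph_get_endpoint_by_id(dev_fwnode(dev), 0, 0, 0);
    }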
diff --git a/drivers/base/regmap/regcache.c b/drivers/base/regmap/regcache.c
index 7f4b3b62492c..f2469d3435ca 100644
--- a/drivers/base/regmap/regcache.c
+++ b/drivers/base/regmap/regcache.c
@@ -68,7 +68,7 @@ static int regcache_hw_init(struct regmap *map)
map->cache_bypass = cache_bypass;
if (ret == 0) {
map->reg_defaults_raw = tmp_buf;
- map->cache_free = 1;
+ map->cache_free = true;
} else {
kfree(tmp_buf);
}
diff --git a/drivers/base/swnode.c b/drivers/base/swnode.c
index 4a4b2008fbc2..37179a8b1ceb 100644
--- a/drivers/base/swnode.c
+++ b/drivers/base/swnode.c
@@ -24,6 +24,7 @@ struct swnode {
struct swnode *parent;
unsigned int allocated:1;
+ unsigned int managed:1;
};
static DEFINE_IDA(swnode_root_ids);
@@ -48,6 +49,19 @@ EXPORT_SYMBOL_GPL(is_software_node);
struct swnode, fwnode) : NULL; \
})
+static inline struct swnode *dev_to_swnode(struct device *dev)
+{
+ struct fwnode_handle *fwnode = dev_fwnode(dev);
+
+ if (!fwnode)
+ return NULL;
+
+ if (!is_software_node(fwnode))
+ fwnode = fwnode->secondary;
+
+ return to_swnode(fwnode);
+}
+
static struct swnode *
software_node_to_swnode(const struct software_node *node)
{
@@ -443,14 +457,18 @@ software_node_get_next_child(const struct fwnode_handle *fwnode,
struct swnode *c = to_swnode(child);
if (!p || list_empty(&p->children) ||
- (c && list_is_last(&c->entry, &p->children)))
+ (c && list_is_last(&c->entry, &p->children))) {
+ fwnode_handle_put(child);
return NULL;
+ }
if (c)
c = list_next_entry(c, entry);
else
c = list_first_entry(&p->children, struct swnode, entry);
- return &c->fwnode;
+
+ fwnode_handle_put(child);
+ return fwnode_handle_get(&c->fwnode);
}
static struct fwnode_handle *
@@ -536,6 +554,115 @@ software_node_get_reference_args(const struct fwnode_handle *fwnode,
return 0;
}
+static struct fwnode_handle *
+swnode_graph_find_next_port(const struct fwnode_handle *parent,
+ struct fwnode_handle *port)
+{
+ struct fwnode_handle *old = port;
+
+ while ((port = software_node_get_next_child(parent, old))) {
+ /*
+ * fwnode ports have naming style "port@", so we search for any
+ * children that follow that convention.
+ */
+ if (!strncmp(to_swnode(port)->node->name, "port@",
+ strlen("port@")))
+ return port;
+ old = port;
+ }
+
+ return NULL;
+}
+
+static struct fwnode_handle *
+software_node_graph_get_next_endpoint(const struct fwnode_handle *fwnode,
+ struct fwnode_handle *endpoint)
+{
+ struct swnode *swnode = to_swnode(fwnode);
+ struct fwnode_handle *parent;
+ struct fwnode_handle *port;
+
+ if (!swnode)
+ return NULL;
+
+ if (endpoint) {
+ port = software_node_get_parent(endpoint);
+ parent = software_node_get_parent(port);
+ } else {
+ parent = software_node_get_named_child_node(fwnode, "ports");
+ if (!parent)
+ parent = software_node_get(&swnode->fwnode);
+
+ port = swnode_graph_find_next_port(parent, NULL);
+ }
+
+ for (; port; port = swnode_graph_find_next_port(parent, port)) {
+ endpoint = software_node_get_next_child(port, endpoint);
+ if (endpoint) {
+ fwnode_handle_put(port);
+ break;
+ }
+ }
+
+ fwnode_handle_put(parent);
+
+ return endpoint;
+}
+
+static struct fwnode_handle *
+software_node_graph_get_remote_endpoint(const struct fwnode_handle *fwnode)
+{
+ struct swnode *swnode = to_swnode(fwnode);
+ const struct software_node_ref_args *ref;
+ const struct property_entry *prop;
+
+ if (!swnode)
+ return NULL;
+
+ prop = property_entry_get(swnode->node->properties, "remote-endpoint");
+ if (!prop || prop->type != DEV_PROP_REF || prop->is_inline)
+ return NULL;
+
+ ref = prop->pointer;
+
+ return software_node_get(software_node_fwnode(ref[0].node));
+}
+
+static struct fwnode_handle *
+software_node_graph_get_port_parent(struct fwnode_handle *fwnode)
+{
+ struct swnode *swnode = to_swnode(fwnode);
+
+ swnode = swnode->parent;
+ if (swnode && !strcmp(swnode->node->name, "ports"))
+ swnode = swnode->parent;
+
+ return swnode ? software_node_get(&swnode->fwnode) : NULL;
+}
+
+static int
+software_node_graph_parse_endpoint(const struct fwnode_handle *fwnode,
+ struct fwnode_endpoint *endpoint)
+{
+ struct swnode *swnode = to_swnode(fwnode);
+ const char *parent_name = swnode->parent->node->name;
+ int ret;
+
+ if (strlen("port@") >= strlen(parent_name) ||
+ strncmp(parent_name, "port@", strlen("port@")))
+ return -EINVAL;
+
+ /* Ports have naming style "port@n"; we need to select the n */
+ ret = kstrtou32(parent_name + strlen("port@"), 10, &endpoint->port);
+ if (ret)
+ return ret;
+
+ endpoint->id = swnode->id;
+ endpoint->local_fwnode = fwnode;
+
+ return 0;
+}
+
static const struct fwnode_operations software_node_ops = {
.get = software_node_get,
.put = software_node_put,
@@ -547,7 +674,11 @@ static const struct fwnode_operations software_node_ops = {
.get_parent = software_node_get_parent,
.get_next_child_node = software_node_get_next_child,
.get_named_child_node = software_node_get_named_child_node,
- .get_reference_args = software_node_get_reference_args
+ .get_reference_args = software_node_get_reference_args,
+ .graph_get_next_endpoint = software_node_graph_get_next_endpoint,
+ .graph_get_remote_endpoint = software_node_graph_get_remote_endpoint,
+ .graph_get_port_parent = software_node_graph_get_port_parent,
+ .graph_parse_endpoint = software_node_graph_parse_endpoint,
};
/* -------------------------------------------------------------------------- */
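
The new graph operations rely on the "ports"/"port@n"/"endpoint@n" naming convention. A hypothetical static description they could walk (the peer endpoint bar_ep0 is left as a tentative definition for brevity):

    #include <linux/property.h>

    static const struct software_node bar_ep0;	/* the remote peer's endpoint */

    static const struct property_entry foo_ep0_props[] = {
    	PROPERTY_ENTRY_REF("remote-endpoint", &bar_ep0),
    	{ }
    };

    static const struct software_node foo_ports = { .name = "ports" };
    static const struct software_node foo_port0 = {
    	.name = "port@0",
    	.parent = &foo_ports,
    };
    static const struct software_node foo_ep0 = {
    	.name = "endpoint@0",
    	.parent = &foo_port0,
    	.properties = foo_ep0_props,
    };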
@@ -688,7 +819,11 @@ out_err:
* software_node_register_nodes - Register an array of software nodes
* @nodes: Zero terminated array of software nodes to be registered
*
- * Register multiple software nodes at once.
+ * Register multiple software nodes at once. If any node in the array
+ * has its .parent pointer set (which can only be to another software_node),
+ * then its parent **must** have been registered before it is, either
+ * outside of this function or by ordering the array such that parent
+ * comes before child.
*/
int software_node_register_nodes(const struct software_node *nodes)
{
@@ -696,14 +831,23 @@ int software_node_register_nodes(const struct software_node *nodes)
int i;
for (i = 0; nodes[i].name; i++) {
- ret = software_node_register(&nodes[i]);
- if (ret) {
- software_node_unregister_nodes(nodes);
- return ret;
+ const struct software_node *parent = nodes[i].parent;
+
+ if (parent && !software_node_to_swnode(parent)) {
+ ret = -EINVAL;
+ goto err_unregister_nodes;
}
+
+ ret = software_node_register(&nodes[i]);
+ if (ret)
+ goto err_unregister_nodes;
}
return 0;
+
+err_unregister_nodes:
+ software_node_unregister_nodes(nodes);
+ return ret;
}
EXPORT_SYMBOL_GPL(software_node_register_nodes);
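
A sketch of a node array that satisfies the ordering requirement documented above (hypothetical names): parents precede children, so registration walks forward and the reworked software_node_unregister_nodes() can safely unwind in reverse:

    #include <linux/property.h>

    static const struct software_node foo_nodes[] = {
    	{ .name = "foo" },				/* parent, entry 0 */
    	{ .name = "child", .parent = &foo_nodes[0] },	/* child follows */
    	{ }						/* zero terminator */
    };

    static int foo_register(void)
    {
    	return software_node_register_nodes(foo_nodes);
    }

    static void foo_unregister(void)
    {
    	/* Unwinds in reverse: "child" goes before "foo". */
    	software_node_unregister_nodes(foo_nodes);
    }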
@@ -711,18 +855,23 @@ EXPORT_SYMBOL_GPL(software_node_register_nodes);
* software_node_unregister_nodes - Unregister an array of software nodes
* @nodes: Zero terminated array of software nodes to be unregistered
*
- * Unregister multiple software nodes at once.
+ * Unregister multiple software nodes at once. If parent pointers are set up
+ * in any of the software nodes then the array **must** be ordered such that
+ * parents come before their children.
*
- * NOTE: Be careful using this call if the nodes had parent pointers set up in
- * them before registering. If so, it is wiser to remove the nodes
- * individually, in the correct order (child before parent) instead of relying
- * on the sequential order of the list of nodes in the array.
+ * NOTE: If you are uncertain whether the array is ordered such that
+ * children will be unregistered before their parents, it is wiser to
+ * remove the nodes individually, in the correct order (child before
+ * parent).
*/
void software_node_unregister_nodes(const struct software_node *nodes)
{
- int i;
+ unsigned int i = 0;
+
+ while (nodes[i].name)
+ i++;
- for (i = 0; nodes[i].name; i++)
+ while (i--)
software_node_unregister(&nodes[i]);
}
EXPORT_SYMBOL_GPL(software_node_unregister_nodes);
@@ -757,16 +906,23 @@ EXPORT_SYMBOL_GPL(software_node_register_node_group);
* software_node_unregister_node_group - Unregister a group of software nodes
* @node_group: NULL terminated array of software node pointers to be unregistered
*
- * Unregister multiple software nodes at once.
+ * Unregister multiple software nodes at once. The array is unwound in
+ * reverse order (i.e. last entry first); thus, if any members of the array
+ * are children of another member, the children must appear later in the
+ * list so that they are unregistered first.
*/
-void software_node_unregister_node_group(const struct software_node **node_group)
+void software_node_unregister_node_group(
+ const struct software_node **node_group)
{
- unsigned int i;
+ unsigned int i = 0;
if (!node_group)
return;
- for (i = 0; node_group[i]; i++)
+ while (node_group[i])
+ i++;
+
+ while (i--)
software_node_unregister(node_group[i]);
}
EXPORT_SYMBOL_GPL(software_node_unregister_node_group);
@@ -843,22 +999,99 @@ void fwnode_remove_software_node(struct fwnode_handle *fwnode)
}
EXPORT_SYMBOL_GPL(fwnode_remove_software_node);
+/**
+ * device_add_software_node - Assign software node to a device
+ * @dev: The device the software node is meant for.
+ * @swnode: The software node.
+ *
+ * This function will register @swnode and make it the secondary firmware node
+ * pointer of @dev. If @dev has no primary node, then @swnode will become the primary
+ * node.
+ */
+int device_add_software_node(struct device *dev, const struct software_node *swnode)
+{
+ int ret;
+
+ /* Only one software node per device. */
+ if (dev_to_swnode(dev))
+ return -EBUSY;
+
+ ret = software_node_register(swnode);
+ if (ret)
+ return ret;
+
+ set_secondary_fwnode(dev, software_node_fwnode(swnode));
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(device_add_software_node);
+
+/**
+ * device_remove_software_node - Remove device's software node
+ * @dev: The device with the software node.
+ *
+ * This function will unregister the software node of @dev.
+ */
+void device_remove_software_node(struct device *dev)
+{
+ struct swnode *swnode;
+
+ swnode = dev_to_swnode(dev);
+ if (!swnode)
+ return;
+
+ software_node_notify(dev, KOBJ_REMOVE);
+ set_secondary_fwnode(dev, NULL);
+ kobject_put(&swnode->kobj);
+}
+EXPORT_SYMBOL_GPL(device_remove_software_node);
+
+/**
+ * device_create_managed_software_node - Create a software node for a device
+ * @dev: The device the software node is assigned to.
+ * @properties: Device properties for the software node.
+ * @parent: Parent of the software node.
+ *
+ * Creates a software node as a managed resource for @dev, which means the
+ * lifetime of the newly created software node is tied to the lifetime of @dev.
+ * Software nodes created with this function should not be reused or shared
+ * because of that. The function takes a deep copy of @properties for the
+ * software node.
+ *
+ * Since the new software node is assigned directly to @dev, and since it
+ * should not be shared, it is not returned to the caller. The function
+ * returns 0 on success, or a negative errno in case of an error.
+ */
+int device_create_managed_software_node(struct device *dev,
+ const struct property_entry *properties,
+ const struct software_node *parent)
+{
+ struct fwnode_handle *p = software_node_fwnode(parent);
+ struct fwnode_handle *fwnode;
+
+ if (parent && !p)
+ return -EINVAL;
+
+ fwnode = fwnode_create_software_node(properties, p);
+ if (IS_ERR(fwnode))
+ return PTR_ERR(fwnode);
+
+ to_swnode(fwnode)->managed = true;
+ set_secondary_fwnode(dev, fwnode);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(device_create_managed_software_node);
+
int software_node_notify(struct device *dev, unsigned long action)
{
- struct fwnode_handle *fwnode = dev_fwnode(dev);
struct swnode *swnode;
int ret;
- if (!fwnode)
- return 0;
-
- if (!is_software_node(fwnode))
- fwnode = fwnode->secondary;
- if (!is_software_node(fwnode))
+ swnode = dev_to_swnode(dev);
+ if (!swnode)
return 0;
- swnode = to_swnode(fwnode);
-
switch (action) {
case KOBJ_ADD:
ret = sysfs_create_link(&dev->kobj, &swnode->kobj,
@@ -878,6 +1111,11 @@ int software_node_notify(struct device *dev, unsigned long action)
sysfs_remove_link(&swnode->kobj, dev_name(dev));
sysfs_remove_link(&dev->kobj, "software_node");
kobject_put(&swnode->kobj);
+
+ if (swnode->managed) {
+ set_secondary_fwnode(dev, NULL);
+ kobject_put(&swnode->kobj);
+ }
break;
default:
break;
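
Finally, a probe-time sketch of device_create_managed_software_node() (hypothetical property names):

    #include <linux/property.h>

    static int foo_attach_props(struct device *dev)
    {
    	const struct property_entry props[] = {
    		PROPERTY_ENTRY_U32("frame-rate", 60),
    		PROPERTY_ENTRY_BOOL("wakeup-source"),
    		{ }
    	};

    	/*
    	 * The node takes a deep copy of props and is marked managed, so
    	 * software_node_notify(KOBJ_REMOVE) above releases it together
    	 * with the device.
    	 */
    	return device_create_managed_software_node(dev, props, NULL);
    }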