From 2ee5f8f05949735fa2f4c463a5e13fcb3660c719 Mon Sep 17 00:00:00 2001 From: Daniel Lezcano Date: Tue, 8 Dec 2020 17:41:42 +0100 Subject: units: Add Watt units

As there are already temperature units, let's add the Watt macro definitions.

Signed-off-by: Daniel Lezcano Reviewed-by: Lukasz Luba Signed-off-by: Rafael J. Wysocki --- include/linux/units.h | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'include')

diff --git a/include/linux/units.h b/include/linux/units.h index aaf716364ec3..92c234e71cab 100644 --- a/include/linux/units.h +++ b/include/linux/units.h @@ -4,6 +4,10 @@ #include <linux/kernel.h> +#define MILLIWATT_PER_WATT 1000L +#define MICROWATT_PER_MILLIWATT 1000L +#define MICROWATT_PER_WATT 1000000L + #define ABSOLUTE_ZERO_MILLICELSIUS -273150 static inline long milli_kelvin_to_millicelsius(long t) -- cgit v1.2.3
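The new macros make power-unit conversions read naturally at the call site. A small illustrative snippet (hypothetical values, assuming <linux/units.h> is included):

	u64 power_mw = 750;
	u64 power_uw = power_mw * MICROWATT_PER_MILLIWATT;	/* 750000 uW */
	u64 budget_uw = 2 * MICROWATT_PER_WATT;			/* 2000000 uW */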
From a20d0ef97abf486a917aff066c457bdb930425af Mon Sep 17 00:00:00 2001 From: Daniel Lezcano Date: Tue, 8 Dec 2020 17:41:44 +0100 Subject: powercap/drivers/dtpm: Add API for dynamic thermal power management

In the embedded world, the complexity of the SoC leads to an increasing number of hotspots which need to be monitored and mitigated as a whole in order to prevent the temperature from going above the normative and legally stated 'skin temperature'.

Another aspect is to sustain the performance for a given power budget, for example virtual reality where the user can feel dizziness if the GPU performance is capped while a big CPU is processing something else, or to reduce the battery charging rate because the dissipated power is too high compared with the power consumed by other devices.

Userspace is the most adequate place to dynamically act on the different devices by limiting their power given an application profile: it has the knowledge of the platform. These userspace daemons are in charge of the Dynamic Thermal Power Management (DTPM).

Nowadays, the dtpm daemons abuse the thermal framework as they act on the cooling device state to force a specific and arbitrary state without taking the governor decisions into account. Given the closed loop of some governors, that can confuse the logic or lead directly to a decision conflict. As cooling device support is limited today to the CPU and the GPU, the dtpm daemons have little control over the power dissipation of the system. The out-of-tree solutions hack around here and there in the drivers and the frameworks to gain control over the devices. The common solution is to declare them as cooling devices. There is no unification of the power limitation unit; opaque states are used.

This patch provides a way to create a hierarchy of constraints using the powercap framework. The devices which are registered as power limit-able devices are represented in this hierarchy as a tree. They are linked together with intermediate nodes which are just there to propagate the constraint to the children. The leaves of the tree are the real devices, the intermediate nodes are virtual, aggregating the children's constraints and power characteristics.

Each node has a weight on a 2^10 basis, in order to reflect the percentage of power distribution among the children nodes. This percentage is used to dispatch the power limit to the children. The weight is computed against the max power of the siblings. This simple approach allows a fair distribution of the power limit.

Signed-off-by: Daniel Lezcano Reviewed-by: Lukasz Luba Tested-by: Lukasz Luba Signed-off-by: Rafael J. Wysocki --- drivers/powercap/Kconfig | 6 + drivers/powercap/Makefile | 1 + drivers/powercap/dtpm.c | 473 ++++++++++++++++++++++++++++++++++++++ include/asm-generic/vmlinux.lds.h | 11 + include/linux/dtpm.h | 75 ++++++ 5 files changed, 566 insertions(+) create mode 100644 drivers/powercap/dtpm.c create mode 100644 include/linux/dtpm.h (limited to 'include')

diff --git a/drivers/powercap/Kconfig b/drivers/powercap/Kconfig index bc228725346b..cc1953bd8bed 100644 --- a/drivers/powercap/Kconfig +++ b/drivers/powercap/Kconfig @@ -43,4 +43,10 @@ config IDLE_INJECT CPUs for power capping. Idle period can be injected synchronously on a set of specified CPUs or alternatively on a per CPU basis. + +config DTPM + bool "Power capping for Dynamic Thermal Power Management" + help + This enables support for the power capping for the dynamic + thermal power management userspace engine. endif diff --git a/drivers/powercap/Makefile b/drivers/powercap/Makefile index 7255c94ec61c..6482ac52054d 100644 --- a/drivers/powercap/Makefile +++ b/drivers/powercap/Makefile @@ -1,4 +1,5 @@ # SPDX-License-Identifier: GPL-2.0-only +obj-$(CONFIG_DTPM) += dtpm.o obj-$(CONFIG_POWERCAP) += powercap_sys.o obj-$(CONFIG_INTEL_RAPL_CORE) += intel_rapl_common.o obj-$(CONFIG_INTEL_RAPL) += intel_rapl_msr.o diff --git a/drivers/powercap/dtpm.c b/drivers/powercap/dtpm.c new file mode 100644 index 000000000000..5b6857e9b064 --- /dev/null +++ b/drivers/powercap/dtpm.c @@ -0,0 +1,473 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright 2020 Linaro Limited + * + * Author: Daniel Lezcano + * + * The powercap based Dynamic Thermal Power Management framework + * provides userspace with a consistent API to set the power limit + * on some devices. + * + * DTPM defines the functions to create a tree of constraints. Each + * parent node is a virtual description of the aggregation of the + * children. It propagates the constraints set at its level to its + * children and collects the children's power information. The leaves of + * the tree are the real devices which have the ability to get their + * current power consumption and set their power limit. + */
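Before diving into the implementation, here is a minimal sketch of what a leaf backend supplies (the foo_* helpers are hypothetical; the ops structure and its three callbacks are the contract defined by this file):

	static u64 foo_get_power_uw(struct dtpm *dtpm)
	{
		/* report the device's current consumption, in uW */
		return foo_read_power_uw(dtpm->private);
	}

	static u64 foo_set_power_uw(struct dtpm *dtpm, u64 power_limit)
	{
		/* cap the device and return the power actually granted, in uW */
		return foo_apply_limit_uw(dtpm->private, power_limit);
	}

	static void foo_release(struct dtpm *dtpm)
	{
		kfree(dtpm->private);
	}

	static struct dtpm_ops foo_dtpm_ops = {
		.set_power_uw	= foo_set_power_uw,
		.get_power_uw	= foo_get_power_uw,
		.release	= foo_release,
	};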
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/dtpm.h> +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/powercap.h> +#include <linux/slab.h> +#include <linux/mutex.h> + +#define DTPM_POWER_LIMIT_FLAG BIT(0) + +static const char *constraint_name[] = { + "Instantaneous", +}; + +static DEFINE_MUTEX(dtpm_lock); +static struct powercap_control_type *pct; +static struct dtpm *root; + +static int get_time_window_us(struct powercap_zone *pcz, int cid, u64 *window) +{ + return -ENOSYS; +} + +static int set_time_window_us(struct powercap_zone *pcz, int cid, u64 window) +{ + return -ENOSYS; +} + +static int get_max_power_range_uw(struct powercap_zone *pcz, u64 *max_power_uw) +{ + struct dtpm *dtpm = to_dtpm(pcz); + + mutex_lock(&dtpm_lock); + *max_power_uw = dtpm->power_max - dtpm->power_min; + mutex_unlock(&dtpm_lock); + + return 0; +} + +static int __get_power_uw(struct dtpm *dtpm, u64 *power_uw) +{ + struct dtpm *child; + u64 power; + int ret = 0; + + if (dtpm->ops) { + *power_uw = dtpm->ops->get_power_uw(dtpm); + return 0; + } + + *power_uw = 0; + + list_for_each_entry(child, &dtpm->children, sibling) { + ret = __get_power_uw(child, &power); + if (ret) + break; + *power_uw += power; + } + + return ret; +} + +static int get_power_uw(struct powercap_zone *pcz, u64 *power_uw) +{ + struct dtpm *dtpm = to_dtpm(pcz); + int ret; + + mutex_lock(&dtpm_lock); + ret = __get_power_uw(dtpm, power_uw); + mutex_unlock(&dtpm_lock); + + return ret; +} + +static void __dtpm_rebalance_weight(struct dtpm *dtpm) +{ + struct dtpm *child; + + list_for_each_entry(child, &dtpm->children, sibling) { + + child->weight = DIV_ROUND_CLOSEST(child->power_max * 1024, + dtpm->power_max); + + pr_debug("Setting weight '%d' for '%s'\n", + child->weight, child->zone.name); + + __dtpm_rebalance_weight(child); + } +} + +static void __dtpm_sub_power(struct dtpm *dtpm) +{ + struct dtpm *parent = dtpm->parent; + + while (parent) { + parent->power_min -= dtpm->power_min; + parent->power_max -= dtpm->power_max; + parent->power_limit -= dtpm->power_limit; + parent = parent->parent; + } + + __dtpm_rebalance_weight(root); +} + +static void __dtpm_add_power(struct dtpm *dtpm) +{ + struct dtpm *parent = dtpm->parent; + + while (parent) { + parent->power_min += dtpm->power_min; + parent->power_max += dtpm->power_max; + parent->power_limit += dtpm->power_limit; + parent = parent->parent; + } + + __dtpm_rebalance_weight(root); +} + +/** + * dtpm_update_power - Update the power on the dtpm + * @dtpm: a pointer to a dtpm structure to update + * @power_min: a u64 representing the new power_min value + * @power_max: a u64 representing the new power_max value + * + * Function to update the power values of the dtpm node specified in + * parameter. These new values will be propagated to the tree. + * + * Return: zero on success, -EINVAL if the values are inconsistent + */ +int dtpm_update_power(struct dtpm *dtpm, u64 power_min, u64 power_max) +{ + int ret = 0; + + mutex_lock(&dtpm_lock); + + if (power_min == dtpm->power_min && power_max == dtpm->power_max) + goto unlock; + + if (power_max < power_min) { + ret = -EINVAL; + goto unlock; + } + + __dtpm_sub_power(dtpm); + + dtpm->power_min = power_min; + dtpm->power_max = power_max; + if (!test_bit(DTPM_POWER_LIMIT_FLAG, &dtpm->flags)) + dtpm->power_limit = power_max; + + __dtpm_add_power(dtpm); + +unlock: + mutex_unlock(&dtpm_lock); + + return ret; +}
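As a usage sketch (hypothetical leaf backend), a driver whose power range changes at run time, say after a new set of OPPs is enabled, refreshes the tree with:

	/* new range: 50 mW .. 1.2 W, expressed in uW */
	ret = dtpm_update_power(dtpm, 50000, 1200000);
	if (ret)
		pr_warn("failed to update the dtpm power range\n");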
+/** + * dtpm_release_zone - Cleanup when the node is released + * @pcz: a pointer to a powercap_zone structure + * + * Do some housecleaning and update the weight on the tree. The + * release will be denied if the node has children. This function must + * be called by the specific release callback of the different + * backends. + * + * Return: 0 on success, -EBUSY if there are children + */ +int dtpm_release_zone(struct powercap_zone *pcz) +{ + struct dtpm *dtpm = to_dtpm(pcz); + struct dtpm *parent = dtpm->parent; + + mutex_lock(&dtpm_lock); + + if (!list_empty(&dtpm->children)) { + mutex_unlock(&dtpm_lock); + return -EBUSY; + } + + if (parent) + list_del(&dtpm->sibling); + + __dtpm_sub_power(dtpm); + + mutex_unlock(&dtpm_lock); + + if (dtpm->ops) + dtpm->ops->release(dtpm); + + kfree(dtpm); + + return 0; +} + +static int __get_power_limit_uw(struct dtpm *dtpm, int cid, u64 *power_limit) +{ + *power_limit = dtpm->power_limit; + return 0; +} + +static int get_power_limit_uw(struct powercap_zone *pcz, + int cid, u64 *power_limit) +{ + struct dtpm *dtpm = to_dtpm(pcz); + int ret; + + mutex_lock(&dtpm_lock); + ret = __get_power_limit_uw(dtpm, cid, power_limit); + mutex_unlock(&dtpm_lock); + + return ret; +} + +/* + * Set the power limit on the nodes, the power limit is distributed + * given the weight of the children. + * + * The dtpm node lock must be held when calling this function. + */ +static int __set_power_limit_uw(struct dtpm *dtpm, int cid, u64 power_limit) +{ + struct dtpm *child; + int ret = 0; + u64 power; + + /* + * A max power limitation means we remove the power limit, + * otherwise we set a constraint and flag the dtpm node. + */ + if (power_limit == dtpm->power_max) { + clear_bit(DTPM_POWER_LIMIT_FLAG, &dtpm->flags); + } else { + set_bit(DTPM_POWER_LIMIT_FLAG, &dtpm->flags); + } + + pr_debug("Setting power limit for '%s': %llu uW\n", + dtpm->zone.name, power_limit); + + /* + * Only the leaves of the dtpm tree have ops to get/set the power + */ + if (dtpm->ops) { + dtpm->power_limit = dtpm->ops->set_power_uw(dtpm, power_limit); + } else { + dtpm->power_limit = 0; + + list_for_each_entry(child, &dtpm->children, sibling) { + + /* + * Integer division rounding will inevitably + * lead to a different min or max value when + * set several times. In order to restore the + * initial value, we force the child's min or + * max power every time if the constraint is + * at the boundaries. + */ + if (power_limit == dtpm->power_max) { + power = child->power_max; + } else if (power_limit == dtpm->power_min) { + power = child->power_min; + } else { + power = DIV_ROUND_CLOSEST( + power_limit * child->weight, 1024); + } + + pr_debug("Setting power limit for '%s': %llu uW\n", + child->zone.name, power); + + ret = __set_power_limit_uw(child, cid, power); + if (!ret) + ret = __get_power_limit_uw(child, cid, &power); + + if (ret) + break; + + dtpm->power_limit += power; + } + } + + return ret; +}
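To make the weight arithmetic concrete, a small worked example (illustrative numbers): a virtual node with power_max = 3 W has two children with power_max of 2 W and 1 W, giving weights of 683 and 341 on the 2^10 basis. A 1.5 W limit on the parent is then dispatched as:

	/* weight A = DIV_ROUND_CLOSEST(2000000 * 1024, 3000000) = 683 */
	/* weight B = DIV_ROUND_CLOSEST(1000000 * 1024, 3000000) = 341 */
	/* child A: DIV_ROUND_CLOSEST(1500000 * 683, 1024) = 1000488 uW */
	/* child B: DIV_ROUND_CLOSEST(1500000 * 341, 1024) =  499512 uW */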
+static int set_power_limit_uw(struct powercap_zone *pcz, + int cid, u64 power_limit) +{ + struct dtpm *dtpm = to_dtpm(pcz); + int ret; + + mutex_lock(&dtpm_lock); + + /* + * Don't allow values outside of the power range previously + * set when initializing the power numbers. + */ + power_limit = clamp_val(power_limit, dtpm->power_min, dtpm->power_max); + + ret = __set_power_limit_uw(dtpm, cid, power_limit); + + pr_debug("%s: power limit: %llu uW, power max: %llu uW\n", + dtpm->zone.name, dtpm->power_limit, dtpm->power_max); + + mutex_unlock(&dtpm_lock); + + return ret; +} + +static const char *get_constraint_name(struct powercap_zone *pcz, int cid) +{ + return constraint_name[cid]; +} + +static int get_max_power_uw(struct powercap_zone *pcz, int id, u64 *max_power) +{ + struct dtpm *dtpm = to_dtpm(pcz); + + mutex_lock(&dtpm_lock); + *max_power = dtpm->power_max; + mutex_unlock(&dtpm_lock); + + return 0; +} + +static struct powercap_zone_constraint_ops constraint_ops = { + .set_power_limit_uw = set_power_limit_uw, + .get_power_limit_uw = get_power_limit_uw, + .set_time_window_us = set_time_window_us, + .get_time_window_us = get_time_window_us, + .get_max_power_uw = get_max_power_uw, + .get_name = get_constraint_name, +}; + +static struct powercap_zone_ops zone_ops = { + .get_max_power_range_uw = get_max_power_range_uw, + .get_power_uw = get_power_uw, + .release = dtpm_release_zone, +}; + +/** + * dtpm_alloc - Allocate and initialize a dtpm struct + * @ops: a pointer to the dtpm_ops structure, NULL for a virtual node + * + * Return: a struct dtpm pointer, NULL in case of error + */ +struct dtpm *dtpm_alloc(struct dtpm_ops *ops) +{ + struct dtpm *dtpm; + + dtpm = kzalloc(sizeof(*dtpm), GFP_KERNEL); + if (dtpm) { + INIT_LIST_HEAD(&dtpm->children); + INIT_LIST_HEAD(&dtpm->sibling); + dtpm->weight = 1024; + dtpm->ops = ops; + } + + return dtpm; +} + +/** + * dtpm_unregister - Unregister a dtpm node from the hierarchy tree + * @dtpm: a pointer to a dtpm structure corresponding to the node to be removed + * + * Call the underlying powercap unregister function. That will call + * the release callback of the powercap zone. + */ +void dtpm_unregister(struct dtpm *dtpm) +{ + powercap_unregister_zone(pct, &dtpm->zone); + + pr_info("Unregistered dtpm node '%s'\n", dtpm->zone.name); +}
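Teardown has to proceed leaves first, since dtpm_release_zone() refuses to free a node that still has children; a short sketch (hypothetical node pointers):

	dtpm_unregister(leaf);	/* zone release -> ops->release() -> kfree() */
	dtpm_unregister(pkg);	/* now childless, can be released too */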
+/** + * dtpm_register - Register a dtpm node in the hierarchy tree + * @name: a string specifying the name of the node + * @dtpm: a pointer to a dtpm structure corresponding to the new node + * @parent: a pointer to a dtpm structure corresponding to the parent node + * + * Create a dtpm node in the tree. If no parent is specified, the node + * is the root node of the hierarchy. If the root node already exists, + * then the registration will fail. The powercap controller must be + * initialized before calling this function. + * + * The dtpm structure must be initialized with the power numbers + * before calling this function. + * + * Return: zero on success, a negative value in case of error: + * -EAGAIN: the function is called before the framework is initialized. + * -EBUSY: the root node is already inserted + * -EINVAL: * there is no root node yet and @parent is specified + * * not all ops are defined + * * the parent has ops which are reserved for leaves + * Other negative values are reported back from the powercap framework + */ +int dtpm_register(const char *name, struct dtpm *dtpm, struct dtpm *parent) +{ + struct powercap_zone *pcz; + + if (!pct) + return -EAGAIN; + + if (root && !parent) + return -EBUSY; + + if (!root && parent) + return -EINVAL; + + if (parent && parent->ops) + return -EINVAL; + + if (!dtpm) + return -EINVAL; + + if (dtpm->ops && !(dtpm->ops->set_power_uw && + dtpm->ops->get_power_uw && + dtpm->ops->release)) + return -EINVAL; + + pcz = powercap_register_zone(&dtpm->zone, pct, name, + parent ? &parent->zone : NULL, + &zone_ops, MAX_DTPM_CONSTRAINTS, + &constraint_ops); + if (IS_ERR(pcz)) + return PTR_ERR(pcz); + + mutex_lock(&dtpm_lock); + + if (parent) { + list_add_tail(&dtpm->sibling, &parent->children); + dtpm->parent = parent; + } else { + root = dtpm; + } + + __dtpm_add_power(dtpm); + + pr_info("Registered dtpm node '%s' / %llu-%llu uW\n", + dtpm->zone.name, dtpm->power_min, dtpm->power_max); + + mutex_unlock(&dtpm_lock); + + return 0; +} + +static int __init dtpm_init(void) +{ + struct dtpm_descr **dtpm_descr; + + pct = powercap_register_control_type(NULL, "dtpm", NULL); + if (IS_ERR(pct)) { + pr_err("Failed to register control type\n"); + return PTR_ERR(pct); + } + + for_each_dtpm_table(dtpm_descr) + (*dtpm_descr)->init(*dtpm_descr); + + return 0; +} +late_initcall(dtpm_init); diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h index b2b3d81b1535..b3e4e0740089 100644 --- a/include/asm-generic/vmlinux.lds.h +++ b/include/asm-generic/vmlinux.lds.h @@ -316,6 +316,16 @@ #define THERMAL_TABLE(name) #endif +#ifdef CONFIG_DTPM +#define DTPM_TABLE() \ + . 
= ALIGN(8); \ + __dtpm_table = .; \ + KEEP(*(__dtpm_table)) \ + __dtpm_table_end = .; +#else +#define DTPM_TABLE() +#endif + #define KERNEL_DTB() \ STRUCT_ALIGN(); \ __dtb_start = .; \ @@ -733,6 +743,7 @@ ACPI_PROBE_TABLE(irqchip) \ ACPI_PROBE_TABLE(timer) \ THERMAL_TABLE(governor) \ + DTPM_TABLE() \ EARLYCON_TABLE() \ LSM_TABLE() \ EARLY_LSM_TABLE() \ diff --git a/include/linux/dtpm.h b/include/linux/dtpm.h new file mode 100644 index 000000000000..7a1d0b50e334 --- /dev/null +++ b/include/linux/dtpm.h @@ -0,0 +1,75 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2020 Linaro Ltd + * + * Author: Daniel Lezcano + */ +#ifndef ___DTPM_H__ +#define ___DTPM_H__ + +#include + +#define MAX_DTPM_DESCR 8 +#define MAX_DTPM_CONSTRAINTS 1 + +struct dtpm { + struct powercap_zone zone; + struct dtpm *parent; + struct list_head sibling; + struct list_head children; + struct dtpm_ops *ops; + unsigned long flags; + u64 power_limit; + u64 power_max; + u64 power_min; + int weight; + void *private; +}; + +struct dtpm_ops { + u64 (*set_power_uw)(struct dtpm *, u64); + u64 (*get_power_uw)(struct dtpm *); + void (*release)(struct dtpm *); +}; + +struct dtpm_descr; + +typedef int (*dtpm_init_t)(struct dtpm_descr *); + +struct dtpm_descr { + struct dtpm *parent; + const char *name; + dtpm_init_t init; +}; + +/* Init section thermal table */ +extern struct dtpm_descr *__dtpm_table[]; +extern struct dtpm_descr *__dtpm_table_end[]; + +#define DTPM_TABLE_ENTRY(name) \ + static typeof(name) *__dtpm_table_entry_##name \ + __used __section("__dtpm_table") = &name + +#define DTPM_DECLARE(name) DTPM_TABLE_ENTRY(name) + +#define for_each_dtpm_table(__dtpm) \ + for (__dtpm = __dtpm_table; \ + __dtpm < __dtpm_table_end; \ + __dtpm++) + +static inline struct dtpm *to_dtpm(struct powercap_zone *zone) +{ + return container_of(zone, struct dtpm, zone); +} + +int dtpm_update_power(struct dtpm *dtpm, u64 power_min, u64 power_max); + +int dtpm_release_zone(struct powercap_zone *pcz); + +struct dtpm *dtpm_alloc(struct dtpm_ops *ops); + +void dtpm_unregister(struct dtpm *dtpm); + +int dtpm_register(const char *name, struct dtpm *dtpm, struct dtpm *parent); + +#endif -- cgit v1.2.3 From 0e8f68d7f04856a9e2ad4817b477fa35124888bd Mon Sep 17 00:00:00 2001 From: Daniel Lezcano Date: Tue, 8 Dec 2020 17:41:45 +0100 Subject: powercap/drivers/dtpm: Add CPU energy model based support With the powercap dtpm controller, we are able to plug devices with power limitation features in the tree. The following patch introduces the CPU power limitation based on the energy model and the performance states. The power limitation is done at the performance domain level. If some CPUs are unplugged, the corresponding power will be subtracted from the performance domain total power. It is up to the platform to initialize the dtpm tree and add the CPU. Here is an example to create a simple tree with one root node called "pkg" and the CPU's performance domains. static int dtpm_register_pkg(struct dtpm_descr *descr) { struct dtpm *pkg; int ret; pkg = dtpm_alloc(NULL); if (!pkg) return -ENOMEM; ret = dtpm_register(descr->name, pkg, descr->parent); if (ret) return ret; return dtpm_register_cpu(pkg); } static struct dtpm_descr descr = { .name = "pkg", .init = dtpm_register_pkg, }; DTPM_DECLARE(descr); Signed-off-by: Daniel Lezcano Reviewed-by: Lukasz Luba Tested-by: Lukasz Luba Signed-off-by: Rafael J. 
Wysocki --- drivers/powercap/Kconfig | 7 ++ drivers/powercap/Makefile | 1 + drivers/powercap/dtpm_cpu.c | 257 ++++++++++++++++++++++++++++++++++++++++++++ include/linux/cpuhotplug.h | 1 + include/linux/dtpm.h | 2 + 5 files changed, 268 insertions(+) create mode 100644 drivers/powercap/dtpm_cpu.c (limited to 'include')

diff --git a/drivers/powercap/Kconfig b/drivers/powercap/Kconfig index cc1953bd8bed..20b4325c6161 100644 --- a/drivers/powercap/Kconfig +++ b/drivers/powercap/Kconfig @@ -49,4 +49,11 @@ config DTPM help This enables support for the power capping for the dynamic thermal power management userspace engine. + +config DTPM_CPU + bool "Add CPU power capping based on the energy model" + depends on DTPM && ENERGY_MODEL + help + This enables support for CPU power limitation based on the + energy model. endif diff --git a/drivers/powercap/Makefile b/drivers/powercap/Makefile index 6482ac52054d..fabcf388a8d3 100644 --- a/drivers/powercap/Makefile +++ b/drivers/powercap/Makefile @@ -1,5 +1,6 @@ # SPDX-License-Identifier: GPL-2.0-only obj-$(CONFIG_DTPM) += dtpm.o +obj-$(CONFIG_DTPM_CPU) += dtpm_cpu.o obj-$(CONFIG_POWERCAP) += powercap_sys.o obj-$(CONFIG_INTEL_RAPL_CORE) += intel_rapl_common.o obj-$(CONFIG_INTEL_RAPL) += intel_rapl_msr.o diff --git a/drivers/powercap/dtpm_cpu.c b/drivers/powercap/dtpm_cpu.c new file mode 100644 index 000000000000..6933c783c6b4 --- /dev/null +++ b/drivers/powercap/dtpm_cpu.c @@ -0,0 +1,257 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright 2020 Linaro Limited + * + * Author: Daniel Lezcano + * + * The DTPM CPU is based on the energy model. It hooks the CPUs into + * the DTPM tree, which in turn updates the power numbers by propagating + * them from the CPU energy model information to the parents. + * + * The association between the power and the performance states allows + * setting the power of the CPU at the OPP granularity. + * + * CPU hotplug is supported and the power numbers will be updated + * when a CPU is hot-plugged or unplugged. + */ +#include <linux/cpumask.h> +#include <linux/cpufreq.h> +#include <linux/cpuhotplug.h> +#include <linux/dtpm.h> +#include <linux/energy_model.h> +#include <linux/pm_qos.h> +#include <linux/slab.h> +#include <linux/units.h> + +static struct dtpm *__parent; + +static DEFINE_PER_CPU(struct dtpm *, dtpm_per_cpu); + +struct dtpm_cpu { + struct freq_qos_request qos_req; + int cpu; +}; + +/* + * When a new CPU is inserted at hotplug or boot time, add the power + * contribution and update the dtpm tree. + */ +static int power_add(struct dtpm *dtpm, struct em_perf_domain *em) +{ + u64 power_min, power_max; + + power_min = em->table[0].power; + power_min *= MICROWATT_PER_MILLIWATT; + power_min += dtpm->power_min; + + power_max = em->table[em->nr_perf_states - 1].power; + power_max *= MICROWATT_PER_MILLIWATT; + power_max += dtpm->power_max; + + return dtpm_update_power(dtpm, power_min, power_max); +}
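For instance (illustrative numbers), with an energy-model table ranging from 100 mW to 450 mW per CPU, each CPU brought online widens the domain's range by one CPU's share:

	power_min += 100 * MICROWATT_PER_MILLIWATT;	/* +100000 uW */
	power_max += 450 * MICROWATT_PER_MILLIWATT;	/* +450000 uW */
	/* after four CPUs are online: range is 400000..1800000 uW */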
+/* + * When a CPU is unplugged, remove its power contribution from the + * dtpm tree. + */ +static int power_sub(struct dtpm *dtpm, struct em_perf_domain *em) +{ + u64 power_min, power_max; + + power_min = em->table[0].power; + power_min *= MICROWATT_PER_MILLIWATT; + power_min = dtpm->power_min - power_min; + + power_max = em->table[em->nr_perf_states - 1].power; + power_max *= MICROWATT_PER_MILLIWATT; + power_max = dtpm->power_max - power_max; + + return dtpm_update_power(dtpm, power_min, power_max); +} + +static u64 set_pd_power_limit(struct dtpm *dtpm, u64 power_limit) +{ + struct dtpm_cpu *dtpm_cpu = dtpm->private; + struct em_perf_domain *pd; + struct cpumask cpus; + unsigned long freq; + u64 power; + int i, nr_cpus; + + pd = em_cpu_get(dtpm_cpu->cpu); + + cpumask_and(&cpus, cpu_online_mask, to_cpumask(pd->cpus)); + + nr_cpus = cpumask_weight(&cpus); + + for (i = 0; i < pd->nr_perf_states; i++) { + + power = pd->table[i].power * MICROWATT_PER_MILLIWATT * nr_cpus; + + if (power > power_limit) + break; + } + + freq = pd->table[i - 1].frequency; + + freq_qos_update_request(&dtpm_cpu->qos_req, freq); + + power_limit = pd->table[i - 1].power * + MICROWATT_PER_MILLIWATT * nr_cpus; + + return power_limit; +} + +static u64 get_pd_power_uw(struct dtpm *dtpm) +{ + struct dtpm_cpu *dtpm_cpu = dtpm->private; + struct em_perf_domain *pd; + struct cpumask cpus; + unsigned long freq; + int i, nr_cpus; + + pd = em_cpu_get(dtpm_cpu->cpu); + freq = cpufreq_quick_get(dtpm_cpu->cpu); + cpumask_and(&cpus, cpu_online_mask, to_cpumask(pd->cpus)); + nr_cpus = cpumask_weight(&cpus); + + for (i = 0; i < pd->nr_perf_states; i++) { + + if (pd->table[i].frequency < freq) + continue; + + return pd->table[i].power * + MICROWATT_PER_MILLIWATT * nr_cpus; + } + + return 0; +} + +static void pd_release(struct dtpm *dtpm) +{ + struct dtpm_cpu *dtpm_cpu = dtpm->private; + + if (freq_qos_request_active(&dtpm_cpu->qos_req)) + freq_qos_remove_request(&dtpm_cpu->qos_req); + + kfree(dtpm_cpu); +} + +static struct dtpm_ops dtpm_ops = { + .set_power_uw = set_pd_power_limit, + .get_power_uw = get_pd_power_uw, + .release = pd_release, +};
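To illustrate the table walk in set_pd_power_limit() (hypothetical numbers): a domain with four online CPUs and per-state powers of {100, 250, 450} mW exposes state costs of {400000, 1000000, 1800000} uW. A request of 1500000 uW stops the walk at the third state, so the frequency QoS cap comes from the previous state and its cost is reported back as the effective limit:

	/* state 0: 100 mW * 4 ->  400000 uW <= limit		*/
	/* state 1: 250 mW * 4 -> 1000000 uW <= limit		*/
	/* state 2: 450 mW * 4 -> 1800000 uW  > limit -> break	*/
	freq = pd->table[1].frequency;	/* granted: 1000000 uW */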
+static int cpuhp_dtpm_cpu_offline(unsigned int cpu) +{ + struct cpufreq_policy *policy; + struct em_perf_domain *pd; + struct dtpm *dtpm; + + policy = cpufreq_cpu_get(cpu); + + if (!policy) + return 0; + + pd = em_cpu_get(cpu); + if (!pd) + return -EINVAL; + + dtpm = per_cpu(dtpm_per_cpu, cpu); + + power_sub(dtpm, pd); + + if (cpumask_weight(policy->cpus) != 1) + return 0; + + for_each_cpu(cpu, policy->related_cpus) + per_cpu(dtpm_per_cpu, cpu) = NULL; + + dtpm_unregister(dtpm); + + return 0; +} + +static int cpuhp_dtpm_cpu_online(unsigned int cpu) +{ + struct dtpm *dtpm; + struct dtpm_cpu *dtpm_cpu; + struct cpufreq_policy *policy; + struct em_perf_domain *pd; + char name[CPUFREQ_NAME_LEN]; + int ret = -ENOMEM; + + policy = cpufreq_cpu_get(cpu); + + if (!policy) + return 0; + + pd = em_cpu_get(cpu); + if (!pd) + return -EINVAL; + + dtpm = per_cpu(dtpm_per_cpu, cpu); + if (dtpm) + return power_add(dtpm, pd); + + dtpm = dtpm_alloc(&dtpm_ops); + if (!dtpm) + return -EINVAL; + + dtpm_cpu = kzalloc(sizeof(*dtpm_cpu), GFP_KERNEL); + if (!dtpm_cpu) + goto out_kfree_dtpm; + + dtpm->private = dtpm_cpu; + dtpm_cpu->cpu = cpu; + + for_each_cpu(cpu, policy->related_cpus) + per_cpu(dtpm_per_cpu, cpu) = dtpm; + + sprintf(name, "cpu%d", dtpm_cpu->cpu); + + ret = dtpm_register(name, dtpm, __parent); + if (ret) + goto out_kfree_dtpm_cpu; + + ret = power_add(dtpm, pd); + if (ret) + goto out_dtpm_unregister; + + ret = freq_qos_add_request(&policy->constraints, + &dtpm_cpu->qos_req, FREQ_QOS_MAX, + pd->table[pd->nr_perf_states - 1].frequency); + if (ret) + goto out_power_sub; + + return 0; + +out_power_sub: + power_sub(dtpm, pd); + +out_dtpm_unregister: + dtpm_unregister(dtpm); + dtpm_cpu = NULL; + dtpm = NULL; + +out_kfree_dtpm_cpu: + for_each_cpu(cpu, policy->related_cpus) + per_cpu(dtpm_per_cpu, cpu) = NULL; + kfree(dtpm_cpu); + +out_kfree_dtpm: + kfree(dtpm); + return ret; +} + +int dtpm_register_cpu(struct dtpm *parent) +{ + __parent = parent; + + return cpuhp_setup_state(CPUHP_AP_DTPM_CPU_ONLINE, + "dtpm_cpu:online", + cpuhp_dtpm_cpu_online, + cpuhp_dtpm_cpu_offline); +} diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h index 0042ef362511..ee09a39627d6 100644 --- a/include/linux/cpuhotplug.h +++ b/include/linux/cpuhotplug.h @@ -193,6 +193,7 @@ enum cpuhp_state { CPUHP_AP_ONLINE_DYN_END = CPUHP_AP_ONLINE_DYN + 30, CPUHP_AP_X86_HPET_ONLINE, CPUHP_AP_X86_KVM_CLK_ONLINE, + CPUHP_AP_DTPM_CPU_ONLINE, CPUHP_AP_ACTIVE, CPUHP_ONLINE, }; diff --git a/include/linux/dtpm.h b/include/linux/dtpm.h index 7a1d0b50e334..e80a332e3d8a 100644 --- a/include/linux/dtpm.h +++ b/include/linux/dtpm.h @@ -72,4 +72,6 @@ void dtpm_unregister(struct dtpm *dtpm); int dtpm_register(const char *name, struct dtpm *dtpm, struct dtpm *parent); +int dtpm_register_cpu(struct dtpm *parent); + #endif -- cgit v1.2.3

From 67e3242ee28052daa90e1fa193efc601939fde58 Mon Sep 17 00:00:00 2001 From: Lina Iyer Date: Wed, 20 Jan 2021 08:50:41 -0700 Subject: PM: domains: inform PM domain of a device's next wakeup

Some devices may have a predictable interrupt pattern while executing usecases. An example would be the VSYNC interrupt associated with display devices. A 60 Hz display could cause an interrupt every 16 ms. If the device were in a PM domain, the domain would need to be powered up for the device to resume and handle the interrupt.

Entering a domain idle state saves power only if the residency of the idle state is met. Without knowing the idle duration of the domain, the governor would just choose the deepest idle state that matches the QoS requirements. The domain might be powered off just as the device is expecting to wake up. If devices could inform PM frameworks of their next event, the parent PM domain's idle duration could be determined.

So let's add the dev_pm_genpd_set_next_wakeup() API for the device to inform PM domains of the impending wakeup. This information will be used by the domain governor to determine the best idle state given the wakeup.

Signed-off-by: Lina Iyer Reviewed-by: Ulf Hansson Signed-off-by: Rafael J. Wysocki --- drivers/base/power/domain.c | 30 ++++++++++++++++++++++++++++++ include/linux/pm_domain.h | 6 ++++++ 2 files changed, 36 insertions(+) (limited to 'include')

diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c index 9a14eedacb92..014033c7c287 100644 --- a/drivers/base/power/domain.c +++ b/drivers/base/power/domain.c @@ -423,6 +423,35 @@ int dev_pm_genpd_set_performance_state(struct device *dev, unsigned int state) } EXPORT_SYMBOL_GPL(dev_pm_genpd_set_performance_state); +/** + * dev_pm_genpd_set_next_wakeup - Notify PM framework of an impending wakeup. + * @dev: Device to handle + * @next: impending interrupt/wakeup for the device + * + * Allow devices to inform the PM framework of the next wakeup. It's assumed + * that the users guarantee that the genpd wouldn't be detached while this + * routine is getting called. Additionally, it's also assumed that @dev isn't + * runtime suspended (RPM_SUSPENDED). + * + * Although devices are expected to update the next_wakeup after the end of + * their usecase as well, it is possible the devices themselves may not know + * about that, so stale @next will be ignored when powering off the domain. + */ +void dev_pm_genpd_set_next_wakeup(struct device *dev, ktime_t next) +{ + struct generic_pm_domain_data *gpd_data; + struct generic_pm_domain *genpd; + + genpd = dev_to_genpd_safe(dev); + if (!genpd) + return; + + gpd_data = to_gpd_data(dev->power.subsys_data->domain_data); + gpd_data->next_wakeup = next; +} +EXPORT_SYMBOL_GPL(dev_pm_genpd_set_next_wakeup);
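A consumer-side sketch (hypothetical display driver for a 60 Hz panel) would hint the upcoming VSYNC before letting the device idle:

	/* next frame interrupt expected in roughly 16 ms */
	ktime_t next = ktime_add_ms(ktime_get(), 16);

	dev_pm_genpd_set_next_wakeup(dev, next);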
 static int _genpd_power_on(struct generic_pm_domain *genpd, bool timed) { unsigned int state_idx = genpd->state_idx; @@ -1465,6 +1494,7 @@ static struct generic_pm_domain_data *genpd_alloc_dev_data(struct device *dev) gpd_data->td.constraint_changed = true; gpd_data->td.effective_constraint_ns = PM_QOS_RESUME_LATENCY_NO_CONSTRAINT_NS; gpd_data->nb.notifier_call = genpd_dev_pm_qos_notifier; + gpd_data->next_wakeup = KTIME_MAX; spin_lock_irq(&dev->power.lock); diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h index 2ca919ae8d36..735583c0bc6d 100644 --- a/include/linux/pm_domain.h +++ b/include/linux/pm_domain.h @@ -9,6 +9,7 @@ #define _LINUX_PM_DOMAIN_H #include <linux/device.h> +#include <linux/ktime.h> #include <linux/mutex.h> #include <linux/pm.h> #include <linux/err.h> @@ -191,6 +192,7 @@ struct generic_pm_domain_data { struct notifier_block *power_nb; int cpu; unsigned int performance_state; + ktime_t next_wakeup; void *data; }; @@ -217,6 +219,7 @@ int pm_genpd_remove(struct generic_pm_domain *genpd); int dev_pm_genpd_set_performance_state(struct device *dev, unsigned int state); int dev_pm_genpd_add_notifier(struct device *dev, struct notifier_block *nb); int dev_pm_genpd_remove_notifier(struct device *dev); +void dev_pm_genpd_set_next_wakeup(struct device *dev, ktime_t next); extern struct dev_power_governor simple_qos_governor; extern struct dev_power_governor pm_domain_always_on_gov; @@ -275,6 +278,9 @@ static inline int dev_pm_genpd_remove_notifier(struct device *dev) { return -EOPNOTSUPP; } +static inline void dev_pm_genpd_set_next_wakeup(struct device *dev, ktime_t next) +{ } + #define simple_qos_governor (*(struct dev_power_governor *)(NULL)) #define pm_domain_always_on_gov (*(struct dev_power_governor *)(NULL)) #endif -- cgit v1.2.3

From c79aa080fb0f60a0e24c87014dc9c2f373e1379b Mon Sep 17 00:00:00 2001 From: Lina Iyer Date: Wed, 20 Jan 2021 08:50:42 -0700 Subject: PM: domains: use device's next wakeup to determine domain idle state

Currently, a PM domain's idle state is determined based on whether the QoS requirements are met. However, even entering an idle state may waste power if the minimum residency requirements aren't fulfilled.

CPU PM domains use the next timer wakeup for the CPUs in the domain to determine the sleep duration of the domain. This is compared with the idle state residencies to determine the optimal idle state. For other PM domains, determining the sleep length is not that straightforward. But if the device's next_event is available, we can use that to determine the sleep duration of the PM domain.

Let's update the domain governor logic to check for idle state residency based on the next wakeup of devices as well as QoS constraints. But since not all domains may contain devices capable of specifying the next wakeup, let's enable this additional check only if specified by the domain's flags when initializing the domain.

Signed-off-by: Lina Iyer Reviewed-by: Ulf Hansson Signed-off-by: Rafael J. Wysocki
--- drivers/base/power/domain_governor.c | 102 +++++++++++++++++++++++++++++++---- include/linux/pm_domain.h | 6 +++ 2 files changed, 99 insertions(+), 9 deletions(-) (limited to 'include')

diff --git a/drivers/base/power/domain_governor.c b/drivers/base/power/domain_governor.c index 490ed7deb99a..c6c218758f0b 100644 --- a/drivers/base/power/domain_governor.c +++ b/drivers/base/power/domain_governor.c @@ -117,6 +117,55 @@ static bool default_suspend_ok(struct device *dev) return td->cached_suspend_ok; } +static void update_domain_next_wakeup(struct generic_pm_domain *genpd, ktime_t now) +{ + ktime_t domain_wakeup = KTIME_MAX; + ktime_t next_wakeup; + struct pm_domain_data *pdd; + struct gpd_link *link; + + if (!(genpd->flags & GENPD_FLAG_MIN_RESIDENCY)) + return; + + /* + * Devices that have a predictable wakeup pattern may specify + * their next wakeup. Let's find the next wakeup from all the + * devices attached to this domain and from all the sub-domains. + * It is possible that a component's next wakeup may have become + * stale by the time we read it here; such stale values will be + * ignored to ensure the domain is able to enter its optimal idle + * state. + */ + list_for_each_entry(pdd, &genpd->dev_list, list_node) { + next_wakeup = to_gpd_data(pdd)->next_wakeup; + if (next_wakeup != KTIME_MAX && !ktime_before(next_wakeup, now)) + if (ktime_before(next_wakeup, domain_wakeup)) + domain_wakeup = next_wakeup; + } + + list_for_each_entry(link, &genpd->parent_links, parent_node) { + next_wakeup = link->child->next_wakeup; + if (next_wakeup != KTIME_MAX && !ktime_before(next_wakeup, now)) + if (ktime_before(next_wakeup, domain_wakeup)) + domain_wakeup = next_wakeup; + } + + genpd->next_wakeup = domain_wakeup; +} + +static bool next_wakeup_allows_state(struct generic_pm_domain *genpd, + unsigned int state, ktime_t now) +{ + ktime_t domain_wakeup = genpd->next_wakeup; + s64 idle_time_ns, min_sleep_ns; + + min_sleep_ns = genpd->states[state].power_off_latency_ns + + genpd->states[state].residency_ns; + + idle_time_ns = ktime_to_ns(ktime_sub(domain_wakeup, now)); + + return idle_time_ns >= min_sleep_ns; +}
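A quick numeric check of this test (illustrative values): for a state with power_off_latency_ns = 500000 and residency_ns = 2000000, min_sleep_ns is 2.5 ms, so a wakeup 16 ms away allows the state while one 2 ms away rejects it:

	/* min_sleep_ns = 500000 + 2000000 = 2500000 ns			*/
	/* idle_time_ns = 16 ms -> 16000000 >= 2500000 -> allowed	*/
	/* idle_time_ns =  2 ms ->  2000000 <  2500000 -> rejected	*/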
 static bool __default_power_down_ok(struct dev_pm_domain *pd, unsigned int state) { @@ -201,16 +250,41 @@ } /** - * default_power_down_ok - Default generic PM domain power off governor routine. + * _default_power_down_ok - Default generic PM domain power off governor routine. * @pd: PM domain to check. * * This routine must be executed under the PM domain's lock. */ -static bool default_power_down_ok(struct dev_pm_domain *pd) +static bool _default_power_down_ok(struct dev_pm_domain *pd, ktime_t now) { struct generic_pm_domain *genpd = pd_to_genpd(pd); + int state_idx = genpd->state_count - 1; struct gpd_link *link; + /* + * Find the next wakeup from the devices that can determine their own + * wakeup, for every device down the hierarchy, to know when the domain + * would wake up. It is not worthwhile to sleep if the state's residency + * cannot be met. + */ + update_domain_next_wakeup(genpd, now); + if ((genpd->flags & GENPD_FLAG_MIN_RESIDENCY) && (genpd->next_wakeup != KTIME_MAX)) { + /* Let's find out the deepest domain idle state the devices prefer */ + while (state_idx >= 0) { + if (next_wakeup_allows_state(genpd, state_idx, now)) { + genpd->max_off_time_changed = true; + break; + } + state_idx--; + } + + if (state_idx < 0) { + state_idx = 0; + genpd->cached_power_down_ok = false; + goto done; + } + } + if (!genpd->max_off_time_changed) { genpd->state_idx = genpd->cached_power_down_state_idx; return genpd->cached_power_down_ok; @@ -228,21 +302,30 @@ genpd->max_off_time_ns = -1; genpd->max_off_time_changed = false; genpd->cached_power_down_ok = true; - genpd->state_idx = genpd->state_count - 1; - /* Find a state to power down to, starting from the deepest. */ - while (!__default_power_down_ok(pd, genpd->state_idx)) { - if (genpd->state_idx == 0) { + /* + * Find a state to power down to, starting from the state + * determined by the next wakeup. + */ + while (!__default_power_down_ok(pd, state_idx)) { + if (state_idx == 0) { genpd->cached_power_down_ok = false; break; } - genpd->state_idx--; + state_idx--; } +done: + genpd->state_idx = state_idx; genpd->cached_power_down_state_idx = genpd->state_idx; return genpd->cached_power_down_ok; } +static bool default_power_down_ok(struct dev_pm_domain *pd) +{ + return _default_power_down_ok(pd, ktime_get()); +} + static bool always_on_power_down_ok(struct dev_pm_domain *domain) { return false; @@ -254,11 +337,12 @@ static bool cpu_power_down_ok(struct dev_pm_domain *pd) struct generic_pm_domain *genpd = pd_to_genpd(pd); struct cpuidle_device *dev; ktime_t domain_wakeup, next_hrtimer; + ktime_t now = ktime_get(); s64 idle_duration_ns; int cpu, i; /* Validate dev PM QoS constraints. */ - if (!default_power_down_ok(pd)) + if (!_default_power_down_ok(pd, now)) return false; if (!(genpd->flags & GENPD_FLAG_CPU_DOMAIN)) @@ -280,7 +364,7 @@ } /* The minimum idle duration is from now - until the next wakeup. */ - idle_duration_ns = ktime_to_ns(ktime_sub(domain_wakeup, ktime_get())); + idle_duration_ns = ktime_to_ns(ktime_sub(domain_wakeup, now)); if (idle_duration_ns <= 0) return false; diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h index 735583c0bc6d..dfcfbcecc34b 100644 --- a/include/linux/pm_domain.h +++ b/include/linux/pm_domain.h @@ -56,6 +56,10 @@ * * GENPD_FLAG_RPM_ALWAYS_ON: Instructs genpd to always keep the PM domain * powered on except for system suspend. + * + * GENPD_FLAG_MIN_RESIDENCY: Enable the genpd governor to consider its + * components' next wakeup when determining the + * optimal idle state. */ #define GENPD_FLAG_PM_CLK (1U << 0) #define GENPD_FLAG_IRQ_SAFE (1U << 1) @@ -63,6 +67,7 @@ #define GENPD_FLAG_ACTIVE_WAKEUP (1U << 3) #define GENPD_FLAG_CPU_DOMAIN (1U << 4) #define GENPD_FLAG_RPM_ALWAYS_ON (1U << 5) +#define GENPD_FLAG_MIN_RESIDENCY (1U << 6)
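Opting a domain into this behavior is a one-line flag in the genpd provider; a sketch (hypothetical provider, assuming its idle states carry residency values):

	my_genpd.flags |= GENPD_FLAG_MIN_RESIDENCY;
	ret = pm_genpd_init(&my_genpd, &simple_qos_governor, false);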
 enum gpd_status { GENPD_STATE_ON = 0, /* PM domain is on */ @@ -130,6 +135,7 @@ struct generic_pm_domain { unsigned int state); struct gpd_dev_ops dev_ops; s64 max_off_time_ns; /* Maximum allowed "suspended" time. */ + ktime_t next_wakeup; /* Maintained by the domain governor */ bool max_off_time_changed; bool cached_power_down_ok; bool cached_power_down_state_idx; -- cgit v1.2.3

From 0bfa0820c274b019583b3454c6c889c99c24558d Mon Sep 17 00:00:00 2001 From: Nicolas Pitre Date: Mon, 25 Jan 2021 14:29:18 -0500 Subject: PM: clk: make PM clock layer compatible with clocks that must sleep

The clock API splits its interface into sleepable and atomic contexts: - clk_prepare/clk_unprepare for stuff that might sleep - clk_enable/clk_disable for anything that may be done in atomic context

The code handling runtime PM for clocks only calls clk_disable() on suspend requests, and clk_enable() on resume requests. This means that runtime PM with clock providers that only have the prepare/unprepare methods implemented is basically useless.

Many clock implementations can't accommodate atomic contexts. This is often the case when communication with the clock happens through another subsystem like I2C or SCMI. Let's make the clock PM code useful with such clocks by safely invoking clk_prepare/clk_unprepare upon resume/suspend requests. Of course, when such clocks are registered with the PM layer then pm_runtime_irq_safe() can't be used, and neither pm_runtime_suspend() nor pm_runtime_resume() may be invoked in atomic context.

For clocks that do implement the enable and disable methods then everything just works as before.

A note on sparse: According to https://lwn.net/Articles/109066/ there are things that sparse can't cope with. In particular, pm_clk_op_lock() and pm_clk_op_unlock() may or may not lock/unlock psd->lock depending on some runtime condition. To work around that we tell it the lock is always untaken for the purpose of static analysis.

Thanks to Naresh Kamboju for reporting issues with the initial patch.

Signed-off-by: Nicolas Pitre Tested-by: Naresh Kamboju Signed-off-by: Rafael J. Wysocki --- drivers/base/power/clock_ops.c | 223 +++++++++++++++++++++++++++++++++-------- drivers/clk/clk.c | 21 ++++ include/linux/clk.h | 24 ++++- include/linux/pm.h | 2 + 4 files changed, 228 insertions(+), 42 deletions(-) (limited to 'include')

diff --git a/drivers/base/power/clock_ops.c b/drivers/base/power/clock_ops.c index ced6863a16a5..84d5acb6301b 100644 --- a/drivers/base/power/clock_ops.c +++ b/drivers/base/power/clock_ops.c @@ -23,6 +23,7 @@ enum pce_status { PCE_STATUS_NONE = 0, PCE_STATUS_ACQUIRED, + PCE_STATUS_PREPARED, PCE_STATUS_ENABLED, PCE_STATUS_ERROR, }; @@ -32,8 +33,112 @@ struct pm_clock_entry { char *con_id; struct clk *clk; enum pce_status status; + bool enabled_when_prepared; }; +/** + * pm_clk_list_lock - ensure exclusive access for modifying the PM clock + * entry list. + * @psd: pm_subsys_data instance corresponding to the PM clock entry list + * and clock_op_might_sleep count to be modified. + * + * Get exclusive access before modifying the PM clock entry list and the + * clock_op_might_sleep count to guard against concurrent modifications. + * This also protects against a concurrent clock_op_might_sleep and PM clock + * entry list usage in pm_clk_suspend()/pm_clk_resume() that may or may not + * happen in atomic context, hence both the mutex and the spinlock must be + * taken here. + */ +static void pm_clk_list_lock(struct pm_subsys_data *psd) + __acquires(&psd->lock) +{ + mutex_lock(&psd->clock_mutex); + spin_lock_irq(&psd->lock); +}
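For context, a consumer wires the PM clock list up once at probe time; a minimal sketch (hypothetical driver, error handling trimmed). Note that if any of its clocks only implement prepare/unprepare, the driver must not declare pm_runtime_irq_safe():

	static int foo_probe(struct platform_device *pdev)
	{
		int ret;

		ret = pm_clk_create(&pdev->dev);
		if (ret)
			return ret;

		ret = pm_clk_add(&pdev->dev, "bus");	/* may be a sleeping clock */
		if (ret) {
			pm_clk_destroy(&pdev->dev);
			return ret;
		}

		pm_runtime_enable(&pdev->dev);
		return 0;
	}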
+/** + * pm_clk_list_unlock - counterpart to pm_clk_list_lock(). + * @psd: the same pm_subsys_data instance previously passed to + * pm_clk_list_lock(). + */ +static void pm_clk_list_unlock(struct pm_subsys_data *psd) + __releases(&psd->lock) +{ + spin_unlock_irq(&psd->lock); + mutex_unlock(&psd->clock_mutex); +} + +/** + * pm_clk_op_lock - ensure exclusive access for performing clock operations. + * @psd: pm_subsys_data instance corresponding to the PM clock entry list + * and clock_op_might_sleep count being used. + * @flags: stored irq flags. + * @fn: string for the caller function's name. + * + * This is used by pm_clk_suspend() and pm_clk_resume() to guard + * against concurrent modifications to the clock entry list and the + * clock_op_might_sleep count. If clock_op_might_sleep is != 0 then + * only the mutex can be locked and those functions can only be used in + * non atomic context. If clock_op_might_sleep == 0 then these functions + * may be used in any context and only the spinlock can be locked. + * Returns -EPERM if called in atomic context when clock ops might sleep. + */ +static int pm_clk_op_lock(struct pm_subsys_data *psd, unsigned long *flags, + const char *fn) + /* sparse annotations don't work here as exit state isn't static */ +{ + bool atomic_context = in_atomic() || irqs_disabled(); + +try_again: + spin_lock_irqsave(&psd->lock, *flags); + if (!psd->clock_op_might_sleep) { + /* the __release is there to work around sparse limitations */ + __release(&psd->lock); + return 0; + } + + /* bail out if in atomic context */ + if (atomic_context) { + pr_err("%s: atomic context with clock_op_might_sleep = %d", + fn, psd->clock_op_might_sleep); + spin_unlock_irqrestore(&psd->lock, *flags); + might_sleep(); + return -EPERM; + } + + /* we must switch to the mutex */ + spin_unlock_irqrestore(&psd->lock, *flags); + mutex_lock(&psd->clock_mutex); + + /* + * There was a possibility for psd->clock_op_might_sleep + * to become 0 above. Keep the mutex only if not the case. + */ + if (likely(psd->clock_op_might_sleep)) + return 0; + + mutex_unlock(&psd->clock_mutex); + goto try_again; +} + +/** + * pm_clk_op_unlock - counterpart to pm_clk_op_lock(). + * @psd: the same pm_subsys_data instance previously passed to + * pm_clk_op_lock(). + * @flags: irq flags provided by pm_clk_op_lock().
+ */ +static void pm_clk_op_unlock(struct pm_subsys_data *psd, unsigned long *flags) + /* sparse annotations don't work here as entry state isn't static */ +{ + if (psd->clock_op_might_sleep) { + mutex_unlock(&psd->clock_mutex); + } else { + /* the __acquire is there to work around sparse limitations */ + __acquire(&psd->lock); + spin_unlock_irqrestore(&psd->lock, *flags); + } +} + /** * pm_clk_enable - Enable a clock, reporting any errors * @dev: The device for the given clock @@ -43,14 +148,21 @@ static inline void __pm_clk_enable(struct device *dev, struct pm_clock_entry *ce { int ret; - if (ce->status < PCE_STATUS_ERROR) { + switch (ce->status) { + case PCE_STATUS_ACQUIRED: + ret = clk_prepare_enable(ce->clk); + break; + case PCE_STATUS_PREPARED: ret = clk_enable(ce->clk); - if (!ret) - ce->status = PCE_STATUS_ENABLED; - else - dev_err(dev, "%s: failed to enable clk %p, error %d\n", - __func__, ce->clk, ret); + break; + default: + return; } + if (!ret) + ce->status = PCE_STATUS_ENABLED; + else + dev_err(dev, "%s: failed to enable clk %p, error %d\n", + __func__, ce->clk, ret); } /** @@ -64,17 +176,20 @@ static void pm_clk_acquire(struct device *dev, struct pm_clock_entry *ce) ce->clk = clk_get(dev, ce->con_id); if (IS_ERR(ce->clk)) { ce->status = PCE_STATUS_ERROR; + return; + } else if (clk_is_enabled_when_prepared(ce->clk)) { + /* we defer preparing the clock in that case */ + ce->status = PCE_STATUS_ACQUIRED; + ce->enabled_when_prepared = true; + } else if (clk_prepare(ce->clk)) { + ce->status = PCE_STATUS_ERROR; + dev_err(dev, "clk_prepare() failed\n"); + return; } else { - if (clk_prepare(ce->clk)) { - ce->status = PCE_STATUS_ERROR; - dev_err(dev, "clk_prepare() failed\n"); - } else { - ce->status = PCE_STATUS_ACQUIRED; - dev_dbg(dev, - "Clock %pC con_id %s managed by runtime PM.\n", - ce->clk, ce->con_id); - } + ce->status = PCE_STATUS_PREPARED; } + dev_dbg(dev, "Clock %pC con_id %s managed by runtime PM.\n", + ce->clk, ce->con_id); } static int __pm_clk_add(struct device *dev, const char *con_id, @@ -106,9 +221,11 @@ static int __pm_clk_add(struct device *dev, const char *con_id, pm_clk_acquire(dev, ce); - spin_lock_irq(&psd->lock); + pm_clk_list_lock(psd); list_add_tail(&ce->node, &psd->clock_list); - spin_unlock_irq(&psd->lock); + if (ce->enabled_when_prepared) + psd->clock_op_might_sleep++; + pm_clk_list_unlock(psd); return 0; } @@ -239,14 +356,20 @@ static void __pm_clk_remove(struct pm_clock_entry *ce) if (!ce) return; - if (ce->status < PCE_STATUS_ERROR) { - if (ce->status == PCE_STATUS_ENABLED) - clk_disable(ce->clk); - - if (ce->status >= PCE_STATUS_ACQUIRED) { - clk_unprepare(ce->clk); + switch (ce->status) { + case PCE_STATUS_ENABLED: + clk_disable(ce->clk); + fallthrough; + case PCE_STATUS_PREPARED: + clk_unprepare(ce->clk); + fallthrough; + case PCE_STATUS_ACQUIRED: + case PCE_STATUS_ERROR: + if (!IS_ERR(ce->clk)) clk_put(ce->clk); - } + break; + default: + break; } kfree(ce->con_id); @@ -269,7 +392,7 @@ void pm_clk_remove(struct device *dev, const char *con_id) if (!psd) return; - spin_lock_irq(&psd->lock); + pm_clk_list_lock(psd); list_for_each_entry(ce, &psd->clock_list, node) { if (!con_id && !ce->con_id) @@ -280,12 +403,14 @@ void pm_clk_remove(struct device *dev, const char *con_id) goto remove; } - spin_unlock_irq(&psd->lock); + pm_clk_list_unlock(psd); return; remove: list_del(&ce->node); - spin_unlock_irq(&psd->lock); + if (ce->enabled_when_prepared) + psd->clock_op_might_sleep--; + pm_clk_list_unlock(psd); __pm_clk_remove(ce); } @@ -307,19 +432,21 @@ void 
pm_clk_remove_clk(struct device *dev, struct clk *clk) if (!psd || !clk) return; - spin_lock_irq(&psd->lock); + pm_clk_list_lock(psd); list_for_each_entry(ce, &psd->clock_list, node) { if (clk == ce->clk) goto remove; } - spin_unlock_irq(&psd->lock); + pm_clk_list_unlock(psd); return; remove: list_del(&ce->node); - spin_unlock_irq(&psd->lock); + if (ce->enabled_when_prepared) + psd->clock_op_might_sleep--; + pm_clk_list_unlock(psd); __pm_clk_remove(ce); } @@ -330,13 +457,16 @@ EXPORT_SYMBOL_GPL(pm_clk_remove_clk); * @dev: Device to initialize the list of PM clocks for. * * Initialize the lock and clock_list members of the device's pm_subsys_data - * object. + * object, set the count of clocks that might sleep to 0. */ void pm_clk_init(struct device *dev) { struct pm_subsys_data *psd = dev_to_psd(dev); - if (psd) + if (psd) { INIT_LIST_HEAD(&psd->clock_list); + mutex_init(&psd->clock_mutex); + psd->clock_op_might_sleep = 0; + } } EXPORT_SYMBOL_GPL(pm_clk_init); @@ -372,12 +502,13 @@ void pm_clk_destroy(struct device *dev) INIT_LIST_HEAD(&list); - spin_lock_irq(&psd->lock); + pm_clk_list_lock(psd); list_for_each_entry_safe_reverse(ce, c, &psd->clock_list, node) list_move(&ce->node, &list); + psd->clock_op_might_sleep = 0; - spin_unlock_irq(&psd->lock); + pm_clk_list_unlock(psd); dev_pm_put_subsys_data(dev); @@ -397,23 +528,30 @@ int pm_clk_suspend(struct device *dev) struct pm_subsys_data *psd = dev_to_psd(dev); struct pm_clock_entry *ce; unsigned long flags; + int ret; dev_dbg(dev, "%s()\n", __func__); if (!psd) return 0; - spin_lock_irqsave(&psd->lock, flags); + ret = pm_clk_op_lock(psd, &flags, __func__); + if (ret) + return ret; list_for_each_entry_reverse(ce, &psd->clock_list, node) { - if (ce->status < PCE_STATUS_ERROR) { - if (ce->status == PCE_STATUS_ENABLED) + if (ce->status == PCE_STATUS_ENABLED) { + if (ce->enabled_when_prepared) { + clk_disable_unprepare(ce->clk); + ce->status = PCE_STATUS_ACQUIRED; + } else { clk_disable(ce->clk); - ce->status = PCE_STATUS_ACQUIRED; + ce->status = PCE_STATUS_PREPARED; + } } } - spin_unlock_irqrestore(&psd->lock, flags); + pm_clk_op_unlock(psd, &flags); return 0; } @@ -428,18 +566,21 @@ int pm_clk_resume(struct device *dev) struct pm_subsys_data *psd = dev_to_psd(dev); struct pm_clock_entry *ce; unsigned long flags; + int ret; dev_dbg(dev, "%s()\n", __func__); if (!psd) return 0; - spin_lock_irqsave(&psd->lock, flags); + ret = pm_clk_op_lock(psd, &flags, __func__); + if (ret) + return ret; list_for_each_entry(ce, &psd->clock_list, node) __pm_clk_enable(dev, ce); - spin_unlock_irqrestore(&psd->lock, flags); + pm_clk_op_unlock(psd, &flags); return 0; } diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c index 8c1d04db990d..3d751ae5bc70 100644 --- a/drivers/clk/clk.c +++ b/drivers/clk/clk.c @@ -1164,6 +1164,27 @@ int clk_enable(struct clk *clk) } EXPORT_SYMBOL_GPL(clk_enable); +/** + * clk_is_enabled_when_prepared - indicate if preparing a clock also enables it. + * @clk: clock source + * + * Returns true if clk_prepare() implicitly enables the clock, effectively + * making clk_enable()/clk_disable() no-ops, false otherwise. + * + * This is of interest mainly to power management code where actually + * disabling the clock also requires unpreparing it to have any material + * effect. + * + * Regardless of the value returned here, the caller must always invoke + * clk_enable() or clk_prepare_enable() and counterparts for usage counts + * to be right. 
+ */ +bool clk_is_enabled_when_prepared(struct clk *clk) +{ + return clk && !(clk->core->ops->enable && clk->core->ops->disable); +} +EXPORT_SYMBOL_GPL(clk_is_enabled_when_prepared); + static int clk_core_prepare_enable(struct clk_core *core) { int ret; diff --git a/include/linux/clk.h b/include/linux/clk.h index 31ff1bf1b79f..a4a86aa8b11a 100644 --- a/include/linux/clk.h +++ b/include/linux/clk.h @@ -238,6 +238,7 @@ static inline bool clk_is_match(const struct clk *p, const struct clk *q) #endif +#ifdef CONFIG_HAVE_CLK_PREPARE /** * clk_prepare - prepare a clock source * @clk: clock source @@ -246,10 +247,26 @@ static inline bool clk_is_match(const struct clk *p, const struct clk *q) * * Must not be called from within atomic context. */ -#ifdef CONFIG_HAVE_CLK_PREPARE int clk_prepare(struct clk *clk); int __must_check clk_bulk_prepare(int num_clks, const struct clk_bulk_data *clks); + +/** + * clk_is_enabled_when_prepared - indicate if preparing a clock also enables it. + * @clk: clock source + * + * Returns true if clk_prepare() implicitly enables the clock, effectively + * making clk_enable()/clk_disable() no-ops, false otherwise. + * + * This is of interest mainly to the power management code where actually + * disabling the clock also requires unpreparing it to have any material + * effect. + * + * Regardless of the value returned here, the caller must always invoke + * clk_enable() or clk_prepare_enable() and counterparts for usage counts + * to be right. + */ +bool clk_is_enabled_when_prepared(struct clk *clk); #else static inline int clk_prepare(struct clk *clk) { @@ -263,6 +280,11 @@ clk_bulk_prepare(int num_clks, const struct clk_bulk_data *clks) might_sleep(); return 0; } + +static inline bool clk_is_enabled_when_prepared(struct clk *clk) +{ + return false; +} #endif /** diff --git a/include/linux/pm.h b/include/linux/pm.h index 47aca6bac1d6..482313a8ccfc 100644 --- a/include/linux/pm.h +++ b/include/linux/pm.h @@ -537,6 +537,8 @@ struct pm_subsys_data { spinlock_t lock; unsigned int refcount; #ifdef CONFIG_PM_CLK + unsigned int clock_op_might_sleep; + struct mutex clock_mutex; struct list_head clock_list; #endif #ifdef CONFIG_PM_GENERIC_DOMAINS -- cgit v1.2.3 From 8dd5cada393f6f4e825833a6ff05b1f51f36a791 Mon Sep 17 00:00:00 2001 From: Dmitry Osipenko Date: Mon, 18 Jan 2021 03:55:18 +0300 Subject: opp: Add dev_pm_opp_find_level_ceil() Add a ceil version of the dev_pm_opp_find_level(). It's handy to have if levels don't start from 0 in OPP table and zero usually means a minimal level. Tested-by: Peter Geis Tested-by: Nicolas Chauvet Tested-by: Matt Merhar Signed-off-by: Dmitry Osipenko Signed-off-by: Viresh Kumar --- drivers/opp/core.c | 49 +++++++++++++++++++++++++++++++++++++++++++++++++ include/linux/pm_opp.h | 8 ++++++++ 2 files changed, 57 insertions(+) (limited to 'include') diff --git a/drivers/opp/core.c b/drivers/opp/core.c index dc7a298f3611..b29f31146770 100644 --- a/drivers/opp/core.c +++ b/drivers/opp/core.c @@ -449,6 +449,55 @@ struct dev_pm_opp *dev_pm_opp_find_level_exact(struct device *dev, } EXPORT_SYMBOL_GPL(dev_pm_opp_find_level_exact); +/** + * dev_pm_opp_find_level_ceil() - search for an rounded up level + * @dev: device for which we do this operation + * @level: level to search for + * + * Return: Searches for rounded up match in the opp table and returns pointer + * to the matching opp if found, else returns ERR_PTR in case of error and + * should be handled using IS_ERR. 
Error return values can be: + * EINVAL: for bad pointer + * ERANGE: no match found for search + * ENODEV: if device not found in list of registered devices + * + * The callers are required to call dev_pm_opp_put() for the returned OPP after + * use. + */ +struct dev_pm_opp *dev_pm_opp_find_level_ceil(struct device *dev, + unsigned int *level) +{ + struct opp_table *opp_table; + struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE); + + opp_table = _find_opp_table(dev); + if (IS_ERR(opp_table)) { + int r = PTR_ERR(opp_table); + + dev_err(dev, "%s: OPP table not found (%d)\n", __func__, r); + return ERR_PTR(r); + } + + mutex_lock(&opp_table->lock); + + list_for_each_entry(temp_opp, &opp_table->opp_list, node) { + if (temp_opp->available && temp_opp->level >= *level) { + opp = temp_opp; + *level = opp->level; + + /* Increment the reference count of OPP */ + dev_pm_opp_get(opp); + break; + } + } + + mutex_unlock(&opp_table->lock); + dev_pm_opp_put_opp_table(opp_table); + + return opp; +} +EXPORT_SYMBOL_GPL(dev_pm_opp_find_level_ceil); + static noinline struct dev_pm_opp *_find_freq_ceil(struct opp_table *opp_table, unsigned long *freq) { diff --git a/include/linux/pm_opp.h b/include/linux/pm_opp.h index 1435c054016a..2b3030cb2ed2 100644 --- a/include/linux/pm_opp.h +++ b/include/linux/pm_opp.h @@ -111,6 +111,8 @@ struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev, bool available); struct dev_pm_opp *dev_pm_opp_find_level_exact(struct device *dev, unsigned int level); +struct dev_pm_opp *dev_pm_opp_find_level_ceil(struct device *dev, + unsigned int *level); struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev, unsigned long *freq); @@ -226,6 +228,12 @@ static inline struct dev_pm_opp *dev_pm_opp_find_level_exact(struct device *dev, return ERR_PTR(-ENOTSUPP); } +static inline struct dev_pm_opp *dev_pm_opp_find_level_ceil(struct device *dev, + unsigned int *level) +{ + return ERR_PTR(-ENOTSUPP); +} + static inline struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev, unsigned long *freq) { -- cgit v1.2.3 From 597ff5431fd41afa888809f7936508a15c977cde Mon Sep 17 00:00:00 2001 From: Dmitry Osipenko Date: Mon, 18 Jan 2021 03:55:19 +0300 Subject: opp: Add dev_pm_opp_get_required_pstate() Add dev_pm_opp_get_required_pstate() which allows OPP users to retrieve required performance state of a given OPP. Tested-by: Peter Geis Tested-by: Nicolas Chauvet Tested-by: Matt Merhar Signed-off-by: Dmitry Osipenko Signed-off-by: Viresh Kumar --- drivers/opp/core.c | 22 ++++++++++++++++++++++ include/linux/pm_opp.h | 10 ++++++++++ 2 files changed, 32 insertions(+) (limited to 'include') diff --git a/drivers/opp/core.c b/drivers/opp/core.c index b29f31146770..64a95aaa90eb 100644 --- a/drivers/opp/core.c +++ b/drivers/opp/core.c @@ -145,6 +145,28 @@ unsigned int dev_pm_opp_get_level(struct dev_pm_opp *opp) } EXPORT_SYMBOL_GPL(dev_pm_opp_get_level); +/** + * dev_pm_opp_get_required_pstate() - Gets the required performance state + * corresponding to an available opp + * @opp: opp for which performance state has to be returned for + * @index: index of the required opp + * + * Return: performance state read from device tree corresponding to the + * required opp, else return 0. 
+ */ +unsigned int dev_pm_opp_get_required_pstate(struct dev_pm_opp *opp, + unsigned int index) +{ + if (IS_ERR_OR_NULL(opp) || !opp->available || + index >= opp->opp_table->required_opp_count) { + pr_err("%s: Invalid parameters\n", __func__); + return 0; + } + + return opp->required_opps[index]->pstate; +} +EXPORT_SYMBOL_GPL(dev_pm_opp_get_required_pstate); + /** * dev_pm_opp_is_turbo() - Returns if opp is turbo OPP or not * @opp: opp for which turbo mode is being verified diff --git a/include/linux/pm_opp.h b/include/linux/pm_opp.h index 2b3030cb2ed2..8f926815bad9 100644 --- a/include/linux/pm_opp.h +++ b/include/linux/pm_opp.h @@ -98,6 +98,9 @@ unsigned long dev_pm_opp_get_freq(struct dev_pm_opp *opp); unsigned int dev_pm_opp_get_level(struct dev_pm_opp *opp); +unsigned int dev_pm_opp_get_required_pstate(struct dev_pm_opp *opp, + unsigned int index); + bool dev_pm_opp_is_turbo(struct dev_pm_opp *opp); int dev_pm_opp_get_opp_count(struct device *dev); @@ -186,6 +189,13 @@ static inline unsigned int dev_pm_opp_get_level(struct dev_pm_opp *opp) { return 0; } +static inline +unsigned int dev_pm_opp_get_required_pstate(struct dev_pm_opp *opp, + unsigned int index) +{ + return 0; +} + static inline bool dev_pm_opp_is_turbo(struct dev_pm_opp *opp) { return false; -- cgit v1.2.3 From ce8073d83f63a2cdcfc1b86d769456726faad51d Mon Sep 17 00:00:00 2001 From: Dmitry Osipenko Date: Thu, 21 Jan 2021 01:26:47 +0300 Subject: opp: Add dev_pm_opp_sync_regulators() Extend the OPP API with the dev_pm_opp_sync_regulators() function, which syncs the voltage state of the regulators. Tested-by: Peter Geis Tested-by: Nicolas Chauvet Tested-by: Matt Merhar Signed-off-by: Dmitry Osipenko [ Viresh: Added unlikely() ] Signed-off-by: Viresh Kumar --- drivers/opp/core.c | 41 +++++++++++++++++++++++++++++++++++++++++ include/linux/pm_opp.h | 6 ++++++ 2 files changed, 47 insertions(+) (limited to 'include') diff --git a/drivers/opp/core.c b/drivers/opp/core.c index 64a95aaa90eb..bf7cdab0ba64 100644 --- a/drivers/opp/core.c +++ b/drivers/opp/core.c @@ -2584,3 +2584,44 @@ void dev_pm_opp_remove_table(struct device *dev) dev_pm_opp_put_opp_table(opp_table); } EXPORT_SYMBOL_GPL(dev_pm_opp_remove_table); + +/** + * dev_pm_opp_sync_regulators() - Sync state of voltage regulators + * @dev: device for which we do this operation + * + * Sync the voltage state of the OPP table regulators. + * + * Return: 0 on success or a negative error value.
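+ *
+ * A sketch of the intended use, e.g. from a driver's resume path where the
+ * regulator state may have been lost (hypothetical caller, not part of this
+ * patch):
+ *
+ *	ret = dev_pm_opp_sync_regulators(dev);
+ *	if (ret)
+ *		dev_err(dev, "failed to sync OPP regulators: %d\n", ret);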
+ */ +int dev_pm_opp_sync_regulators(struct device *dev) +{ + struct opp_table *opp_table; + struct regulator *reg; + int i, ret = 0; + + /* Device may not have OPP table */ + opp_table = _find_opp_table(dev); + if (IS_ERR(opp_table)) + return 0; + + /* Regulator may not be required for the device */ + if (unlikely(!opp_table->regulators)) + goto put_table; + + /* Nothing to sync if voltage wasn't changed */ + if (!opp_table->enabled) + goto put_table; + + for (i = 0; i < opp_table->regulator_count; i++) { + reg = opp_table->regulators[i]; + ret = regulator_sync_voltage(reg); + if (ret) + break; + } +put_table: + /* Drop reference taken by _find_opp_table() */ + dev_pm_opp_put_opp_table(opp_table); + + return ret; +} +EXPORT_SYMBOL_GPL(dev_pm_opp_sync_regulators); diff --git a/include/linux/pm_opp.h b/include/linux/pm_opp.h index 8f926815bad9..979b208bc4a8 100644 --- a/include/linux/pm_opp.h +++ b/include/linux/pm_opp.h @@ -161,6 +161,7 @@ int dev_pm_opp_set_sharing_cpus(struct device *cpu_dev, const struct cpumask *cp int dev_pm_opp_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask); void dev_pm_opp_remove_table(struct device *dev); void dev_pm_opp_cpumask_remove_table(const struct cpumask *cpumask); +int dev_pm_opp_sync_regulators(struct device *dev); #else static inline struct opp_table *dev_pm_opp_get_opp_table(struct device *dev) { @@ -384,6 +385,11 @@ static inline void dev_pm_opp_cpumask_remove_table(const struct cpumask *cpumask { } +static inline int dev_pm_opp_sync_regulators(struct device *dev) +{ + return -ENOTSUPP; +} + #endif /* CONFIG_PM_OPP */ #if defined(CONFIG_PM_OPP) && defined(CONFIG_OF) -- cgit v1.2.3 From 559fef0dfd91145b59b7c61061504f344ecf9ad8 Mon Sep 17 00:00:00 2001 From: Viresh Kumar Date: Wed, 27 Jan 2021 14:23:45 +0530 Subject: opp: Add dev_pm_opp_of_add_table_noclk() A few drivers have a device clk but don't want the OPP core to handle it. Add a new helper for them, dev_pm_opp_of_add_table_noclk(). Signed-off-by: Viresh Kumar Tested-by: Dmitry Osipenko --- drivers/opp/of.c | 18 ++++++++++++++++++ include/linux/pm_opp.h | 6 ++++++ 2 files changed, 24 insertions(+) (limited to 'include') diff --git a/drivers/opp/of.c b/drivers/opp/of.c index d4b51b2e384f..a905497c75f8 100644 --- a/drivers/opp/of.c +++ b/drivers/opp/of.c @@ -1030,6 +1030,24 @@ int dev_pm_opp_of_add_table_indexed(struct device *dev, int index) } EXPORT_SYMBOL_GPL(dev_pm_opp_of_add_table_indexed); +/** + * dev_pm_opp_of_add_table_noclk() - Initialize indexed opp table from device + * tree without getting clk for device. + * @dev: device pointer used to lookup OPP table. + * @index: Index number. + * + * Register the initial OPP table with the OPP library for the given device, + * using only the "operating-points-v2" property. Do not try to get the clk for + * the device. + * + * Return: Refer to dev_pm_opp_of_add_table() for return values.
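+ *
+ * Illustrative use from the probe path of a driver that manages its clk
+ * itself (hypothetical caller, not part of this patch):
+ *
+ *	ret = dev_pm_opp_of_add_table_noclk(dev, 0);
+ *	if (ret)
+ *		return dev_err_probe(dev, ret, "failed to add OPP table\n");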
+ */ +int dev_pm_opp_of_add_table_noclk(struct device *dev, int index) +{ + return _of_add_table_indexed(dev, index, false); +} +EXPORT_SYMBOL_GPL(dev_pm_opp_of_add_table_noclk); + /* CPU device specific helpers */ /** diff --git a/include/linux/pm_opp.h b/include/linux/pm_opp.h index 979b208bc4a8..158158620dde 100644 --- a/include/linux/pm_opp.h +++ b/include/linux/pm_opp.h @@ -395,6 +395,7 @@ static inline int dev_pm_opp_sync_regulators(struct device *dev) #if defined(CONFIG_PM_OPP) && defined(CONFIG_OF) int dev_pm_opp_of_add_table(struct device *dev); int dev_pm_opp_of_add_table_indexed(struct device *dev, int index); +int dev_pm_opp_of_add_table_noclk(struct device *dev, int index); void dev_pm_opp_of_remove_table(struct device *dev); int dev_pm_opp_of_cpumask_add_table(const struct cpumask *cpumask); void dev_pm_opp_of_cpumask_remove_table(const struct cpumask *cpumask); @@ -419,6 +420,11 @@ static inline int dev_pm_opp_of_add_table_indexed(struct device *dev, int index) return -ENOTSUPP; } +static inline int dev_pm_opp_of_add_table_noclk(struct device *dev, int index) +{ + return -ENOTSUPP; +} + static inline void dev_pm_opp_of_remove_table(struct device *dev) { } -- cgit v1.2.3 From a3c47af6942dc8e07a4328913d0263a965786895 Mon Sep 17 00:00:00 2001 From: Dmitry Osipenko Date: Mon, 18 Jan 2021 03:55:20 +0300 Subject: opp: Add devm_pm_opp_register_set_opp_helper Add a resource-managed version of dev_pm_opp_register_set_opp_helper(). Tested-by: Peter Geis Tested-by: Nicolas Chauvet Tested-by: Matt Merhar Signed-off-by: Dmitry Osipenko [ Viresh: Manually apply the patch and relocate the routines ] Signed-off-by: Viresh Kumar --- drivers/opp/core.c | 34 ++++++++++++++++++++++++++++++++++ include/linux/pm_opp.h | 8 ++++++++ 2 files changed, 42 insertions(+) (limited to 'include') diff --git a/drivers/opp/core.c b/drivers/opp/core.c index 52f4a64926e6..09069a564896 100644 --- a/drivers/opp/core.c +++ b/drivers/opp/core.c @@ -2106,6 +2106,40 @@ void dev_pm_opp_unregister_set_opp_helper(struct opp_table *opp_table) } EXPORT_SYMBOL_GPL(dev_pm_opp_unregister_set_opp_helper); +static void devm_pm_opp_unregister_set_opp_helper(void *data) +{ + dev_pm_opp_unregister_set_opp_helper(data); +} + +/** + * devm_pm_opp_register_set_opp_helper() - Register custom set OPP helper + * @dev: Device for which the helper is getting registered. + * @set_opp: Custom set OPP helper. + * + * This is a resource-managed version of dev_pm_opp_register_set_opp_helper(). + * + * Return: pointer to 'struct opp_table' on success and errno otherwise.
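+ *
+ * A minimal sketch of a caller, where my_set_opp is a hypothetical custom
+ * helper and not part of this patch:
+ *
+ *	opp_table = devm_pm_opp_register_set_opp_helper(dev, my_set_opp);
+ *	if (IS_ERR(opp_table))
+ *		return PTR_ERR(opp_table);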
+ */ +struct opp_table * +devm_pm_opp_register_set_opp_helper(struct device *dev, + int (*set_opp)(struct dev_pm_set_opp_data *data)) +{ + struct opp_table *opp_table; + int err; + + opp_table = dev_pm_opp_register_set_opp_helper(dev, set_opp); + if (IS_ERR(opp_table)) + return opp_table; + + err = devm_add_action_or_reset(dev, devm_pm_opp_unregister_set_opp_helper, + opp_table); + if (err) + return ERR_PTR(err); + + return opp_table; +} +EXPORT_SYMBOL_GPL(devm_pm_opp_register_set_opp_helper); + static void _opp_detach_genpd(struct opp_table *opp_table) { int index; diff --git a/include/linux/pm_opp.h b/include/linux/pm_opp.h index 158158620dde..473daf34160d 100644 --- a/include/linux/pm_opp.h +++ b/include/linux/pm_opp.h @@ -152,6 +152,7 @@ struct opp_table *dev_pm_opp_set_clkname(struct device *dev, const char * name); void dev_pm_opp_put_clkname(struct opp_table *opp_table); struct opp_table *dev_pm_opp_register_set_opp_helper(struct device *dev, int (*set_opp)(struct dev_pm_set_opp_data *data)); void dev_pm_opp_unregister_set_opp_helper(struct opp_table *opp_table); +struct opp_table *devm_pm_opp_register_set_opp_helper(struct device *dev, int (*set_opp)(struct dev_pm_set_opp_data *data)); struct opp_table *dev_pm_opp_attach_genpd(struct device *dev, const char **names, struct device ***virt_devs); void dev_pm_opp_detach_genpd(struct opp_table *opp_table); int dev_pm_opp_xlate_performance_state(struct opp_table *src_table, struct opp_table *dst_table, unsigned int pstate); @@ -324,6 +325,13 @@ static inline struct opp_table *dev_pm_opp_register_set_opp_helper(struct device int (*set_opp)(struct dev_pm_set_opp_data *data)) { return ERR_PTR(-ENOTSUPP); } static inline void dev_pm_opp_unregister_set_opp_helper(struct opp_table *opp_table) {} +static inline struct opp_table * +devm_pm_opp_register_set_opp_helper(struct device *dev, + int (*set_opp)(struct dev_pm_set_opp_data *data)) +{ + return ERR_PTR(-ENOTSUPP); +} + static inline struct opp_table *dev_pm_opp_set_prop_name(struct device *dev, const char *name) { return ERR_PTR(-ENOTSUPP); -- cgit v1.2.3 From b4b9e223eccaeec6e05d927c292d4425fd18f243 Mon Sep 17 00:00:00 2001 From: Dmitry Osipenko Date: Mon, 18 Jan 2021 03:55:21 +0300 Subject: opp: Add devm_pm_opp_attach_genpd Add a resource-managed version of dev_pm_opp_attach_genpd(). Signed-off-by: Dmitry Osipenko [ Viresh: Manually apply the patch and relocate the routines ] Signed-off-by: Viresh Kumar --- drivers/opp/core.c | 36 ++++++++++++++++++++++++++++++++++++ include/linux/pm_opp.h | 7 +++++++ 2 files changed, 43 insertions(+) (limited to 'include') diff --git a/drivers/opp/core.c b/drivers/opp/core.c index 09069a564896..ce0ec5bde22a 100644 --- a/drivers/opp/core.c +++ b/drivers/opp/core.c @@ -2275,6 +2275,42 @@ void dev_pm_opp_detach_genpd(struct opp_table *opp_table) } EXPORT_SYMBOL_GPL(dev_pm_opp_detach_genpd); +static void devm_pm_opp_detach_genpd(void *data) +{ + dev_pm_opp_detach_genpd(data); +} + +/** + * devm_pm_opp_attach_genpd - Attach genpd(s) for the device and save virtual + * device pointer + * @dev: Consumer device for which the genpd is getting attached. + * @names: NULL-terminated array of pointers containing the names of the genpds + * to attach. + * @virt_devs: Pointer to return the array of virtual devices. + * + * This is a resource-managed version of dev_pm_opp_attach_genpd(). + * + * Return: pointer to 'struct opp_table' on success and errno otherwise.
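+ *
+ * A minimal sketch of a caller; the genpd name is hypothetical and @virt_devs
+ * may be NULL when the virtual devices are not needed (not part of this
+ * patch):
+ *
+ *	const char *names[] = { "perf", NULL };
+ *
+ *	opp_table = devm_pm_opp_attach_genpd(dev, names, NULL);
+ *	if (IS_ERR(opp_table))
+ *		return PTR_ERR(opp_table);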
+ */ +struct opp_table * +devm_pm_opp_attach_genpd(struct device *dev, const char **names, + struct device ***virt_devs) +{ + struct opp_table *opp_table; + int err; + + opp_table = dev_pm_opp_attach_genpd(dev, names, virt_devs); + if (IS_ERR(opp_table)) + return opp_table; + + err = devm_add_action_or_reset(dev, devm_pm_opp_detach_genpd, + opp_table); + if (err) + return ERR_PTR(err); + + return opp_table; +} +EXPORT_SYMBOL_GPL(devm_pm_opp_attach_genpd); + /** * dev_pm_opp_xlate_performance_state() - Find required OPP's pstate for src_table. * @src_table: OPP table which has dst_table as one of its required OPP table. diff --git a/include/linux/pm_opp.h b/include/linux/pm_opp.h index 473daf34160d..a2c871799603 100644 --- a/include/linux/pm_opp.h +++ b/include/linux/pm_opp.h @@ -155,6 +155,7 @@ void dev_pm_opp_unregister_set_opp_helper(struct opp_table *opp_table); struct opp_table *devm_pm_opp_register_set_opp_helper(struct device *dev, int (*set_opp)(struct dev_pm_set_opp_data *data)); struct opp_table *dev_pm_opp_attach_genpd(struct device *dev, const char **names, struct device ***virt_devs); void dev_pm_opp_detach_genpd(struct opp_table *opp_table); +struct opp_table *devm_pm_opp_attach_genpd(struct device *dev, const char **names, struct device ***virt_devs); int dev_pm_opp_xlate_performance_state(struct opp_table *src_table, struct opp_table *dst_table, unsigned int pstate); int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq); int dev_pm_opp_set_bw(struct device *dev, struct dev_pm_opp *opp); @@ -360,6 +361,12 @@ static inline struct opp_table *dev_pm_opp_attach_genpd(struct device *dev, cons static inline void dev_pm_opp_detach_genpd(struct opp_table *opp_table) {} +static inline struct opp_table *devm_pm_opp_attach_genpd(struct device *dev, + const char **names, struct device ***virt_devs) +{ + return ERR_PTR(-ENOTSUPP); +} + static inline int dev_pm_opp_xlate_performance_state(struct opp_table *src_table, struct opp_table *dst_table, unsigned int pstate) { return -ENOTSUPP; -- cgit v1.2.3 From abbe348340c7df9e08fd7c24491c1be31ab65370 Mon Sep 17 00:00:00 2001 From: Viresh Kumar Date: Thu, 21 Jan 2021 12:15:36 +0530 Subject: opp: Implement dev_pm_opp_set_opp() The new helper dev_pm_opp_set_opp() can be used to configure a device for a particular OPP and works for different types of devices, even the ones which don't change frequency (like power domains). Signed-off-by: Viresh Kumar Tested-by: Dmitry Osipenko --- drivers/opp/core.c | 28 ++++++++++++++++++++++++++++ include/linux/pm_opp.h | 6 ++++++ 2 files changed, 34 insertions(+) (limited to 'include') diff --git a/drivers/opp/core.c b/drivers/opp/core.c index 2b5584ef0350..fac84d5a1d45 100644 --- a/drivers/opp/core.c +++ b/drivers/opp/core.c @@ -1136,6 +1136,34 @@ put_opp_table: } EXPORT_SYMBOL_GPL(dev_pm_opp_set_rate); +/** + * dev_pm_opp_set_opp() - Configure device for OPP + * @dev: device for which we do this operation + * @opp: OPP to set to + * + * This configures the device based on the properties of the OPP passed to this + * routine. + * + * Return: 0 on success, a negative error number otherwise. + */ +int dev_pm_opp_set_opp(struct device *dev, struct dev_pm_opp *opp) +{ + struct opp_table *opp_table; + int ret; + + opp_table = _find_opp_table(dev); + if (IS_ERR(opp_table)) { + dev_err(dev, "%s: device opp doesn't exist\n", __func__); + return PTR_ERR(opp_table); + } + + ret = _set_opp(dev, opp_table, opp, opp ?
opp->rate : 0); + dev_pm_opp_put_opp_table(opp_table); + + return ret; +} +EXPORT_SYMBOL_GPL(dev_pm_opp_set_opp); + /* OPP-dev Helpers */ static void _remove_opp_dev(struct opp_device *opp_dev, struct opp_table *opp_table) diff --git a/include/linux/pm_opp.h b/include/linux/pm_opp.h index a2c871799603..7b1d47ab3fb3 100644 --- a/include/linux/pm_opp.h +++ b/include/linux/pm_opp.h @@ -158,6 +158,7 @@ void dev_pm_opp_detach_genpd(struct opp_table *opp_table); struct opp_table *devm_pm_opp_attach_genpd(struct device *dev, const char **names, struct device ***virt_devs); int dev_pm_opp_xlate_performance_state(struct opp_table *src_table, struct opp_table *dst_table, unsigned int pstate); int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq); +int dev_pm_opp_set_opp(struct device *dev, struct dev_pm_opp *opp); int dev_pm_opp_set_bw(struct device *dev, struct dev_pm_opp *opp); int dev_pm_opp_set_sharing_cpus(struct device *cpu_dev, const struct cpumask *cpumask); int dev_pm_opp_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask); @@ -377,6 +378,11 @@ static inline int dev_pm_opp_set_rate(struct device *dev, unsigned long target_f return -ENOTSUPP; } +static inline int dev_pm_opp_set_opp(struct device *dev, struct dev_pm_opp *opp) +{ + return -ENOTSUPP; +} + static inline int dev_pm_opp_set_bw(struct device *dev, struct dev_pm_opp *opp) { return -EOPNOTSUPP; -- cgit v1.2.3 From 240ae50e23061cd1fe1937daab195c17226ffd2e Mon Sep 17 00:00:00 2001 From: Viresh Kumar Date: Thu, 21 Jan 2021 15:27:55 +0530 Subject: opp: Remove dev_pm_opp_set_bw() All the users have migrated to dev_pm_opp_set_opp() now, so get rid of the duplicate API, dev_pm_opp_set_bw(), which only implements a part of the new API. While at it, remove the unnecessary parameter to _set_opp_bw(). Signed-off-by: Viresh Kumar Tested-by: Dmitry Osipenko --- drivers/opp/core.c | 41 +++++------------------------------------ include/linux/pm_opp.h | 6 ------ 2 files changed, 5 insertions(+), 42 deletions(-) (limited to 'include') diff --git a/drivers/opp/core.c b/drivers/opp/core.c index fac84d5a1d45..6958a5cd2fd8 100644 --- a/drivers/opp/core.c +++ b/drivers/opp/core.c @@ -798,7 +798,7 @@ restore_voltage: } static int _set_opp_bw(const struct opp_table *opp_table, - struct dev_pm_opp *opp, struct device *dev, bool remove) + struct dev_pm_opp *opp, struct device *dev) { u32 avg, peak; int i, ret; @@ -807,7 +807,7 @@ static int _set_opp_bw(const struct opp_table *opp_table, return 0; for (i = 0; i < opp_table->path_count; i++) { - if (remove) { + if (!opp) { avg = 0; peak = 0; } else { @@ -817,7 +817,7 @@ static int _set_opp_bw(const struct opp_table *opp_table, ret = icc_set_bw(opp_table->paths[i], avg, peak); if (ret) { dev_err(dev, "Failed to %s bandwidth[%d]: %d\n", - remove ? "remove" : "set", i, ret); + opp ? "set" : "remove", i, ret); return ret; } } @@ -917,37 +917,6 @@ static int _set_required_opps(struct device *dev, return ret; } -/** - * dev_pm_opp_set_bw() - sets bandwidth levels corresponding to an opp - * @dev: device for which we do this operation - * @opp: opp based on which the bandwidth levels are to be configured - * - * This configures the bandwidth to the levels specified by the OPP. However - * if the OPP specified is NULL the bandwidth levels are cleared out. - * - * Return: 0 on success or a negative error value.
- */ -int dev_pm_opp_set_bw(struct device *dev, struct dev_pm_opp *opp) -{ - struct opp_table *opp_table; - int ret; - - opp_table = _find_opp_table(dev); - if (IS_ERR(opp_table)) { - dev_err(dev, "%s: device opp table doesn't exist\n", __func__); - return PTR_ERR(opp_table); - } - - if (opp) - ret = _set_opp_bw(opp_table, opp, dev, false); - else - ret = _set_opp_bw(opp_table, NULL, dev, true); - - dev_pm_opp_put_opp_table(opp_table); - return ret; -} -EXPORT_SYMBOL_GPL(dev_pm_opp_set_bw); - static void _find_current_opp(struct device *dev, struct opp_table *opp_table) { struct dev_pm_opp *opp = ERR_PTR(-ENODEV); @@ -988,7 +957,7 @@ static int _disable_opp_table(struct device *dev, struct opp_table *opp_table) if (!_get_opp_count(opp_table)) return 0; - ret = _set_opp_bw(opp_table, NULL, dev, true); + ret = _set_opp_bw(opp_table, NULL, dev); if (ret) return ret; @@ -1056,7 +1025,7 @@ static int _set_opp(struct device *dev, struct opp_table *opp_table, } if (!ret) { - ret = _set_opp_bw(opp_table, opp, dev, false); + ret = _set_opp_bw(opp_table, opp, dev); if (!ret) { opp_table->enabled = true; dev_pm_opp_put(old_opp); diff --git a/include/linux/pm_opp.h b/include/linux/pm_opp.h index 7b1d47ab3fb3..25e47ab937b9 100644 --- a/include/linux/pm_opp.h +++ b/include/linux/pm_opp.h @@ -159,7 +159,6 @@ struct opp_table *devm_pm_opp_attach_genpd(struct device *dev, const char **name int dev_pm_opp_xlate_performance_state(struct opp_table *src_table, struct opp_table *dst_table, unsigned int pstate); int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq); int dev_pm_opp_set_opp(struct device *dev, struct dev_pm_opp *opp); -int dev_pm_opp_set_bw(struct device *dev, struct dev_pm_opp *opp); int dev_pm_opp_set_sharing_cpus(struct device *cpu_dev, const struct cpumask *cpumask); int dev_pm_opp_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask); void dev_pm_opp_remove_table(struct device *dev); @@ -383,11 +382,6 @@ static inline int dev_pm_opp_set_opp(struct device *dev, struct dev_pm_opp *opp) return -ENOTSUPP; } -static inline int dev_pm_opp_set_bw(struct device *dev, struct dev_pm_opp *opp) -{ - return -EOPNOTSUPP; -} - static inline int dev_pm_opp_set_sharing_cpus(struct device *cpu_dev, const struct cpumask *cpumask) { return -ENOTSUPP; -- cgit v1.2.3 From f3988bc5d58b768c5cf0dadf5f0e49f7176432df Mon Sep 17 00:00:00 2001 From: Viresh Kumar Date: Mon, 1 Feb 2021 10:35:07 +0530 Subject: opp: Fix "foo * bar" should be "foo *bar" Fix checkpatch warning: ERROR: "foo * bar" should be "foo *bar". 
Signed-off-by: Viresh Kumar --- include/linux/pm_opp.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/include/linux/pm_opp.h b/include/linux/pm_opp.h index 25e47ab937b9..c6c7d73eb015 100644 --- a/include/linux/pm_opp.h +++ b/include/linux/pm_opp.h @@ -148,7 +148,7 @@ struct opp_table *dev_pm_opp_set_prop_name(struct device *dev, const char *name) void dev_pm_opp_put_prop_name(struct opp_table *opp_table); struct opp_table *dev_pm_opp_set_regulators(struct device *dev, const char * const names[], unsigned int count); void dev_pm_opp_put_regulators(struct opp_table *opp_table); -struct opp_table *dev_pm_opp_set_clkname(struct device *dev, const char * name); +struct opp_table *dev_pm_opp_set_clkname(struct device *dev, const char *name); void dev_pm_opp_put_clkname(struct opp_table *opp_table); struct opp_table *dev_pm_opp_register_set_opp_helper(struct device *dev, int (*set_opp)(struct dev_pm_set_opp_data *data)); void dev_pm_opp_unregister_set_opp_helper(struct opp_table *opp_table); @@ -347,7 +347,7 @@ static inline struct opp_table *dev_pm_opp_set_regulators(struct device *dev, co static inline void dev_pm_opp_put_regulators(struct opp_table *opp_table) {} -static inline struct opp_table *dev_pm_opp_set_clkname(struct device *dev, const char * name) +static inline struct opp_table *dev_pm_opp_set_clkname(struct device *dev, const char *name) { return ERR_PTR(-ENOTSUPP); } -- cgit v1.2.3 From 1d614920318b914f86c1fec2adec06ad2f7c3f55 Mon Sep 17 00:00:00 2001 From: Viresh Kumar Date: Mon, 1 Feb 2021 10:48:54 +0530 Subject: opp: Replace ENOTSUPP with EOPNOTSUPP Checkpatch gives the following warning for new patches, and new patches normally copy the existing style here. Let's fix it properly. WARNING: ENOTSUPP is not a SUSV4 error code, prefer EOPNOTSUPP.
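The difference is visible to userspace: EOPNOTSUPP is a standard errno that decodes to "Operation not supported", while ENOTSUPP (524) is kernel-internal and has no userspace name. A quick illustration (hypothetical userspace snippet, not part of this patch):

	errno = 524;		/* ENOTSUPP leaking out of the kernel */
	perror("opp");		/* prints "opp: Unknown error 524" */
	errno = EOPNOTSUPP;
	perror("opp");		/* prints "opp: Operation not supported" */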
Signed-off-by: Viresh Kumar --- include/linux/pm_opp.h | 64 +++++++++++++++++++++++++------------------------- 1 file changed, 32 insertions(+), 32 deletions(-) (limited to 'include') diff --git a/include/linux/pm_opp.h b/include/linux/pm_opp.h index c6c7d73eb015..ab1d15ce559d 100644 --- a/include/linux/pm_opp.h +++ b/include/linux/pm_opp.h @@ -167,12 +167,12 @@ int dev_pm_opp_sync_regulators(struct device *dev); #else static inline struct opp_table *dev_pm_opp_get_opp_table(struct device *dev) { - return ERR_PTR(-ENOTSUPP); + return ERR_PTR(-EOPNOTSUPP); } static inline struct opp_table *dev_pm_opp_get_opp_table_indexed(struct device *dev, int index) { - return ERR_PTR(-ENOTSUPP); + return ERR_PTR(-EOPNOTSUPP); } static inline void dev_pm_opp_put_opp_table(struct opp_table *opp_table) {} @@ -232,37 +232,37 @@ static inline unsigned long dev_pm_opp_get_suspend_opp_freq(struct device *dev) static inline struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev, unsigned long freq, bool available) { - return ERR_PTR(-ENOTSUPP); + return ERR_PTR(-EOPNOTSUPP); } static inline struct dev_pm_opp *dev_pm_opp_find_level_exact(struct device *dev, unsigned int level) { - return ERR_PTR(-ENOTSUPP); + return ERR_PTR(-EOPNOTSUPP); } static inline struct dev_pm_opp *dev_pm_opp_find_level_ceil(struct device *dev, unsigned int *level) { - return ERR_PTR(-ENOTSUPP); + return ERR_PTR(-EOPNOTSUPP); } static inline struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev, unsigned long *freq) { - return ERR_PTR(-ENOTSUPP); + return ERR_PTR(-EOPNOTSUPP); } static inline struct dev_pm_opp *dev_pm_opp_find_freq_ceil_by_volt(struct device *dev, unsigned long u_volt) { - return ERR_PTR(-ENOTSUPP); + return ERR_PTR(-EOPNOTSUPP); } static inline struct dev_pm_opp *dev_pm_opp_find_freq_ceil(struct device *dev, unsigned long *freq) { - return ERR_PTR(-ENOTSUPP); + return ERR_PTR(-EOPNOTSUPP); } static inline void dev_pm_opp_put(struct dev_pm_opp *opp) {} @@ -270,7 +270,7 @@ static inline void dev_pm_opp_put(struct dev_pm_opp *opp) {} static inline int dev_pm_opp_add(struct device *dev, unsigned long freq, unsigned long u_volt) { - return -ENOTSUPP; + return -EOPNOTSUPP; } static inline void dev_pm_opp_remove(struct device *dev, unsigned long freq) @@ -301,19 +301,19 @@ static inline int dev_pm_opp_disable(struct device *dev, unsigned long freq) static inline int dev_pm_opp_register_notifier(struct device *dev, struct notifier_block *nb) { - return -ENOTSUPP; + return -EOPNOTSUPP; } static inline int dev_pm_opp_unregister_notifier(struct device *dev, struct notifier_block *nb) { - return -ENOTSUPP; + return -EOPNOTSUPP; } static inline struct opp_table *dev_pm_opp_set_supported_hw(struct device *dev, const u32 *versions, unsigned int count) { - return ERR_PTR(-ENOTSUPP); + return ERR_PTR(-EOPNOTSUPP); } static inline void dev_pm_opp_put_supported_hw(struct opp_table *opp_table) {} @@ -321,7 +321,7 @@ static inline void dev_pm_opp_put_supported_hw(struct opp_table *opp_table) {} static inline struct opp_table *dev_pm_opp_register_set_opp_helper(struct device *dev, int (*set_opp)(struct dev_pm_set_opp_data *data)) { - return ERR_PTR(-ENOTSUPP); + return ERR_PTR(-EOPNOTSUPP); } static inline void dev_pm_opp_unregister_set_opp_helper(struct opp_table *opp_table) {} @@ -330,33 +330,33 @@ static inline struct opp_table * devm_pm_opp_register_set_opp_helper(struct device *dev, int (*set_opp)(struct dev_pm_set_opp_data *data)) { - return ERR_PTR(-ENOTSUPP); + return ERR_PTR(-EOPNOTSUPP); } static inline struct 
opp_table *dev_pm_opp_set_prop_name(struct device *dev, const char *name) { - return ERR_PTR(-ENOTSUPP); + return ERR_PTR(-EOPNOTSUPP); } static inline void dev_pm_opp_put_prop_name(struct opp_table *opp_table) {} static inline struct opp_table *dev_pm_opp_set_regulators(struct device *dev, const char * const names[], unsigned int count) { - return ERR_PTR(-ENOTSUPP); + return ERR_PTR(-EOPNOTSUPP); } static inline void dev_pm_opp_put_regulators(struct opp_table *opp_table) {} static inline struct opp_table *dev_pm_opp_set_clkname(struct device *dev, const char *name) { - return ERR_PTR(-ENOTSUPP); + return ERR_PTR(-EOPNOTSUPP); } static inline void dev_pm_opp_put_clkname(struct opp_table *opp_table) {} static inline struct opp_table *dev_pm_opp_attach_genpd(struct device *dev, const char **names, struct device ***virt_devs) { - return ERR_PTR(-ENOTSUPP); + return ERR_PTR(-EOPNOTSUPP); } static inline void dev_pm_opp_detach_genpd(struct opp_table *opp_table) {} @@ -364,27 +364,27 @@ static inline void dev_pm_opp_detach_genpd(struct opp_table *opp_table) {} static inline struct opp_table *devm_pm_opp_attach_genpd(struct device *dev, const char **names, struct device ***virt_devs) { - return ERR_PTR(-ENOTSUPP); + return ERR_PTR(-EOPNOTSUPP); } static inline int dev_pm_opp_xlate_performance_state(struct opp_table *src_table, struct opp_table *dst_table, unsigned int pstate) { - return -ENOTSUPP; + return -EOPNOTSUPP; } static inline int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq) { - return -ENOTSUPP; + return -EOPNOTSUPP; } static inline int dev_pm_opp_set_opp(struct device *dev, struct dev_pm_opp *opp) { - return -ENOTSUPP; + return -EOPNOTSUPP; } static inline int dev_pm_opp_set_sharing_cpus(struct device *cpu_dev, const struct cpumask *cpumask) { - return -ENOTSUPP; + return -EOPNOTSUPP; } static inline int dev_pm_opp_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask) @@ -402,7 +402,7 @@ static inline void dev_pm_opp_cpumask_remove_table(const struct cpumask *cpumask static inline int dev_pm_opp_sync_regulators(struct device *dev) { - return -ENOTSUPP; + return -EOPNOTSUPP; } #endif /* CONFIG_PM_OPP */ @@ -427,17 +427,17 @@ static inline void dev_pm_opp_of_unregister_em(struct device *dev) #else static inline int dev_pm_opp_of_add_table(struct device *dev) { - return -ENOTSUPP; + return -EOPNOTSUPP; } static inline int dev_pm_opp_of_add_table_indexed(struct device *dev, int index) { - return -ENOTSUPP; + return -EOPNOTSUPP; } static inline int dev_pm_opp_of_add_table_noclk(struct device *dev, int index) { - return -ENOTSUPP; + return -EOPNOTSUPP; } static inline void dev_pm_opp_of_remove_table(struct device *dev) @@ -446,7 +446,7 @@ static inline void dev_pm_opp_of_remove_table(struct device *dev) static inline int dev_pm_opp_of_cpumask_add_table(const struct cpumask *cpumask) { - return -ENOTSUPP; + return -EOPNOTSUPP; } static inline void dev_pm_opp_of_cpumask_remove_table(const struct cpumask *cpumask) @@ -455,7 +455,7 @@ static inline void dev_pm_opp_of_cpumask_remove_table(const struct cpumask *cpum static inline int dev_pm_opp_of_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask) { - return -ENOTSUPP; + return -EOPNOTSUPP; } static inline struct device_node *dev_pm_opp_of_get_opp_desc_node(struct device *dev) @@ -471,7 +471,7 @@ static inline struct device_node *dev_pm_opp_get_of_node(struct dev_pm_opp *opp) static inline int dev_pm_opp_of_register_em(struct device *dev, struct cpumask *cpus) { - return -ENOTSUPP; + return 
-EOPNOTSUPP; } static inline void dev_pm_opp_of_unregister_em(struct device *dev) @@ -480,12 +480,12 @@ static inline void dev_pm_opp_of_unregister_em(struct device *dev) static inline int of_get_required_opp_performance_state(struct device_node *np, int index) { - return -ENOTSUPP; + return -EOPNOTSUPP; } static inline int dev_pm_opp_of_find_icc_paths(struct device *dev, struct opp_table *opp_table) { - return -ENOTSUPP; + return -EOPNOTSUPP; } #endif -- cgit v1.2.3 From 7d8658ef65a4f891d0cff6340fa717b378384642 Mon Sep 17 00:00:00 2001 From: Saravana Kannan Date: Thu, 4 Feb 2021 16:14:22 +0800 Subject: OPP: Add function to look up required OPPs for a given OPP Add a function that allows looking up required OPPs given a source OPP table, destination OPP table and the source OPP. Signed-off-by: Saravana Kannan Signed-off-by: Hsin-Yi Wang [ Viresh: Rearranged code, fixed return errors ] Signed-off-by: Viresh Kumar --- drivers/opp/core.c | 55 ++++++++++++++++++++++++++++++++++++++++++++++ include/linux/pm_opp.h | 7 +++++++ 2 files changed, 62 insertions(+) (limited to 'include') diff --git a/drivers/opp/core.c b/drivers/opp/core.c index dc95d29e94c1..c3f3d9249cc5 100644 --- a/drivers/opp/core.c +++ b/drivers/opp/core.c @@ -2398,6 +2398,61 @@ devm_pm_opp_attach_genpd(struct device *dev, const char **names, } EXPORT_SYMBOL_GPL(devm_pm_opp_attach_genpd); +/** + * dev_pm_opp_xlate_required_opp() - Find required OPP for @src_table OPP. + * @src_table: OPP table which has @dst_table as one of its required OPP tables. + * @dst_table: Required OPP table of the @src_table. + * @src_opp: OPP from the @src_table. + * + * This function returns the OPP (present in @dst_table) pointed out by the + * "required-opps" property of the @src_opp (present in @src_table). + * + * The callers are required to call dev_pm_opp_put() for the returned OPP after + * use. + * + * Return: pointer to 'struct dev_pm_opp' on success and errno otherwise. + */ +struct dev_pm_opp *dev_pm_opp_xlate_required_opp(struct opp_table *src_table, + struct opp_table *dst_table, + struct dev_pm_opp *src_opp) +{ + struct dev_pm_opp *opp, *dest_opp = ERR_PTR(-ENODEV); + int i; + + if (!src_table || !dst_table || !src_opp || + !src_table->required_opp_tables) + return ERR_PTR(-EINVAL); + + /* required-opps not fully initialized yet */ + if (lazy_linking_pending(src_table)) + return ERR_PTR(-EBUSY); + + for (i = 0; i < src_table->required_opp_count; i++) { + if (src_table->required_opp_tables[i] == dst_table) { + mutex_lock(&src_table->lock); + + list_for_each_entry(opp, &src_table->opp_list, node) { + if (opp == src_opp) { + dest_opp = opp->required_opps[i]; + dev_pm_opp_get(dest_opp); + break; + } + } + + mutex_unlock(&src_table->lock); + break; + } + } + + if (IS_ERR(dest_opp)) { + pr_err("%s: Couldn't find matching OPP (%p: %p)\n", __func__, + src_table, dst_table); + } + + return dest_opp; +} +EXPORT_SYMBOL_GPL(dev_pm_opp_xlate_required_opp); + /** * dev_pm_opp_xlate_performance_state() - Find required OPP's pstate for src_table. * @src_table: OPP table which has dst_table as one of its required OPP table.
diff --git a/include/linux/pm_opp.h b/include/linux/pm_opp.h index ab1d15ce559d..c0371efa4a0f 100644 --- a/include/linux/pm_opp.h +++ b/include/linux/pm_opp.h @@ -156,6 +156,7 @@ struct opp_table *devm_pm_opp_register_set_opp_helper(struct device *dev, int (* struct opp_table *dev_pm_opp_attach_genpd(struct device *dev, const char **names, struct device ***virt_devs); void dev_pm_opp_detach_genpd(struct opp_table *opp_table); struct opp_table *devm_pm_opp_attach_genpd(struct device *dev, const char **names, struct device ***virt_devs); +struct dev_pm_opp *dev_pm_opp_xlate_required_opp(struct opp_table *src_table, struct opp_table *dst_table, struct dev_pm_opp *src_opp); int dev_pm_opp_xlate_performance_state(struct opp_table *src_table, struct opp_table *dst_table, unsigned int pstate); int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq); int dev_pm_opp_set_opp(struct device *dev, struct dev_pm_opp *opp); @@ -367,6 +368,12 @@ static inline struct opp_table *devm_pm_opp_attach_genpd(struct device *dev, return ERR_PTR(-EOPNOTSUPP); } +static inline struct dev_pm_opp *dev_pm_opp_xlate_required_opp(struct opp_table *src_table, + struct opp_table *dst_table, struct dev_pm_opp *src_opp) +{ + return ERR_PTR(-EOPNOTSUPP); +} + static inline int dev_pm_opp_xlate_performance_state(struct opp_table *src_table, struct opp_table *dst_table, unsigned int pstate) { return -EOPNOTSUPP; -- cgit v1.2.3 From 26f9c7cc42a6dc036edf871544fd0e6b3a0601c1 Mon Sep 17 00:00:00 2001 From: Saravana Kannan Date: Thu, 4 Feb 2021 16:14:23 +0800 Subject: PM / devfreq: Cache OPP table reference in devfreq The OPP table can be used often in devfreq. Trying to get it each time can be expensive, so cache it in the devfreq struct. Signed-off-by: Saravana Kannan Acked-by: MyungJoo Ham Acked-by: Chanwoo Choi Signed-off-by: Hsin-Yi Wang [ Viresh: Added a blank line ] Signed-off-by: Viresh Kumar --- drivers/devfreq/devfreq.c | 7 +++++++ include/linux/devfreq.h | 2 ++ 2 files changed, 9 insertions(+) (limited to 'include') diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c index 6aa10de792b3..cefe84a10824 100644 --- a/drivers/devfreq/devfreq.c +++ b/drivers/devfreq/devfreq.c @@ -757,6 +757,9 @@ static void devfreq_dev_release(struct device *dev) if (devfreq->profile->exit) devfreq->profile->exit(devfreq->dev.parent); + if (devfreq->opp_table) + dev_pm_opp_put_opp_table(devfreq->opp_table); + mutex_destroy(&devfreq->lock); kfree(devfreq); } @@ -844,6 +847,10 @@ struct devfreq *devfreq_add_device(struct device *dev, } devfreq->suspend_freq = dev_pm_opp_get_suspend_opp_freq(dev); + devfreq->opp_table = dev_pm_opp_get_opp_table(dev); + if (IS_ERR(devfreq->opp_table)) + devfreq->opp_table = NULL; + atomic_set(&devfreq->suspend_count, 0); dev_set_name(&devfreq->dev, "%s", dev_name(dev)); diff --git a/include/linux/devfreq.h b/include/linux/devfreq.h index b6d3bae1c74d..26ea0850be9b 100644 --- a/include/linux/devfreq.h +++ b/include/linux/devfreq.h @@ -137,6 +137,7 @@ struct devfreq_stats { * using devfreq. * @profile: device-specific devfreq profile * @governor: method how to choose frequency based on the usage. + * @opp_table: Reference to OPP table of dev.parent, if one exists. * @nb: notifier block used to notify devfreq object that it should * reevaluate operable frequencies. Devfreq users may use * devfreq.nb to the corresponding register notifier call chain. 
@@ -173,6 +174,7 @@ struct devfreq { struct device dev; struct devfreq_dev_profile *profile; const struct devfreq_governor *governor; + struct opp_table *opp_table; struct notifier_block nb; struct delayed_work work; -- cgit v1.2.3 From 5ae4a4b45d4396aa7f7c008c4ae9eca981d43f8c Mon Sep 17 00:00:00 2001 From: Viresh Kumar Date: Tue, 2 Feb 2021 10:25:11 +0530 Subject: cpufreq: Remove CPUFREQ_STICKY flag During a cpufreq driver's registration, if the ->init() callback fails for all the CPUs, there is not much point in keeping the driver around, as it will only add unnecessary noise: for example, the cpufreq core will try to suspend/resume a driver that never got registered properly. The removal of such a driver is avoided if the driver carries the CPUFREQ_STICKY flag. The flag was added way back [1] in 2004 and perhaps no one needs it now. A lot of drivers do set this flag, probably because they just copied it from other drivers. It was added for some platforms [2] whose cpufreq drivers were getting registered before the CPUs were registered with the subsys framework, and hence used to fail. That is no longer the case. The current code flow in the kernel is:

start_kernel()
 -> kernel_init()
  -> kernel_init_freeable()
   -> do_basic_setup()
    -> driver_init()
     -> cpu_dev_init()
      -> subsys_system_register() //For CPUs
    -> do_initcalls()
     -> cpufreq_register_driver()

Clearly, the CPUs will always get registered with the subsys framework before any cpufreq driver can get probed. Remove the flag and update the relevant drivers. Link: https://git.kernel.org/pub/scm/linux/kernel/git/tglx/history.git/commit/include/linux/cpufreq.h?id=7cc9f0d9a1ab04cedc60d64fd8dcf7df224a3b4d # [1] Link: https://git.kernel.org/pub/scm/linux/kernel/git/tglx/history.git/commit/arch/arm/mach-sa1100/cpu-sa1100.c?id=f59d3bbe35f6268d729f51be82af8325d62f20f5 # [2] Signed-off-by: Viresh Kumar Signed-off-by: Rafael J.
Wysocki --- drivers/cpufreq/cpufreq-dt.c | 2 +- drivers/cpufreq/cpufreq.c | 3 +-- drivers/cpufreq/davinci-cpufreq.c | 2 +- drivers/cpufreq/loongson1-cpufreq.c | 2 +- drivers/cpufreq/mediatek-cpufreq.c | 2 +- drivers/cpufreq/omap-cpufreq.c | 2 +- drivers/cpufreq/qcom-cpufreq-hw.c | 2 +- drivers/cpufreq/s3c24xx-cpufreq.c | 2 +- drivers/cpufreq/s5pv210-cpufreq.c | 2 +- drivers/cpufreq/sa1100-cpufreq.c | 2 +- drivers/cpufreq/sa1110-cpufreq.c | 2 +- drivers/cpufreq/scmi-cpufreq.c | 2 +- drivers/cpufreq/scpi-cpufreq.c | 2 +- drivers/cpufreq/spear-cpufreq.c | 2 +- drivers/cpufreq/tegra186-cpufreq.c | 2 +- drivers/cpufreq/tegra194-cpufreq.c | 3 +-- drivers/cpufreq/vexpress-spc-cpufreq.c | 3 +-- include/linux/cpufreq.h | 17 +++++++---------- 18 files changed, 24 insertions(+), 30 deletions(-) (limited to 'include') diff --git a/drivers/cpufreq/cpufreq-dt.c b/drivers/cpufreq/cpufreq-dt.c index ad4234518ef6..b1e1bdc63b01 100644 --- a/drivers/cpufreq/cpufreq-dt.c +++ b/drivers/cpufreq/cpufreq-dt.c @@ -175,7 +175,7 @@ static int cpufreq_exit(struct cpufreq_policy *policy) } static struct cpufreq_driver dt_cpufreq_driver = { - .flags = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK | + .flags = CPUFREQ_NEED_INITIAL_FREQ_CHECK | CPUFREQ_IS_COOLING_DEV, .verify = cpufreq_generic_frequency_table_verify, .target_index = set_target, diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index d0a3525ce27f..7d0ae968def7 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c @@ -2810,8 +2810,7 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data) if (ret) goto err_boost_unreg; - if (!(cpufreq_driver->flags & CPUFREQ_STICKY) && - list_empty(&cpufreq_policy_list)) { + if (unlikely(list_empty(&cpufreq_policy_list))) { /* if all ->init() calls failed, unregister */ ret = -ENODEV; pr_debug("%s: No CPU initialized for driver %s\n", __func__, diff --git a/drivers/cpufreq/davinci-cpufreq.c b/drivers/cpufreq/davinci-cpufreq.c index 91f477a6cbc4..9e97f60f8199 100644 --- a/drivers/cpufreq/davinci-cpufreq.c +++ b/drivers/cpufreq/davinci-cpufreq.c @@ -95,7 +95,7 @@ static int davinci_cpu_init(struct cpufreq_policy *policy) } static struct cpufreq_driver davinci_driver = { - .flags = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK, + .flags = CPUFREQ_NEED_INITIAL_FREQ_CHECK, .verify = cpufreq_generic_frequency_table_verify, .target_index = davinci_target, .get = cpufreq_generic_get, diff --git a/drivers/cpufreq/loongson1-cpufreq.c b/drivers/cpufreq/loongson1-cpufreq.c index 86f612593e49..fb72d709db56 100644 --- a/drivers/cpufreq/loongson1-cpufreq.c +++ b/drivers/cpufreq/loongson1-cpufreq.c @@ -116,7 +116,7 @@ static int ls1x_cpufreq_exit(struct cpufreq_policy *policy) static struct cpufreq_driver ls1x_cpufreq_driver = { .name = "cpufreq-ls1x", - .flags = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK, + .flags = CPUFREQ_NEED_INITIAL_FREQ_CHECK, .verify = cpufreq_generic_frequency_table_verify, .target_index = ls1x_cpufreq_target, .get = cpufreq_generic_get, diff --git a/drivers/cpufreq/mediatek-cpufreq.c b/drivers/cpufreq/mediatek-cpufreq.c index 022e3e966e71..f2e491b25b07 100644 --- a/drivers/cpufreq/mediatek-cpufreq.c +++ b/drivers/cpufreq/mediatek-cpufreq.c @@ -463,7 +463,7 @@ static int mtk_cpufreq_exit(struct cpufreq_policy *policy) } static struct cpufreq_driver mtk_cpufreq_driver = { - .flags = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK | + .flags = CPUFREQ_NEED_INITIAL_FREQ_CHECK | CPUFREQ_HAVE_GOVERNOR_PER_POLICY | CPUFREQ_IS_COOLING_DEV, .verify = 
cpufreq_generic_frequency_table_verify, diff --git a/drivers/cpufreq/omap-cpufreq.c b/drivers/cpufreq/omap-cpufreq.c index 3694bb030df3..e035ee216b0f 100644 --- a/drivers/cpufreq/omap-cpufreq.c +++ b/drivers/cpufreq/omap-cpufreq.c @@ -144,7 +144,7 @@ static int omap_cpu_exit(struct cpufreq_policy *policy) } static struct cpufreq_driver omap_driver = { - .flags = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK, + .flags = CPUFREQ_NEED_INITIAL_FREQ_CHECK, .verify = cpufreq_generic_frequency_table_verify, .target_index = omap_target, .get = cpufreq_generic_get, diff --git a/drivers/cpufreq/qcom-cpufreq-hw.c b/drivers/cpufreq/qcom-cpufreq-hw.c index 9ed5341dc515..2a3b4f44488b 100644 --- a/drivers/cpufreq/qcom-cpufreq-hw.c +++ b/drivers/cpufreq/qcom-cpufreq-hw.c @@ -374,7 +374,7 @@ static struct freq_attr *qcom_cpufreq_hw_attr[] = { }; static struct cpufreq_driver cpufreq_qcom_hw_driver = { - .flags = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK | + .flags = CPUFREQ_NEED_INITIAL_FREQ_CHECK | CPUFREQ_HAVE_GOVERNOR_PER_POLICY | CPUFREQ_IS_COOLING_DEV, .verify = cpufreq_generic_frequency_table_verify, diff --git a/drivers/cpufreq/s3c24xx-cpufreq.c b/drivers/cpufreq/s3c24xx-cpufreq.c index 37efc0dc3f91..7380c32b238e 100644 --- a/drivers/cpufreq/s3c24xx-cpufreq.c +++ b/drivers/cpufreq/s3c24xx-cpufreq.c @@ -420,7 +420,7 @@ static int s3c_cpufreq_resume(struct cpufreq_policy *policy) #endif static struct cpufreq_driver s3c24xx_driver = { - .flags = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK, + .flags = CPUFREQ_NEED_INITIAL_FREQ_CHECK, .target = s3c_cpufreq_target, .get = cpufreq_generic_get, .init = s3c_cpufreq_init, diff --git a/drivers/cpufreq/s5pv210-cpufreq.c b/drivers/cpufreq/s5pv210-cpufreq.c index bed496cf8d24..69786e5bbf05 100644 --- a/drivers/cpufreq/s5pv210-cpufreq.c +++ b/drivers/cpufreq/s5pv210-cpufreq.c @@ -574,7 +574,7 @@ static int s5pv210_cpufreq_reboot_notifier_event(struct notifier_block *this, } static struct cpufreq_driver s5pv210_driver = { - .flags = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK, + .flags = CPUFREQ_NEED_INITIAL_FREQ_CHECK, .verify = cpufreq_generic_frequency_table_verify, .target_index = s5pv210_target, .get = cpufreq_generic_get, diff --git a/drivers/cpufreq/sa1100-cpufreq.c b/drivers/cpufreq/sa1100-cpufreq.c index 5c075ef6adc0..252b9fc26124 100644 --- a/drivers/cpufreq/sa1100-cpufreq.c +++ b/drivers/cpufreq/sa1100-cpufreq.c @@ -186,7 +186,7 @@ static int __init sa1100_cpu_init(struct cpufreq_policy *policy) } static struct cpufreq_driver sa1100_driver __refdata = { - .flags = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK | + .flags = CPUFREQ_NEED_INITIAL_FREQ_CHECK | CPUFREQ_NO_AUTO_DYNAMIC_SWITCHING, .verify = cpufreq_generic_frequency_table_verify, .target_index = sa1100_target, diff --git a/drivers/cpufreq/sa1110-cpufreq.c b/drivers/cpufreq/sa1110-cpufreq.c index d9d04d935b3a..1a83c8678a63 100644 --- a/drivers/cpufreq/sa1110-cpufreq.c +++ b/drivers/cpufreq/sa1110-cpufreq.c @@ -310,7 +310,7 @@ static int __init sa1110_cpu_init(struct cpufreq_policy *policy) /* sa1110_driver needs __refdata because it must remain after init registers * it with cpufreq_register_driver() */ static struct cpufreq_driver sa1110_driver __refdata = { - .flags = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK | + .flags = CPUFREQ_NEED_INITIAL_FREQ_CHECK | CPUFREQ_NO_AUTO_DYNAMIC_SWITCHING, .verify = cpufreq_generic_frequency_table_verify, .target_index = sa1110_target, diff --git a/drivers/cpufreq/scmi-cpufreq.c b/drivers/cpufreq/scmi-cpufreq.c index 
491a0a24fb1e..5bd03b59887f 100644 --- a/drivers/cpufreq/scmi-cpufreq.c +++ b/drivers/cpufreq/scmi-cpufreq.c @@ -217,7 +217,7 @@ static int scmi_cpufreq_exit(struct cpufreq_policy *policy) static struct cpufreq_driver scmi_cpufreq_driver = { .name = "scmi", - .flags = CPUFREQ_STICKY | CPUFREQ_HAVE_GOVERNOR_PER_POLICY | + .flags = CPUFREQ_HAVE_GOVERNOR_PER_POLICY | CPUFREQ_NEED_INITIAL_FREQ_CHECK | CPUFREQ_IS_COOLING_DEV, .verify = cpufreq_generic_frequency_table_verify, diff --git a/drivers/cpufreq/scpi-cpufreq.c b/drivers/cpufreq/scpi-cpufreq.c index e5140ad63db8..d6a698a1b5d1 100644 --- a/drivers/cpufreq/scpi-cpufreq.c +++ b/drivers/cpufreq/scpi-cpufreq.c @@ -191,7 +191,7 @@ static int scpi_cpufreq_exit(struct cpufreq_policy *policy) static struct cpufreq_driver scpi_cpufreq_driver = { .name = "scpi-cpufreq", - .flags = CPUFREQ_STICKY | CPUFREQ_HAVE_GOVERNOR_PER_POLICY | + .flags = CPUFREQ_HAVE_GOVERNOR_PER_POLICY | CPUFREQ_NEED_INITIAL_FREQ_CHECK | CPUFREQ_IS_COOLING_DEV, .verify = cpufreq_generic_frequency_table_verify, diff --git a/drivers/cpufreq/spear-cpufreq.c b/drivers/cpufreq/spear-cpufreq.c index 73bd8dc47074..7d0d62a06bf3 100644 --- a/drivers/cpufreq/spear-cpufreq.c +++ b/drivers/cpufreq/spear-cpufreq.c @@ -160,7 +160,7 @@ static int spear_cpufreq_init(struct cpufreq_policy *policy) static struct cpufreq_driver spear_cpufreq_driver = { .name = "cpufreq-spear", - .flags = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK, + .flags = CPUFREQ_NEED_INITIAL_FREQ_CHECK, .verify = cpufreq_generic_frequency_table_verify, .target_index = spear_cpufreq_target, .get = cpufreq_generic_get, diff --git a/drivers/cpufreq/tegra186-cpufreq.c b/drivers/cpufreq/tegra186-cpufreq.c index e566ea298b59..5d1943e787b0 100644 --- a/drivers/cpufreq/tegra186-cpufreq.c +++ b/drivers/cpufreq/tegra186-cpufreq.c @@ -117,7 +117,7 @@ static unsigned int tegra186_cpufreq_get(unsigned int cpu) static struct cpufreq_driver tegra186_cpufreq_driver = { .name = "tegra186", - .flags = CPUFREQ_STICKY | CPUFREQ_HAVE_GOVERNOR_PER_POLICY | + .flags = CPUFREQ_HAVE_GOVERNOR_PER_POLICY | CPUFREQ_NEED_INITIAL_FREQ_CHECK, .get = tegra186_cpufreq_get, .verify = cpufreq_generic_frequency_table_verify, diff --git a/drivers/cpufreq/tegra194-cpufreq.c b/drivers/cpufreq/tegra194-cpufreq.c index 6a67f36f3b80..a9620e4489ae 100644 --- a/drivers/cpufreq/tegra194-cpufreq.c +++ b/drivers/cpufreq/tegra194-cpufreq.c @@ -272,8 +272,7 @@ static int tegra194_cpufreq_set_target(struct cpufreq_policy *policy, static struct cpufreq_driver tegra194_cpufreq_driver = { .name = "tegra194", - .flags = CPUFREQ_STICKY | CPUFREQ_CONST_LOOPS | - CPUFREQ_NEED_INITIAL_FREQ_CHECK, + .flags = CPUFREQ_CONST_LOOPS | CPUFREQ_NEED_INITIAL_FREQ_CHECK, .verify = cpufreq_generic_frequency_table_verify, .target_index = tegra194_cpufreq_set_target, .get = tegra194_get_speed, diff --git a/drivers/cpufreq/vexpress-spc-cpufreq.c b/drivers/cpufreq/vexpress-spc-cpufreq.c index f711d8eaea6a..51dfa9ae6cf5 100644 --- a/drivers/cpufreq/vexpress-spc-cpufreq.c +++ b/drivers/cpufreq/vexpress-spc-cpufreq.c @@ -486,8 +486,7 @@ static void ve_spc_cpufreq_ready(struct cpufreq_policy *policy) static struct cpufreq_driver ve_spc_cpufreq_driver = { .name = "vexpress-spc", - .flags = CPUFREQ_STICKY | - CPUFREQ_HAVE_GOVERNOR_PER_POLICY | + .flags = CPUFREQ_HAVE_GOVERNOR_PER_POLICY | CPUFREQ_NEED_INITIAL_FREQ_CHECK, .verify = cpufreq_generic_frequency_table_verify, .target_index = ve_spc_cpufreq_set_target, diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h index 
9c8b7437b6cd..c8e40e91fe9b 100644 --- a/include/linux/cpufreq.h +++ b/include/linux/cpufreq.h @@ -387,8 +387,13 @@ struct cpufreq_driver { /* flags */ -/* driver isn't removed even if all ->init() calls failed */ -#define CPUFREQ_STICKY BIT(0) +/* + * Set by drivers that need to update internal upper and lower boundaries along + * with the target frequency and so the core and governors should also invoke + * the driver if the target frequency does not change, but the policy min or max + * may have changed. + */ +#define CPUFREQ_NEED_UPDATE_LIMITS BIT(0) /* loops_per_jiffy or other kernel "constants" aren't affected by frequency transitions */ #define CPUFREQ_CONST_LOOPS BIT(1) @@ -432,14 +437,6 @@ struct cpufreq_driver { */ #define CPUFREQ_IS_COOLING_DEV BIT(7) -/* - * Set by drivers that need to update internale upper and lower boundaries along - * with the target frequency and so the core and governors should also invoke - * the diver if the target frequency does not change, but the policy min or max - * may have changed. - */ -#define CPUFREQ_NEED_UPDATE_LIMITS BIT(8) - int cpufreq_register_driver(struct cpufreq_driver *driver_data); int cpufreq_unregister_driver(struct cpufreq_driver *driver_data); -- cgit v1.2.3 From 2f0531869fd22182e769b10dd6cf151861ede791 Mon Sep 17 00:00:00 2001 From: Viresh Kumar Date: Tue, 2 Feb 2021 11:11:55 +0530 Subject: cpufreq: Remove unused flag CPUFREQ_PM_NO_WARN This flag is set by one of the drivers but it isn't used in the code otherwise. Remove the unused flag and update the driver. Signed-off-by: Viresh Kumar Signed-off-by: Rafael J. Wysocki --- drivers/cpufreq/pmac32-cpufreq.c | 3 +-- include/linux/cpufreq.h | 13 +++++-------- 2 files changed, 6 insertions(+), 10 deletions(-) (limited to 'include') diff --git a/drivers/cpufreq/pmac32-cpufreq.c b/drivers/cpufreq/pmac32-cpufreq.c index 73621bc11976..4f20c6a9108d 100644 --- a/drivers/cpufreq/pmac32-cpufreq.c +++ b/drivers/cpufreq/pmac32-cpufreq.c @@ -439,8 +439,7 @@ static struct cpufreq_driver pmac_cpufreq_driver = { .init = pmac_cpufreq_cpu_init, .suspend = pmac_cpufreq_suspend, .resume = pmac_cpufreq_resume, - .flags = CPUFREQ_PM_NO_WARN | - CPUFREQ_NO_AUTO_DYNAMIC_SWITCHING, + .flags = CPUFREQ_NO_AUTO_DYNAMIC_SWITCHING, .attr = cpufreq_generic_attr, .name = "powermac", }; diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h index c8e40e91fe9b..353969c7acd3 100644 --- a/include/linux/cpufreq.h +++ b/include/linux/cpufreq.h @@ -398,8 +398,11 @@ struct cpufreq_driver { /* loops_per_jiffy or other kernel "constants" aren't affected by frequency transitions */ #define CPUFREQ_CONST_LOOPS BIT(1) -/* don't warn on suspend/resume speed mismatches */ -#define CPUFREQ_PM_NO_WARN BIT(2) +/* + * Set by drivers that want the core to automatically register the cpufreq + * driver as a thermal cooling device. + */ +#define CPUFREQ_IS_COOLING_DEV BIT(2) /* * This should be set by platforms having multiple clock-domains, i.e. @@ -431,12 +434,6 @@ struct cpufreq_driver { */ #define CPUFREQ_NO_AUTO_DYNAMIC_SWITCHING BIT(6) -/* - * Set by drivers that want the core to automatically register the cpufreq - * driver as a thermal cooling device. - */ -#define CPUFREQ_IS_COOLING_DEV BIT(7) - int cpufreq_register_driver(struct cpufreq_driver *driver_data); int cpufreq_unregister_driver(struct cpufreq_driver *driver_data); -- cgit v1.2.3
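Taken together, the last two changes mean a driver that used to set CPUFREQ_STICKY or CPUFREQ_PM_NO_WARN now simply drops them from its flags. A minimal sketch of the resulting declaration (hypothetical "foo" driver, for illustration only):

	static struct cpufreq_driver foo_cpufreq_driver = {
		.name		= "foo-cpufreq",
		.flags		= CPUFREQ_NEED_INITIAL_FREQ_CHECK,
		.init		= foo_cpu_init,
		.verify		= cpufreq_generic_frequency_table_verify,
		.target_index	= foo_target_index,
		.get		= cpufreq_generic_get,
	};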