commit   49835c15a55225e9b3ff9cc9317135b334ea2d49 (patch)
Author:    Linus Torvalds <torvalds@linux-foundation.org>  2020-03-31 01:05:01 +0300
Committer: Linus Torvalds <torvalds@linux-foundation.org>  2020-03-31 01:05:01 +0300
tree     deb20b64d9b5ca9b16f0d8f3a2e23822ec7dcd65 /drivers
parent   a231bed2267cf45b0759da1d3ad62483b8bd0925 (diff)
parent   2409000a0cad2242fd4e2578f761f97069625478 (diff)
download linux-49835c15a55225e9b3ff9cc9317135b334ea2d49.tar.xz
Merge tag 'pm-5.7-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm
Pull power management updates from Rafael Wysocki:
 "These clean up and rework the PM QoS API, address a suspend-to-idle
  wakeup regression on some ACPI-based platforms, clean up and extend a
  few cpuidle drivers, update multiple cpufreq drivers and cpufreq
  documentation, and fix a number of issues in devfreq and several other
  things all over.

  Specifics:

   - Clean up and rework the PM QoS API to simplify the code and reduce
     the size of it (Rafael Wysocki).

   - Fix a suspend-to-idle wakeup regression on Dell XPS13 9370 and
     similar platforms where the USB plug/unplug events are handled by
     the EC (Rafael Wysocki).

   - Clean up the intel_idle and PSCI cpuidle drivers (Rafael Wysocki,
     Ulf Hansson).

   - Extend the haltpoll cpuidle driver so that it can be forced to run
     on some systems where it refused to load (Maciej Szmigiero).

   - Convert several cpufreq documents to the .rst format and move the
     legacy driver documentation into one common file (Mauro Carvalho
     Chehab, Rafael Wysocki).

   - Update several cpufreq drivers:

      * Extend and fix the imx-cpufreq-dt driver (Anson Huang).

      * Improve the -EPROBE_DEFER handling and fix unwanted CPU
        overclocking on i.MX6ULL in imx6q-cpufreq (Anson Huang,
        Christoph Niedermaier).

      * Add support for Krait based SoCs to the qcom driver (Ansuel
        Smith).

      * Add support for OPP_PLUS to ti-cpufreq (Lokesh Vutla).

      * Add platform specific intermediate callbacks support to
        cpufreq-dt and update the imx6q driver (Peng Fan).

      * Simplify and consolidate some pieces of the intel_pstate driver
        and update its documentation (Rafael Wysocki, Alex Hung).

   - Fix several devfreq issues:

      * Remove unneeded extern keyword from a devfreq header file and
        use the DEVFREQ_GOV_UPDATE_INTERVAL event name instead of
        DEVFREQ_GOV_INTERVAL (Chanwoo Choi).

      * Fix the handling of dev_pm_qos_remove_request() result (Leonard
        Crestez).

      * Use constant name for userspace governor (Pierre Kuo).

      * Get rid of doc warnings and fix a typo (Christophe JAILLET).

   - Use built-in RCU list checking in some places in the PM core to
     avoid false-positive RCU usage warnings (Madhuparna Bhowmik).

   - Add explicit READ_ONCE()/WRITE_ONCE() annotations to low-level PM
     QoS routines (Qian Cai).

   - Fix removal of wakeup sources to avoid NULL pointer dereferences in
     a corner case (Neeraj Upadhyay).

   - Clean up the handling of hibernate compat ioctls and fix the
     related documentation (Eric Biggers).

   - Update the idle_inject power capping driver to use variable-length
     arrays instead of zero-length arrays (Gustavo Silva).

   - Fix list format in a PM QoS document (Randy Dunlap).

   - Make the cpufreq stats module use scnprintf() to avoid potential
     buffer overflows (Takashi Iwai).

   - Add pm_runtime_get_if_active() to PM-runtime API (Sakari Ailus).

   - Allow no domain-idle-states DT property in generic PM domains (Ulf
     Hansson).
   - Fix a broken y-axis scale in the intel_pstate_tracer utility (Doug
     Smythies)"

* tag 'pm-5.7-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm: (78 commits)
  cpufreq: intel_pstate: Simplify intel_pstate_cpu_init()
  tools/power/x86/intel_pstate_tracer: fix a broken y-axis scale
  ACPI: PM: s2idle: Refine active GPEs check
  ACPICA: Allow acpi_any_gpe_status_set() to skip one GPE
  PM: sleep: wakeup: Skip wakeup_source_sysfs_remove() if device is not there
  PM / devfreq: Get rid of some doc warnings
  PM / devfreq: Fix handling dev_pm_qos_remove_request result
  PM / devfreq: Fix a typo in a comment
  PM / devfreq: Change to DEVFREQ_GOV_UPDATE_INTERVAL event name
  PM / devfreq: Remove unneeded extern keyword
  PM / devfreq: Use constant name of userspace governor
  ACPI: PM: s2idle: Fix comment in acpi_s2idle_prepare_late()
  cpufreq: qcom: Add support for krait based socs
  cpufreq: imx6q-cpufreq: Improve the logic of -EPROBE_DEFER handling
  cpufreq: Use scnprintf() for avoiding potential buffer overflow
  cpuidle: psci: Split psci_dt_cpu_init_idle()
  PM / Domains: Allow no domain-idle-states DT property in genpd when parsing
  PM / hibernate: Remove unnecessary compat ioctl overrides
  PM: hibernate: fix docs for ioctls that return loff_t via pointer
  Documentation: intel_pstate: update links for references
  ...
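Several hunks below reflect the user-visible side of the PM QoS rework: the shared PM_QOS_CPU_DMA_LATENCY request interface is replaced by dedicated CPU latency QoS helpers. A minimal before/after sketch (illustrative only; req stands for a caller-owned struct pm_qos_request):

    /* Before: generic PM QoS request against the CPU/DMA latency class. */
    pm_qos_add_request(&req, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);
    pm_qos_update_request(&req, 0);          /* ask for zero CPU wakeup latency */
    pm_qos_remove_request(&req);

    /* After: dedicated helpers, as used in the i915, cmt_speech and
     * cpuidle governor hunks in this diff. */
    cpu_latency_qos_add_request(&req, PM_QOS_DEFAULT_VALUE);
    cpu_latency_qos_update_request(&req, 0);
    cpu_latency_qos_remove_request(&req);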
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/acpi/acpica/achware.h                 |   2
-rw-r--r--  drivers/acpi/acpica/evxfgpe.c                 |  17
-rw-r--r--  drivers/acpi/acpica/hwgpe.c                   |  47
-rw-r--r--  drivers/acpi/ec.c                             |   5
-rw-r--r--  drivers/acpi/internal.h                       |   1
-rw-r--r--  drivers/acpi/sleep.c                          |  24
-rw-r--r--  drivers/base/power/domain.c                   |   2
-rw-r--r--  drivers/base/power/main.c                     |  12
-rw-r--r--  drivers/base/power/runtime.c                  |  36
-rw-r--r--  drivers/base/power/wakeup.c                   |  17
-rw-r--r--  drivers/cpufreq/Kconfig.arm                   |   2
-rw-r--r--  drivers/cpufreq/Kconfig.x86                   |   2
-rw-r--r--  drivers/cpufreq/cpufreq-dt-platdev.c          |   5
-rw-r--r--  drivers/cpufreq/cpufreq-dt.c                  |   4
-rw-r--r--  drivers/cpufreq/cpufreq-dt.h                  |   4
-rw-r--r--  drivers/cpufreq/cpufreq_stats.c               |  14
-rw-r--r--  drivers/cpufreq/imx-cpufreq-dt.c              |  13
-rw-r--r--  drivers/cpufreq/imx6q-cpufreq.c               |  71
-rw-r--r--  drivers/cpufreq/intel_pstate.c                |  26
-rw-r--r--  drivers/cpufreq/qcom-cpufreq-nvmem.c          | 191
-rw-r--r--  drivers/cpufreq/ti-cpufreq.c                  |   7
-rw-r--r--  drivers/cpuidle/cpuidle-haltpoll.c            |  12
-rw-r--r--  drivers/cpuidle/cpuidle-psci.c                |  46
-rw-r--r--  drivers/cpuidle/cpuidle.c                     |  40
-rw-r--r--  drivers/cpuidle/governor.c                    |   2
-rw-r--r--  drivers/devfreq/devfreq.c                     |  14
-rw-r--r--  drivers/devfreq/governor.h                    |  21
-rw-r--r--  drivers/devfreq/governor_simpleondemand.c     |   4
-rw-r--r--  drivers/devfreq/governor_userspace.c          |   2
-rw-r--r--  drivers/devfreq/tegra30-devfreq.c             |   4
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp.c       |   4
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c               |  12
-rw-r--r--  drivers/gpu/drm/i915/intel_sideband.c         |   5
-rw-r--r--  drivers/hsi/clients/cmt_speech.c              |   9
-rw-r--r--  drivers/idle/intel_idle.c                     | 302
-rw-r--r--  drivers/media/pci/saa7134/saa7134-video.c     |   5
-rw-r--r--  drivers/media/platform/via-camera.c           |   4
-rw-r--r--  drivers/mmc/host/sdhci-esdhc-imx.c            |  14
-rw-r--r--  drivers/net/ethernet/intel/e1000e/netdev.c    |  13
-rw-r--r--  drivers/net/wireless/ath/ath10k/core.c        |   4
-rw-r--r--  drivers/net/wireless/intel/ipw2x00/ipw2100.c  |  10
-rw-r--r--  drivers/powercap/idle_inject.c                |   2
-rw-r--r--  drivers/spi/spi-fsl-qspi.c                    |   4
-rw-r--r--  drivers/tty/serial/8250/8250_omap.c           |  13
-rw-r--r--  drivers/tty/serial/omap-serial.c              |  15
-rw-r--r--  drivers/usb/chipidea/ci_hdrc_imx.c            |  12
46 files changed, 661 insertions(+), 414 deletions(-)
diff --git a/drivers/acpi/acpica/achware.h b/drivers/acpi/acpica/achware.h
index 6ad0517553d5..ebf6453d0e21 100644
--- a/drivers/acpi/acpica/achware.h
+++ b/drivers/acpi/acpica/achware.h
@@ -101,7 +101,7 @@ acpi_status acpi_hw_enable_all_runtime_gpes(void);
acpi_status acpi_hw_enable_all_wakeup_gpes(void);
-u8 acpi_hw_check_all_gpes(void);
+u8 acpi_hw_check_all_gpes(acpi_handle gpe_skip_device, u32 gpe_skip_number);
acpi_status
acpi_hw_enable_runtime_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
diff --git a/drivers/acpi/acpica/evxfgpe.c b/drivers/acpi/acpica/evxfgpe.c
index f2de66bfd8a7..3be60673e461 100644
--- a/drivers/acpi/acpica/evxfgpe.c
+++ b/drivers/acpi/acpica/evxfgpe.c
@@ -799,17 +799,19 @@ ACPI_EXPORT_SYMBOL(acpi_enable_all_wakeup_gpes)
*
* FUNCTION: acpi_any_gpe_status_set
*
- * PARAMETERS: None
+ * PARAMETERS: gpe_skip_number - Number of the GPE to skip
*
* RETURN: Whether or not the status bit is set for any GPE
*
- * DESCRIPTION: Check the status bits of all enabled GPEs and return TRUE if any
- * of them is set or FALSE otherwise.
+ * DESCRIPTION: Check the status bits of all enabled GPEs, except for the one
+ * represented by the "skip" argument, and return TRUE if any of
+ * them is set or FALSE otherwise.
*
******************************************************************************/
-u32 acpi_any_gpe_status_set(void)
+u32 acpi_any_gpe_status_set(u32 gpe_skip_number)
{
acpi_status status;
+ acpi_handle gpe_device;
u8 ret;
ACPI_FUNCTION_TRACE(acpi_any_gpe_status_set);
@@ -819,7 +821,12 @@ u32 acpi_any_gpe_status_set(void)
return (FALSE);
}
- ret = acpi_hw_check_all_gpes();
+ status = acpi_get_gpe_device(gpe_skip_number, &gpe_device);
+ if (ACPI_FAILURE(status)) {
+ gpe_device = NULL;
+ }
+
+ ret = acpi_hw_check_all_gpes(gpe_device, gpe_skip_number);
(void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
return (ret);
diff --git a/drivers/acpi/acpica/hwgpe.c b/drivers/acpi/acpica/hwgpe.c
index f4c285c2f595..49c46d4dd070 100644
--- a/drivers/acpi/acpica/hwgpe.c
+++ b/drivers/acpi/acpica/hwgpe.c
@@ -444,12 +444,19 @@ acpi_hw_enable_wakeup_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
return (AE_OK);
}
+struct acpi_gpe_block_status_context {
+ struct acpi_gpe_register_info *gpe_skip_register_info;
+ u8 gpe_skip_mask;
+ u8 retval;
+};
+
/******************************************************************************
*
* FUNCTION: acpi_hw_get_gpe_block_status
*
* PARAMETERS: gpe_xrupt_info - GPE Interrupt info
* gpe_block - Gpe Block info
+ * context - GPE list walk context data
*
* RETURN: Success
*
@@ -460,12 +467,13 @@ acpi_hw_enable_wakeup_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
static acpi_status
acpi_hw_get_gpe_block_status(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
struct acpi_gpe_block_info *gpe_block,
- void *ret_ptr)
+ void *context)
{
+ struct acpi_gpe_block_status_context *c = context;
struct acpi_gpe_register_info *gpe_register_info;
u64 in_enable, in_status;
acpi_status status;
- u8 *ret = ret_ptr;
+ u8 ret_mask;
u32 i;
/* Examine each GPE Register within the block */
@@ -485,7 +493,11 @@ acpi_hw_get_gpe_block_status(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
continue;
}
- *ret |= in_enable & in_status;
+ ret_mask = in_enable & in_status;
+ if (ret_mask && c->gpe_skip_register_info == gpe_register_info) {
+ ret_mask &= ~c->gpe_skip_mask;
+ }
+ c->retval |= ret_mask;
}
return (AE_OK);
@@ -561,24 +573,41 @@ acpi_status acpi_hw_enable_all_wakeup_gpes(void)
*
* FUNCTION: acpi_hw_check_all_gpes
*
- * PARAMETERS: None
+ * PARAMETERS: gpe_skip_device - GPE device of the GPE to skip
+ * gpe_skip_number - Number of the GPE to skip
*
* RETURN: Combined status of all GPEs
*
- * DESCRIPTION: Check all enabled GPEs in all GPE blocks and return TRUE if the
+ * DESCRIPTION: Check all enabled GPEs in all GPE blocks, except for the one
+ * represented by the "skip" arguments, and return TRUE if the
* status bit is set for at least one of them or FALSE otherwise.
*
******************************************************************************/
-u8 acpi_hw_check_all_gpes(void)
+u8 acpi_hw_check_all_gpes(acpi_handle gpe_skip_device, u32 gpe_skip_number)
{
- u8 ret = 0;
+ struct acpi_gpe_block_status_context context = {
+ .gpe_skip_register_info = NULL,
+ .retval = 0,
+ };
+ struct acpi_gpe_event_info *gpe_event_info;
+ acpi_cpu_flags flags;
ACPI_FUNCTION_TRACE(acpi_hw_check_all_gpes);
- (void)acpi_ev_walk_gpe_list(acpi_hw_get_gpe_block_status, &ret);
+ flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
+
+ gpe_event_info = acpi_ev_get_gpe_event_info(gpe_skip_device,
+ gpe_skip_number);
+ if (gpe_event_info) {
+ context.gpe_skip_register_info = gpe_event_info->register_info;
+ context.gpe_skip_mask = acpi_hw_get_gpe_register_bit(gpe_event_info);
+ }
+
+ acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
- return (ret != 0);
+ (void)acpi_ev_walk_gpe_list(acpi_hw_get_gpe_block_status, &context);
+ return (context.retval != 0);
}
#endif /* !ACPI_REDUCED_HARDWARE */
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index d1f1cf5d4bf0..9b9094b0b73f 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -2037,6 +2037,11 @@ void acpi_ec_set_gpe_wake_mask(u8 action)
acpi_set_gpe_wake_mask(NULL, first_ec->gpe, action);
}
+bool acpi_ec_other_gpes_active(void)
+{
+ return acpi_any_gpe_status_set(first_ec ? first_ec->gpe : U32_MAX);
+}
+
bool acpi_ec_dispatch_gpe(void)
{
u32 ret;
diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
index 3616daec650b..d44c591c4ee4 100644
--- a/drivers/acpi/internal.h
+++ b/drivers/acpi/internal.h
@@ -202,6 +202,7 @@ void acpi_ec_remove_query_handler(struct acpi_ec *ec, u8 query_bit);
#ifdef CONFIG_PM_SLEEP
void acpi_ec_flush_work(void);
+bool acpi_ec_other_gpes_active(void);
bool acpi_ec_dispatch_gpe(void);
#endif
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index e5f95922bc21..bb1ae400ec1f 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -982,10 +982,7 @@ static int acpi_s2idle_prepare_late(void)
static void acpi_s2idle_sync(void)
{
- /*
- * The EC driver uses the system workqueue and an additional special
- * one, so those need to be flushed too.
- */
+ /* The EC driver uses special workqueues that need to be flushed. */
acpi_ec_flush_work();
acpi_os_wait_events_complete(); /* synchronize Notify handling */
}
@@ -1013,19 +1010,20 @@ static bool acpi_s2idle_wake(void)
return true;
/*
- * If there are no EC events to process and at least one of the
- * other enabled GPEs is active, the wakeup is regarded as a
- * genuine one.
- *
- * Note that the checks below must be carried out in this order
- * to avoid returning prematurely due to a change of the EC GPE
- * status bit from unset to set between the checks with the
- * status bits of all the other GPEs unset.
+ * If the status bit is set for any enabled GPE other than the
+ * EC one, the wakeup is regarded as a genuine one.
*/
- if (acpi_any_gpe_status_set() && !acpi_ec_dispatch_gpe())
+ if (acpi_ec_other_gpes_active())
return true;
/*
+ * If the EC GPE status bit has not been set, the wakeup is
+ * regarded as a spurious one.
+ */
+ if (!acpi_ec_dispatch_gpe())
+ return false;
+
+ /*
* Cancel the wakeup and process all pending events in case
* there are any wakeup ones in there.
*
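Taken together, the reordered checks in acpi_s2idle_wake() implement the following triage (a simplified sketch of the hunk above, not verbatim kernel code):

    if (acpi_ec_other_gpes_active())
            return true;    /* a non-EC GPE fired: genuine wakeup */

    if (!acpi_ec_dispatch_gpe())
            return false;   /* not even the EC GPE is set: spurious */

    /* Otherwise cancel the wakeup and process pending EC events. */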
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index 959d6d5eb000..0a01df608849 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -2653,7 +2653,7 @@ static int genpd_iterate_idle_states(struct device_node *dn,
ret = of_count_phandle_with_args(dn, "domain-idle-states", NULL);
if (ret <= 0)
- return ret;
+ return ret == -ENOENT ? 0 : ret;
/* Loop over the phandles until all the requested entry is found */
of_for_each_phandle(&it, ret, dn, "domain-idle-states", NULL, 0) {
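The effect of the genpd change above: of_count_phandle_with_args() returns -ENOENT when the domain-idle-states property is missing entirely, and that case is now treated as "no idle states" (return 0) instead of being propagated as an error to the caller.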
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index 0e99a760aebd..6d1dee7051eb 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -40,6 +40,10 @@
typedef int (*pm_callback_t)(struct device *);
+#define list_for_each_entry_rcu_locked(pos, head, member) \
+ list_for_each_entry_rcu(pos, head, member, \
+ device_links_read_lock_held())
+
/*
* The entries in the dpm_list list are in a depth first order, simply
* because children are guaranteed to be discovered after parents, and
@@ -266,7 +270,7 @@ static void dpm_wait_for_suppliers(struct device *dev, bool async)
* callbacks freeing the link objects for the links in the list we're
* walking.
*/
- list_for_each_entry_rcu(link, &dev->links.suppliers, c_node)
+ list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node)
if (READ_ONCE(link->status) != DL_STATE_DORMANT)
dpm_wait(link->supplier, async);
@@ -323,7 +327,7 @@ static void dpm_wait_for_consumers(struct device *dev, bool async)
* continue instead of trying to continue in parallel with its
* unregistration).
*/
- list_for_each_entry_rcu(link, &dev->links.consumers, s_node)
+ list_for_each_entry_rcu_locked(link, &dev->links.consumers, s_node)
if (READ_ONCE(link->status) != DL_STATE_DORMANT)
dpm_wait(link->consumer, async);
@@ -1235,7 +1239,7 @@ static void dpm_superior_set_must_resume(struct device *dev)
idx = device_links_read_lock();
- list_for_each_entry_rcu(link, &dev->links.suppliers, c_node)
+ list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node)
link->supplier->power.must_resume = true;
device_links_read_unlock(idx);
@@ -1695,7 +1699,7 @@ static void dpm_clear_superiors_direct_complete(struct device *dev)
idx = device_links_read_lock();
- list_for_each_entry_rcu(link, &dev->links.suppliers, c_node) {
+ list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node) {
spin_lock_irq(&link->supplier->power.lock);
link->supplier->power.direct_complete = false;
spin_unlock_irq(&link->supplier->power.lock);
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index 16134a69bf6f..99c7da112c95 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -1087,29 +1087,47 @@ int __pm_runtime_resume(struct device *dev, int rpmflags)
EXPORT_SYMBOL_GPL(__pm_runtime_resume);
/**
- * pm_runtime_get_if_in_use - Conditionally bump up the device's usage counter.
+ * pm_runtime_get_if_active - Conditionally bump up the device's usage counter.
* @dev: Device to handle.
*
* Return -EINVAL if runtime PM is disabled for the device.
*
- * If that's not the case and if the device's runtime PM status is RPM_ACTIVE
- * and the runtime PM usage counter is nonzero, increment the counter and
- * return 1. Otherwise return 0 without changing the counter.
+ * Otherwise, if the device's runtime PM status is RPM_ACTIVE and either
+ * ign_usage_count is true or the device's usage_count is non-zero, increment
+ * the counter and return 1. Otherwise return 0 without changing the counter.
+ *
+ * If ign_usage_count is true, the function can be used to prevent suspending
+ * the device when its runtime PM status is RPM_ACTIVE.
+ *
+ * If ign_usage_count is false, the function can be used to prevent suspending
+ * the device when both its runtime PM status is RPM_ACTIVE and its usage_count
+ * is non-zero.
+ *
+ * The caller is responsible for putting the device's usage count when the
+ * return value is greater than zero.
*/
-int pm_runtime_get_if_in_use(struct device *dev)
+int pm_runtime_get_if_active(struct device *dev, bool ign_usage_count)
{
unsigned long flags;
int retval;
spin_lock_irqsave(&dev->power.lock, flags);
- retval = dev->power.disable_depth > 0 ? -EINVAL :
- dev->power.runtime_status == RPM_ACTIVE
- && atomic_inc_not_zero(&dev->power.usage_count);
+ if (dev->power.disable_depth > 0) {
+ retval = -EINVAL;
+ } else if (dev->power.runtime_status != RPM_ACTIVE) {
+ retval = 0;
+ } else if (ign_usage_count) {
+ retval = 1;
+ atomic_inc(&dev->power.usage_count);
+ } else {
+ retval = atomic_inc_not_zero(&dev->power.usage_count);
+ }
trace_rpm_usage_rcuidle(dev, 0);
spin_unlock_irqrestore(&dev->power.lock, flags);
+
return retval;
}
-EXPORT_SYMBOL_GPL(pm_runtime_get_if_in_use);
+EXPORT_SYMBOL_GPL(pm_runtime_get_if_active);
/**
* __pm_runtime_set_status - Set runtime PM status of a device.
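As a usage illustration of the renamed helper (a hedged sketch, not code from this patch): a driver that must only touch its registers while the device is already powered up could do:

    int ret = pm_runtime_get_if_active(dev, true);

    if (ret <= 0)
            return ret;     /* 0: not RPM_ACTIVE; -EINVAL: runtime PM disabled */

    /* ... hardware access; the device cannot runtime-suspend here ... */

    pm_runtime_put(dev);    /* drop the usage count taken above */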
diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
index 27f3e60608e5..92073ac68473 100644
--- a/drivers/base/power/wakeup.c
+++ b/drivers/base/power/wakeup.c
@@ -24,6 +24,9 @@ suspend_state_t pm_suspend_target_state;
#define pm_suspend_target_state (PM_SUSPEND_ON)
#endif
+#define list_for_each_entry_rcu_locked(pos, head, member) \
+ list_for_each_entry_rcu(pos, head, member, \
+ srcu_read_lock_held(&wakeup_srcu))
/*
* If set, the suspend/hibernate code will abort transitions to a sleep state
* if wakeup events are registered during or immediately before the transition.
@@ -241,7 +244,9 @@ void wakeup_source_unregister(struct wakeup_source *ws)
{
if (ws) {
wakeup_source_remove(ws);
- wakeup_source_sysfs_remove(ws);
+ if (ws->dev)
+ wakeup_source_sysfs_remove(ws);
+
wakeup_source_destroy(ws);
}
}
@@ -405,7 +410,7 @@ void device_wakeup_arm_wake_irqs(void)
int srcuidx;
srcuidx = srcu_read_lock(&wakeup_srcu);
- list_for_each_entry_rcu(ws, &wakeup_sources, entry)
+ list_for_each_entry_rcu_locked(ws, &wakeup_sources, entry)
dev_pm_arm_wake_irq(ws->wakeirq);
srcu_read_unlock(&wakeup_srcu, srcuidx);
}
@@ -421,7 +426,7 @@ void device_wakeup_disarm_wake_irqs(void)
int srcuidx;
srcuidx = srcu_read_lock(&wakeup_srcu);
- list_for_each_entry_rcu(ws, &wakeup_sources, entry)
+ list_for_each_entry_rcu_locked(ws, &wakeup_sources, entry)
dev_pm_disarm_wake_irq(ws->wakeirq);
srcu_read_unlock(&wakeup_srcu, srcuidx);
}
@@ -874,7 +879,7 @@ void pm_print_active_wakeup_sources(void)
struct wakeup_source *last_activity_ws = NULL;
srcuidx = srcu_read_lock(&wakeup_srcu);
- list_for_each_entry_rcu(ws, &wakeup_sources, entry) {
+ list_for_each_entry_rcu_locked(ws, &wakeup_sources, entry) {
if (ws->active) {
pm_pr_dbg("active wakeup source: %s\n", ws->name);
active = 1;
@@ -1025,7 +1030,7 @@ void pm_wakep_autosleep_enabled(bool set)
int srcuidx;
srcuidx = srcu_read_lock(&wakeup_srcu);
- list_for_each_entry_rcu(ws, &wakeup_sources, entry) {
+ list_for_each_entry_rcu_locked(ws, &wakeup_sources, entry) {
spin_lock_irq(&ws->lock);
if (ws->autosleep_enabled != set) {
ws->autosleep_enabled = set;
@@ -1104,7 +1109,7 @@ static void *wakeup_sources_stats_seq_start(struct seq_file *m,
}
*srcuidx = srcu_read_lock(&wakeup_srcu);
- list_for_each_entry_rcu(ws, &wakeup_sources, entry) {
+ list_for_each_entry_rcu_locked(ws, &wakeup_sources, entry) {
if (n-- <= 0)
return ws;
}
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm
index 3858d86cf409..15c1a1231516 100644
--- a/drivers/cpufreq/Kconfig.arm
+++ b/drivers/cpufreq/Kconfig.arm
@@ -128,7 +128,7 @@ config ARM_OMAP2PLUS_CPUFREQ
config ARM_QCOM_CPUFREQ_NVMEM
tristate "Qualcomm nvmem based CPUFreq"
- depends on ARM64
+ depends on ARCH_QCOM
depends on QCOM_QFPROM
depends on QCOM_SMEM
select PM_OPP
diff --git a/drivers/cpufreq/Kconfig.x86 b/drivers/cpufreq/Kconfig.x86
index a6528388952e..62502d0e4c33 100644
--- a/drivers/cpufreq/Kconfig.x86
+++ b/drivers/cpufreq/Kconfig.x86
@@ -25,7 +25,7 @@ config X86_PCC_CPUFREQ
This driver adds support for the PCC interface.
For details, take a look at:
- <file:Documentation/cpu-freq/pcc-cpufreq.txt>.
+ <file:Documentation/admin-guide/pm/cpufreq_drivers.rst>.
To compile this driver as a module, choose M here: the
module will be called pcc-cpufreq.
diff --git a/drivers/cpufreq/cpufreq-dt-platdev.c b/drivers/cpufreq/cpufreq-dt-platdev.c
index f2ae9cd455c1..cb9db16bea61 100644
--- a/drivers/cpufreq/cpufreq-dt-platdev.c
+++ b/drivers/cpufreq/cpufreq-dt-platdev.c
@@ -141,6 +141,11 @@ static const struct of_device_id blacklist[] __initconst = {
{ .compatible = "ti,dra7", },
{ .compatible = "ti,omap3", },
+ { .compatible = "qcom,ipq8064", },
+ { .compatible = "qcom,apq8064", },
+ { .compatible = "qcom,msm8974", },
+ { .compatible = "qcom,msm8960", },
+
{ }
};
diff --git a/drivers/cpufreq/cpufreq-dt.c b/drivers/cpufreq/cpufreq-dt.c
index d2b5f062a07b..26fe8dfb9ce6 100644
--- a/drivers/cpufreq/cpufreq-dt.c
+++ b/drivers/cpufreq/cpufreq-dt.c
@@ -363,6 +363,10 @@ static int dt_cpufreq_probe(struct platform_device *pdev)
dt_cpufreq_driver.resume = data->resume;
if (data->suspend)
dt_cpufreq_driver.suspend = data->suspend;
+ if (data->get_intermediate) {
+ dt_cpufreq_driver.target_intermediate = data->target_intermediate;
+ dt_cpufreq_driver.get_intermediate = data->get_intermediate;
+ }
}
ret = cpufreq_register_driver(&dt_cpufreq_driver);
diff --git a/drivers/cpufreq/cpufreq-dt.h b/drivers/cpufreq/cpufreq-dt.h
index a5a45b547d0b..28c8af7ec5ef 100644
--- a/drivers/cpufreq/cpufreq-dt.h
+++ b/drivers/cpufreq/cpufreq-dt.h
@@ -14,6 +14,10 @@ struct cpufreq_policy;
struct cpufreq_dt_platform_data {
bool have_governor_per_policy;
+ unsigned int (*get_intermediate)(struct cpufreq_policy *policy,
+ unsigned int index);
+ int (*target_intermediate)(struct cpufreq_policy *policy,
+ unsigned int index);
int (*suspend)(struct cpufreq_policy *policy);
int (*resume)(struct cpufreq_policy *policy);
};
diff --git a/drivers/cpufreq/cpufreq_stats.c b/drivers/cpufreq/cpufreq_stats.c
index f9bcf0f3ea30..94d959a8e954 100644
--- a/drivers/cpufreq/cpufreq_stats.c
+++ b/drivers/cpufreq/cpufreq_stats.c
@@ -90,35 +90,35 @@ static ssize_t show_trans_table(struct cpufreq_policy *policy, char *buf)
if (policy->fast_switch_enabled)
return 0;
- len += snprintf(buf + len, PAGE_SIZE - len, " From : To\n");
- len += snprintf(buf + len, PAGE_SIZE - len, " : ");
+ len += scnprintf(buf + len, PAGE_SIZE - len, " From : To\n");
+ len += scnprintf(buf + len, PAGE_SIZE - len, " : ");
for (i = 0; i < stats->state_num; i++) {
if (len >= PAGE_SIZE)
break;
- len += snprintf(buf + len, PAGE_SIZE - len, "%9u ",
+ len += scnprintf(buf + len, PAGE_SIZE - len, "%9u ",
stats->freq_table[i]);
}
if (len >= PAGE_SIZE)
return PAGE_SIZE;
- len += snprintf(buf + len, PAGE_SIZE - len, "\n");
+ len += scnprintf(buf + len, PAGE_SIZE - len, "\n");
for (i = 0; i < stats->state_num; i++) {
if (len >= PAGE_SIZE)
break;
- len += snprintf(buf + len, PAGE_SIZE - len, "%9u: ",
+ len += scnprintf(buf + len, PAGE_SIZE - len, "%9u: ",
stats->freq_table[i]);
for (j = 0; j < stats->state_num; j++) {
if (len >= PAGE_SIZE)
break;
- len += snprintf(buf + len, PAGE_SIZE - len, "%9u ",
+ len += scnprintf(buf + len, PAGE_SIZE - len, "%9u ",
stats->trans_table[i*stats->max_state+j]);
}
if (len >= PAGE_SIZE)
break;
- len += snprintf(buf + len, PAGE_SIZE - len, "\n");
+ len += scnprintf(buf + len, PAGE_SIZE - len, "\n");
}
if (len >= PAGE_SIZE) {
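The point of the scnprintf() conversion: snprintf() returns the length the output would have had, so len can jump past PAGE_SIZE once the buffer fills, whereas scnprintf() returns the number of bytes actually written. A tiny sketch of the failure mode (illustrative, not from the patch):

    char buf[8];
    size_t len = 0;

    len += snprintf(buf, sizeof(buf), "0123456789");  /* returns 10, not 7 */
    /* sizeof(buf) - len now underflows to a huge value on the next call. */

    len = scnprintf(buf, sizeof(buf), "0123456789");  /* returns 7: bytes written */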
diff --git a/drivers/cpufreq/imx-cpufreq-dt.c b/drivers/cpufreq/imx-cpufreq-dt.c
index 6cb8193421ea..de206d2745fe 100644
--- a/drivers/cpufreq/imx-cpufreq-dt.c
+++ b/drivers/cpufreq/imx-cpufreq-dt.c
@@ -19,6 +19,8 @@
#define IMX8MN_OCOTP_CFG3_SPEED_GRADE_MASK (0xf << 8)
#define OCOTP_CFG3_MKT_SEGMENT_SHIFT 6
#define OCOTP_CFG3_MKT_SEGMENT_MASK (0x3 << 6)
+#define IMX8MP_OCOTP_CFG3_MKT_SEGMENT_SHIFT 5
+#define IMX8MP_OCOTP_CFG3_MKT_SEGMENT_MASK (0x3 << 5)
/* cpufreq-dt device registered by imx-cpufreq-dt */
static struct platform_device *cpufreq_dt_pdev;
@@ -31,6 +33,9 @@ static int imx_cpufreq_dt_probe(struct platform_device *pdev)
int speed_grade, mkt_segment;
int ret;
+ if (!of_find_property(cpu_dev->of_node, "cpu-supply", NULL))
+ return -ENODEV;
+
ret = nvmem_cell_read_u32(cpu_dev, "speed_grade", &cell_value);
if (ret)
return ret;
@@ -42,7 +47,13 @@ static int imx_cpufreq_dt_probe(struct platform_device *pdev)
else
speed_grade = (cell_value & OCOTP_CFG3_SPEED_GRADE_MASK)
>> OCOTP_CFG3_SPEED_GRADE_SHIFT;
- mkt_segment = (cell_value & OCOTP_CFG3_MKT_SEGMENT_MASK) >> OCOTP_CFG3_MKT_SEGMENT_SHIFT;
+
+ if (of_machine_is_compatible("fsl,imx8mp"))
+ mkt_segment = (cell_value & IMX8MP_OCOTP_CFG3_MKT_SEGMENT_MASK)
+ >> IMX8MP_OCOTP_CFG3_MKT_SEGMENT_SHIFT;
+ else
+ mkt_segment = (cell_value & OCOTP_CFG3_MKT_SEGMENT_MASK)
+ >> OCOTP_CFG3_MKT_SEGMENT_SHIFT;
/*
* Early samples without fuses written report "0 0" which may NOT
diff --git a/drivers/cpufreq/imx6q-cpufreq.c b/drivers/cpufreq/imx6q-cpufreq.c
index 648a09a1778a..fdb2ffffbd15 100644
--- a/drivers/cpufreq/imx6q-cpufreq.c
+++ b/drivers/cpufreq/imx6q-cpufreq.c
@@ -216,31 +216,41 @@ static struct cpufreq_driver imx6q_cpufreq_driver = {
#define OCOTP_CFG3_SPEED_996MHZ 0x2
#define OCOTP_CFG3_SPEED_852MHZ 0x1
-static void imx6q_opp_check_speed_grading(struct device *dev)
+static int imx6q_opp_check_speed_grading(struct device *dev)
{
struct device_node *np;
void __iomem *base;
u32 val;
+ int ret;
- np = of_find_compatible_node(NULL, NULL, "fsl,imx6q-ocotp");
- if (!np)
- return;
+ if (of_find_property(dev->of_node, "nvmem-cells", NULL)) {
+ ret = nvmem_cell_read_u32(dev, "speed_grade", &val);
+ if (ret)
+ return ret;
+ } else {
+ np = of_find_compatible_node(NULL, NULL, "fsl,imx6q-ocotp");
+ if (!np)
+ return -ENOENT;
- base = of_iomap(np, 0);
- if (!base) {
- dev_err(dev, "failed to map ocotp\n");
- goto put_node;
+ base = of_iomap(np, 0);
+ of_node_put(np);
+ if (!base) {
+ dev_err(dev, "failed to map ocotp\n");
+ return -EFAULT;
+ }
+
+ /*
+ * SPEED_GRADING[1:0] defines the max speed of ARM:
+ * 2b'11: 1200000000Hz;
+ * 2b'10: 996000000Hz;
+ * 2b'01: 852000000Hz; -- i.MX6Q Only, exclusive with 996MHz.
+ * 2b'00: 792000000Hz;
+ * We need to set the max speed of ARM according to fuse map.
+ */
+ val = readl_relaxed(base + OCOTP_CFG3);
+ iounmap(base);
}
- /*
- * SPEED_GRADING[1:0] defines the max speed of ARM:
- * 2b'11: 1200000000Hz;
- * 2b'10: 996000000Hz;
- * 2b'01: 852000000Hz; -- i.MX6Q Only, exclusive with 996MHz.
- * 2b'00: 792000000Hz;
- * We need to set the max speed of ARM according to fuse map.
- */
- val = readl_relaxed(base + OCOTP_CFG3);
val >>= OCOTP_CFG3_SPEED_SHIFT;
val &= 0x3;
@@ -257,9 +267,8 @@ static void imx6q_opp_check_speed_grading(struct device *dev)
if (dev_pm_opp_disable(dev, 1200000000))
dev_warn(dev, "failed to disable 1.2GHz OPP\n");
}
- iounmap(base);
-put_node:
- of_node_put(np);
+
+ return 0;
}
#define OCOTP_CFG3_6UL_SPEED_696MHZ 0x2
@@ -281,6 +290,9 @@ static int imx6ul_opp_check_speed_grading(struct device *dev)
np = of_find_compatible_node(NULL, NULL, "fsl,imx6ul-ocotp");
if (!np)
+ np = of_find_compatible_node(NULL, NULL,
+ "fsl,imx6ull-ocotp");
+ if (!np)
return -ENOENT;
base = of_iomap(np, 0);
@@ -378,23 +390,22 @@ static int imx6q_cpufreq_probe(struct platform_device *pdev)
goto put_reg;
}
+ /* Because we have added the OPPs here, we must free them */
+ free_opp = true;
+
if (of_machine_is_compatible("fsl,imx6ul") ||
of_machine_is_compatible("fsl,imx6ull")) {
ret = imx6ul_opp_check_speed_grading(cpu_dev);
- if (ret) {
- if (ret == -EPROBE_DEFER)
- goto put_node;
-
+ } else {
+ ret = imx6q_opp_check_speed_grading(cpu_dev);
+ }
+ if (ret) {
+ if (ret != -EPROBE_DEFER)
dev_err(cpu_dev, "failed to read ocotp: %d\n",
ret);
- goto put_node;
- }
- } else {
- imx6q_opp_check_speed_grading(cpu_dev);
+ goto out_free_opp;
}
- /* Because we have added the OPPs here, we must free them */
- free_opp = true;
num = dev_pm_opp_get_opp_count(cpu_dev);
if (num < 0) {
ret = num;
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index c81e1ff29069..d2297839374d 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -2155,15 +2155,19 @@ static void intel_pstate_adjust_policy_max(struct cpudata *cpu,
}
}
-static int intel_pstate_verify_policy(struct cpufreq_policy_data *policy)
+static void intel_pstate_verify_cpu_policy(struct cpudata *cpu,
+ struct cpufreq_policy_data *policy)
{
- struct cpudata *cpu = all_cpu_data[policy->cpu];
-
update_turbo_state();
cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
intel_pstate_get_max_freq(cpu));
intel_pstate_adjust_policy_max(cpu, policy);
+}
+
+static int intel_pstate_verify_policy(struct cpufreq_policy_data *policy)
+{
+ intel_pstate_verify_cpu_policy(all_cpu_data[policy->cpu], policy);
return 0;
}
@@ -2243,10 +2247,11 @@ static int intel_pstate_cpu_init(struct cpufreq_policy *policy)
if (ret)
return ret;
- if (IS_ENABLED(CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE))
- policy->policy = CPUFREQ_POLICY_PERFORMANCE;
- else
- policy->policy = CPUFREQ_POLICY_POWERSAVE;
+ /*
+ * Set the policy to powersave to provide a valid fallback value in case
+ * the default cpufreq governor is neither powersave nor performance.
+ */
+ policy->policy = CPUFREQ_POLICY_POWERSAVE;
return 0;
}
@@ -2268,12 +2273,7 @@ static int intel_cpufreq_verify_policy(struct cpufreq_policy_data *policy)
{
struct cpudata *cpu = all_cpu_data[policy->cpu];
- update_turbo_state();
- cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
- intel_pstate_get_max_freq(cpu));
-
- intel_pstate_adjust_policy_max(cpu, policy);
-
+ intel_pstate_verify_cpu_policy(cpu, policy);
intel_pstate_update_perf_limits(cpu, policy->min, policy->max);
return 0;
diff --git a/drivers/cpufreq/qcom-cpufreq-nvmem.c b/drivers/cpufreq/qcom-cpufreq-nvmem.c
index f0d2d5035413..a1b8238872a2 100644
--- a/drivers/cpufreq/qcom-cpufreq-nvmem.c
+++ b/drivers/cpufreq/qcom-cpufreq-nvmem.c
@@ -49,12 +49,14 @@ struct qcom_cpufreq_drv;
struct qcom_cpufreq_match_data {
int (*get_version)(struct device *cpu_dev,
struct nvmem_cell *speedbin_nvmem,
+ char **pvs_name,
struct qcom_cpufreq_drv *drv);
const char **genpd_names;
};
struct qcom_cpufreq_drv {
- struct opp_table **opp_tables;
+ struct opp_table **names_opp_tables;
+ struct opp_table **hw_opp_tables;
struct opp_table **genpd_opp_tables;
u32 versions;
const struct qcom_cpufreq_match_data *data;
@@ -62,6 +64,84 @@ struct qcom_cpufreq_drv {
static struct platform_device *cpufreq_dt_pdev, *cpufreq_pdev;
+static void get_krait_bin_format_a(struct device *cpu_dev,
+ int *speed, int *pvs, int *pvs_ver,
+ struct nvmem_cell *pvs_nvmem, u8 *buf)
+{
+ u32 pte_efuse;
+
+ pte_efuse = *((u32 *)buf);
+
+ *speed = pte_efuse & 0xf;
+ if (*speed == 0xf)
+ *speed = (pte_efuse >> 4) & 0xf;
+
+ if (*speed == 0xf) {
+ *speed = 0;
+ dev_warn(cpu_dev, "Speed bin: Defaulting to %d\n", *speed);
+ } else {
+ dev_dbg(cpu_dev, "Speed bin: %d\n", *speed);
+ }
+
+ *pvs = (pte_efuse >> 10) & 0x7;
+ if (*pvs == 0x7)
+ *pvs = (pte_efuse >> 13) & 0x7;
+
+ if (*pvs == 0x7) {
+ *pvs = 0;
+ dev_warn(cpu_dev, "PVS bin: Defaulting to %d\n", *pvs);
+ } else {
+ dev_dbg(cpu_dev, "PVS bin: %d\n", *pvs);
+ }
+}
+
+static void get_krait_bin_format_b(struct device *cpu_dev,
+ int *speed, int *pvs, int *pvs_ver,
+ struct nvmem_cell *pvs_nvmem, u8 *buf)
+{
+ u32 pte_efuse, redundant_sel;
+
+ pte_efuse = *((u32 *)buf);
+ redundant_sel = (pte_efuse >> 24) & 0x7;
+
+ *pvs_ver = (pte_efuse >> 4) & 0x3;
+
+ switch (redundant_sel) {
+ case 1:
+ *pvs = ((pte_efuse >> 28) & 0x8) | ((pte_efuse >> 6) & 0x7);
+ *speed = (pte_efuse >> 27) & 0xf;
+ break;
+ case 2:
+ *pvs = (pte_efuse >> 27) & 0xf;
+ *speed = pte_efuse & 0x7;
+ break;
+ default:
+ /* 4 bits of PVS are in efuse register bits 31, 8-6. */
+ *pvs = ((pte_efuse >> 28) & 0x8) | ((pte_efuse >> 6) & 0x7);
+ *speed = pte_efuse & 0x7;
+ }
+
+ /* Check SPEED_BIN_BLOW_STATUS */
+ if (pte_efuse & BIT(3)) {
+ dev_dbg(cpu_dev, "Speed bin: %d\n", *speed);
+ } else {
+ dev_warn(cpu_dev, "Speed bin not set. Defaulting to 0!\n");
+ *speed = 0;
+ }
+
+ /* Check PVS_BLOW_STATUS */
+ pte_efuse = *(((u32 *)buf) + 4);
+ pte_efuse &= BIT(21);
+ if (pte_efuse) {
+ dev_dbg(cpu_dev, "PVS bin: %d\n", *pvs);
+ } else {
+ dev_warn(cpu_dev, "PVS bin not set. Defaulting to 0!\n");
+ *pvs = 0;
+ }
+
+ dev_dbg(cpu_dev, "PVS version: %d\n", *pvs_ver);
+}
+
static enum _msm8996_version qcom_cpufreq_get_msm_id(void)
{
size_t len;
@@ -93,11 +173,13 @@ static enum _msm8996_version qcom_cpufreq_get_msm_id(void)
static int qcom_cpufreq_kryo_name_version(struct device *cpu_dev,
struct nvmem_cell *speedbin_nvmem,
+ char **pvs_name,
struct qcom_cpufreq_drv *drv)
{
size_t len;
u8 *speedbin;
enum _msm8996_version msm8996_version;
+ *pvs_name = NULL;
msm8996_version = qcom_cpufreq_get_msm_id();
if (NUM_OF_MSM8996_VERSIONS == msm8996_version) {
@@ -125,10 +207,51 @@ static int qcom_cpufreq_kryo_name_version(struct device *cpu_dev,
return 0;
}
+static int qcom_cpufreq_krait_name_version(struct device *cpu_dev,
+ struct nvmem_cell *speedbin_nvmem,
+ char **pvs_name,
+ struct qcom_cpufreq_drv *drv)
+{
+ int speed = 0, pvs = 0, pvs_ver = 0;
+ u8 *speedbin;
+ size_t len;
+
+ speedbin = nvmem_cell_read(speedbin_nvmem, &len);
+
+ if (IS_ERR(speedbin))
+ return PTR_ERR(speedbin);
+
+ switch (len) {
+ case 4:
+ get_krait_bin_format_a(cpu_dev, &speed, &pvs, &pvs_ver,
+ speedbin_nvmem, speedbin);
+ break;
+ case 8:
+ get_krait_bin_format_b(cpu_dev, &speed, &pvs, &pvs_ver,
+ speedbin_nvmem, speedbin);
+ break;
+ default:
+ dev_err(cpu_dev, "Unable to read nvmem data. Defaulting to 0!\n");
+ return -ENODEV;
+ }
+
+ snprintf(*pvs_name, sizeof("speedXX-pvsXX-vXX"), "speed%d-pvs%d-v%d",
+ speed, pvs, pvs_ver);
+
+ drv->versions = (1 << speed);
+
+ kfree(speedbin);
+ return 0;
+}
+
static const struct qcom_cpufreq_match_data match_data_kryo = {
.get_version = qcom_cpufreq_kryo_name_version,
};
+static const struct qcom_cpufreq_match_data match_data_krait = {
+ .get_version = qcom_cpufreq_krait_name_version,
+};
+
static const char *qcs404_genpd_names[] = { "cpr", NULL };
static const struct qcom_cpufreq_match_data match_data_qcs404 = {
@@ -141,6 +264,7 @@ static int qcom_cpufreq_probe(struct platform_device *pdev)
struct nvmem_cell *speedbin_nvmem;
struct device_node *np;
struct device *cpu_dev;
+ char *pvs_name = "speedXX-pvsXX-vXX";
unsigned cpu;
const struct of_device_id *match;
int ret;
@@ -153,7 +277,7 @@ static int qcom_cpufreq_probe(struct platform_device *pdev)
if (!np)
return -ENOENT;
- ret = of_device_is_compatible(np, "operating-points-v2-kryo-cpu");
+ ret = of_device_is_compatible(np, "operating-points-v2-qcom-cpu");
if (!ret) {
of_node_put(np);
return -ENOENT;
@@ -181,7 +305,8 @@ static int qcom_cpufreq_probe(struct platform_device *pdev)
goto free_drv;
}
- ret = drv->data->get_version(cpu_dev, speedbin_nvmem, drv);
+ ret = drv->data->get_version(cpu_dev,
+ speedbin_nvmem, &pvs_name, drv);
if (ret) {
nvmem_cell_put(speedbin_nvmem);
goto free_drv;
@@ -190,12 +315,20 @@ static int qcom_cpufreq_probe(struct platform_device *pdev)
}
of_node_put(np);
- drv->opp_tables = kcalloc(num_possible_cpus(), sizeof(*drv->opp_tables),
+ drv->names_opp_tables = kcalloc(num_possible_cpus(),
+ sizeof(*drv->names_opp_tables),
GFP_KERNEL);
- if (!drv->opp_tables) {
+ if (!drv->names_opp_tables) {
ret = -ENOMEM;
goto free_drv;
}
+ drv->hw_opp_tables = kcalloc(num_possible_cpus(),
+ sizeof(*drv->hw_opp_tables),
+ GFP_KERNEL);
+ if (!drv->hw_opp_tables) {
+ ret = -ENOMEM;
+ goto free_opp_names;
+ }
drv->genpd_opp_tables = kcalloc(num_possible_cpus(),
sizeof(*drv->genpd_opp_tables),
@@ -213,11 +346,23 @@ static int qcom_cpufreq_probe(struct platform_device *pdev)
}
if (drv->data->get_version) {
- drv->opp_tables[cpu] =
- dev_pm_opp_set_supported_hw(cpu_dev,
- &drv->versions, 1);
- if (IS_ERR(drv->opp_tables[cpu])) {
- ret = PTR_ERR(drv->opp_tables[cpu]);
+
+ if (pvs_name) {
+ drv->names_opp_tables[cpu] = dev_pm_opp_set_prop_name(
+ cpu_dev,
+ pvs_name);
+ if (IS_ERR(drv->names_opp_tables[cpu])) {
+ ret = PTR_ERR(drv->names_opp_tables[cpu]);
+ dev_err(cpu_dev, "Failed to add OPP name %s\n",
+ pvs_name);
+ goto free_opp;
+ }
+ }
+
+ drv->hw_opp_tables[cpu] = dev_pm_opp_set_supported_hw(
+ cpu_dev, &drv->versions, 1);
+ if (IS_ERR(drv->hw_opp_tables[cpu])) {
+ ret = PTR_ERR(drv->hw_opp_tables[cpu]);
dev_err(cpu_dev,
"Failed to set supported hardware\n");
goto free_genpd_opp;
@@ -259,11 +404,18 @@ free_genpd_opp:
kfree(drv->genpd_opp_tables);
free_opp:
for_each_possible_cpu(cpu) {
- if (IS_ERR_OR_NULL(drv->opp_tables[cpu]))
+ if (IS_ERR_OR_NULL(drv->names_opp_tables[cpu]))
+ break;
+ dev_pm_opp_put_prop_name(drv->names_opp_tables[cpu]);
+ }
+ for_each_possible_cpu(cpu) {
+ if (IS_ERR_OR_NULL(drv->hw_opp_tables[cpu]))
break;
- dev_pm_opp_put_supported_hw(drv->opp_tables[cpu]);
+ dev_pm_opp_put_supported_hw(drv->hw_opp_tables[cpu]);
}
- kfree(drv->opp_tables);
+ kfree(drv->hw_opp_tables);
+free_opp_names:
+ kfree(drv->names_opp_tables);
free_drv:
kfree(drv);
@@ -278,13 +430,16 @@ static int qcom_cpufreq_remove(struct platform_device *pdev)
platform_device_unregister(cpufreq_dt_pdev);
for_each_possible_cpu(cpu) {
- if (drv->opp_tables[cpu])
- dev_pm_opp_put_supported_hw(drv->opp_tables[cpu]);
+ if (drv->names_opp_tables[cpu])
+ dev_pm_opp_put_supported_hw(drv->names_opp_tables[cpu]);
+ if (drv->hw_opp_tables[cpu])
+ dev_pm_opp_put_supported_hw(drv->hw_opp_tables[cpu]);
if (drv->genpd_opp_tables[cpu])
dev_pm_opp_detach_genpd(drv->genpd_opp_tables[cpu]);
}
- kfree(drv->opp_tables);
+ kfree(drv->names_opp_tables);
+ kfree(drv->hw_opp_tables);
kfree(drv->genpd_opp_tables);
kfree(drv);
@@ -303,6 +458,10 @@ static const struct of_device_id qcom_cpufreq_match_list[] __initconst = {
{ .compatible = "qcom,apq8096", .data = &match_data_kryo },
{ .compatible = "qcom,msm8996", .data = &match_data_kryo },
{ .compatible = "qcom,qcs404", .data = &match_data_qcs404 },
+ { .compatible = "qcom,ipq8064", .data = &match_data_krait },
+ { .compatible = "qcom,apq8064", .data = &match_data_krait },
+ { .compatible = "qcom,msm8974", .data = &match_data_krait },
+ { .compatible = "qcom,msm8960", .data = &match_data_krait },
{},
};
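For a sense of what the Krait path produces: a part fused as speed bin 2, PVS bin 1, PVS version 0 yields the property name "speed2-pvs1-v0" from the snprintf() template above; dev_pm_opp_set_prop_name() then makes the OPP core pick the matching per-name properties (e.g. opp-microvolt-speed2-pvs1-v0) from the device tree. The concrete bin values here are illustrative.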
diff --git a/drivers/cpufreq/ti-cpufreq.c b/drivers/cpufreq/ti-cpufreq.c
index 557cb513bf7f..ab0de27539ad 100644
--- a/drivers/cpufreq/ti-cpufreq.c
+++ b/drivers/cpufreq/ti-cpufreq.c
@@ -25,11 +25,14 @@
#define DRA7_EFUSE_HAS_OD_MPU_OPP 11
#define DRA7_EFUSE_HAS_HIGH_MPU_OPP 15
+#define DRA76_EFUSE_HAS_PLUS_MPU_OPP 18
#define DRA7_EFUSE_HAS_ALL_MPU_OPP 23
+#define DRA76_EFUSE_HAS_ALL_MPU_OPP 24
#define DRA7_EFUSE_NOM_MPU_OPP BIT(0)
#define DRA7_EFUSE_OD_MPU_OPP BIT(1)
#define DRA7_EFUSE_HIGH_MPU_OPP BIT(2)
+#define DRA76_EFUSE_PLUS_MPU_OPP BIT(3)
#define OMAP3_CONTROL_DEVICE_STATUS 0x4800244C
#define OMAP3_CONTROL_IDCODE 0x4830A204
@@ -80,6 +83,10 @@ static unsigned long dra7_efuse_xlate(struct ti_cpufreq_data *opp_data,
*/
switch (efuse) {
+ case DRA76_EFUSE_HAS_PLUS_MPU_OPP:
+ case DRA76_EFUSE_HAS_ALL_MPU_OPP:
+ calculated_efuse |= DRA76_EFUSE_PLUS_MPU_OPP;
+ /* Fall through */
case DRA7_EFUSE_HAS_ALL_MPU_OPP:
case DRA7_EFUSE_HAS_HIGH_MPU_OPP:
calculated_efuse |= DRA7_EFUSE_HIGH_MPU_OPP;
diff --git a/drivers/cpuidle/cpuidle-haltpoll.c b/drivers/cpuidle/cpuidle-haltpoll.c
index b0ce9bc78113..db124bc1ca2c 100644
--- a/drivers/cpuidle/cpuidle-haltpoll.c
+++ b/drivers/cpuidle/cpuidle-haltpoll.c
@@ -18,6 +18,10 @@
#include <linux/kvm_para.h>
#include <linux/cpuidle_haltpoll.h>
+static bool force __read_mostly;
+module_param(force, bool, 0444);
+MODULE_PARM_DESC(force, "Load unconditionally");
+
static struct cpuidle_device __percpu *haltpoll_cpuidle_devices;
static enum cpuhp_state haltpoll_hp_state;
@@ -90,6 +94,11 @@ static void haltpoll_uninit(void)
haltpoll_cpuidle_devices = NULL;
}
+static bool haltpoll_want(void)
+{
+ return kvm_para_has_hint(KVM_HINTS_REALTIME) || force;
+}
+
static int __init haltpoll_init(void)
{
int ret;
@@ -101,8 +110,7 @@ static int __init haltpoll_init(void)
cpuidle_poll_state_init(drv);
- if (!kvm_para_available() ||
- !kvm_para_has_hint(KVM_HINTS_REALTIME))
+ if (!kvm_para_available() || !haltpoll_want())
return -ENODEV;
ret = cpuidle_register_driver(drv);
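With the new parameter, the driver can be loaded on hosts that do not advertise KVM_HINTS_REALTIME by passing force=1 (modprobe cpuidle-haltpoll force=1, or cpuidle_haltpoll.force=1 on the kernel command line when built in); note that kvm_para_available() must still succeed even when force is set.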
diff --git a/drivers/cpuidle/cpuidle-psci.c b/drivers/cpuidle/cpuidle-psci.c
index edd7a54ef0d3..bae9140a65a5 100644
--- a/drivers/cpuidle/cpuidle-psci.c
+++ b/drivers/cpuidle/cpuidle-psci.c
@@ -160,6 +160,29 @@ int __init psci_dt_parse_state_node(struct device_node *np, u32 *state)
return 0;
}
+static int __init psci_dt_cpu_init_topology(struct cpuidle_driver *drv,
+ struct psci_cpuidle_data *data,
+ unsigned int state_count, int cpu)
+{
+ /* Currently limit the hierarchical topology to be used in OSI mode. */
+ if (!psci_has_osi_support())
+ return 0;
+
+ data->dev = psci_dt_attach_cpu(cpu);
+ if (IS_ERR_OR_NULL(data->dev))
+ return PTR_ERR_OR_ZERO(data->dev);
+
+ /*
+ * Using the deepest state for the CPU to trigger a potential selection
+ * of a shared state for the domain, assumes the domain states are all
+ * deeper states.
+ */
+ drv->states[state_count - 1].enter = psci_enter_domain_idle_state;
+ psci_cpuidle_use_cpuhp = true;
+
+ return 0;
+}
+
static int __init psci_dt_cpu_init_idle(struct cpuidle_driver *drv,
struct device_node *cpu_node,
unsigned int state_count, int cpu)
@@ -193,25 +216,10 @@ static int __init psci_dt_cpu_init_idle(struct cpuidle_driver *drv,
goto free_mem;
}
- /* Currently limit the hierarchical topology to be used in OSI mode. */
- if (psci_has_osi_support()) {
- data->dev = psci_dt_attach_cpu(cpu);
- if (IS_ERR(data->dev)) {
- ret = PTR_ERR(data->dev);
- goto free_mem;
- }
-
- /*
- * Using the deepest state for the CPU to trigger a potential
- * selection of a shared state for the domain, assumes the
- * domain states are all deeper states.
- */
- if (data->dev) {
- drv->states[state_count - 1].enter =
- psci_enter_domain_idle_state;
- psci_cpuidle_use_cpuhp = true;
- }
- }
+ /* Initialize optional data, used for the hierarchical topology. */
+ ret = psci_dt_cpu_init_topology(drv, data, state_count, cpu);
+ if (ret < 0)
+ goto free_mem;
/* Idle states parsed correctly, store them in the per-cpu struct. */
data->psci_states = psci_states;
diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
index de81298051b3..c149d9e20dfd 100644
--- a/drivers/cpuidle/cpuidle.c
+++ b/drivers/cpuidle/cpuidle.c
@@ -736,53 +736,15 @@ int cpuidle_register(struct cpuidle_driver *drv,
}
EXPORT_SYMBOL_GPL(cpuidle_register);
-#ifdef CONFIG_SMP
-
-/*
- * This function gets called when a part of the kernel has a new latency
- * requirement. This means we need to get all processors out of their C-state,
- * and then recalculate a new suitable C-state. Just do a cross-cpu IPI; that
- * wakes them all right up.
- */
-static int cpuidle_latency_notify(struct notifier_block *b,
- unsigned long l, void *v)
-{
- wake_up_all_idle_cpus();
- return NOTIFY_OK;
-}
-
-static struct notifier_block cpuidle_latency_notifier = {
- .notifier_call = cpuidle_latency_notify,
-};
-
-static inline void latency_notifier_init(struct notifier_block *n)
-{
- pm_qos_add_notifier(PM_QOS_CPU_DMA_LATENCY, n);
-}
-
-#else /* CONFIG_SMP */
-
-#define latency_notifier_init(x) do { } while (0)
-
-#endif /* CONFIG_SMP */
-
/**
* cpuidle_init - core initializer
*/
static int __init cpuidle_init(void)
{
- int ret;
-
if (cpuidle_disabled())
return -ENODEV;
- ret = cpuidle_add_interface(cpu_subsys.dev_root);
- if (ret)
- return ret;
-
- latency_notifier_init(&cpuidle_latency_notifier);
-
- return 0;
+ return cpuidle_add_interface(cpu_subsys.dev_root);
}
module_param(off, int, 0444);
diff --git a/drivers/cpuidle/governor.c b/drivers/cpuidle/governor.c
index e48271e117a3..29acaf48e575 100644
--- a/drivers/cpuidle/governor.c
+++ b/drivers/cpuidle/governor.c
@@ -109,9 +109,9 @@ int cpuidle_register_governor(struct cpuidle_governor *gov)
*/
s64 cpuidle_governor_latency_req(unsigned int cpu)
{
- int global_req = pm_qos_request(PM_QOS_CPU_DMA_LATENCY);
struct device *device = get_cpu_device(cpu);
int device_req = dev_pm_qos_raw_resume_latency(device);
+ int global_req = cpu_latency_qos_limit();
if (device_req > global_req)
device_req = global_req;
diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
index 7dcf2093e531..6fecd11dafdd 100644
--- a/drivers/devfreq/devfreq.c
+++ b/drivers/devfreq/devfreq.c
@@ -550,14 +550,14 @@ out:
EXPORT_SYMBOL(devfreq_monitor_resume);
/**
- * devfreq_interval_update() - Update device devfreq monitoring interval
+ * devfreq_update_interval() - Update device devfreq monitoring interval
* @devfreq: the devfreq instance.
* @delay: new polling interval to be set.
*
* Helper function to set new load monitoring polling interval. Function
- * to be called from governor in response to DEVFREQ_GOV_INTERVAL event.
+ * to be called from governor in response to DEVFREQ_GOV_UPDATE_INTERVAL event.
*/
-void devfreq_interval_update(struct devfreq *devfreq, unsigned int *delay)
+void devfreq_update_interval(struct devfreq *devfreq, unsigned int *delay)
{
unsigned int cur_delay = devfreq->profile->polling_ms;
unsigned int new_delay = *delay;
@@ -597,7 +597,7 @@ void devfreq_interval_update(struct devfreq *devfreq, unsigned int *delay)
out:
mutex_unlock(&devfreq->lock);
}
-EXPORT_SYMBOL(devfreq_interval_update);
+EXPORT_SYMBOL(devfreq_update_interval);
/**
* devfreq_notifier_call() - Notify that the device frequency requirements
@@ -705,13 +705,13 @@ static void devfreq_dev_release(struct device *dev)
if (dev_pm_qos_request_active(&devfreq->user_max_freq_req)) {
err = dev_pm_qos_remove_request(&devfreq->user_max_freq_req);
- if (err)
+ if (err < 0)
dev_warn(dev->parent,
"Failed to remove max_freq request: %d\n", err);
}
if (dev_pm_qos_request_active(&devfreq->user_min_freq_req)) {
err = dev_pm_qos_remove_request(&devfreq->user_min_freq_req);
- if (err)
+ if (err < 0)
dev_warn(dev->parent,
"Failed to remove min_freq request: %d\n", err);
}
@@ -1424,7 +1424,7 @@ static ssize_t polling_interval_store(struct device *dev,
if (ret != 1)
return -EINVAL;
- df->governor->event_handler(df, DEVFREQ_GOV_INTERVAL, &value);
+ df->governor->event_handler(df, DEVFREQ_GOV_UPDATE_INTERVAL, &value);
ret = count;
return ret;
diff --git a/drivers/devfreq/governor.h b/drivers/devfreq/governor.h
index dc7533ccc3db..ae4d0cc18359 100644
--- a/drivers/devfreq/governor.h
+++ b/drivers/devfreq/governor.h
@@ -18,7 +18,7 @@
/* Devfreq events */
#define DEVFREQ_GOV_START 0x1
#define DEVFREQ_GOV_STOP 0x2
-#define DEVFREQ_GOV_INTERVAL 0x3
+#define DEVFREQ_GOV_UPDATE_INTERVAL 0x3
#define DEVFREQ_GOV_SUSPEND 0x4
#define DEVFREQ_GOV_RESUME 0x5
@@ -30,7 +30,7 @@
* @node: list node - contains registered devfreq governors
* @name: Governor's name
* @immutable: Immutable flag for governor. If the value is 1,
- * this govenror is never changeable to other governor.
+ * this governor is never changeable to other governor.
* @interrupt_driven: Devfreq core won't schedule polling work for this
* governor if value is set to 1.
* @get_target_freq: Returns desired operating frequency for the device.
@@ -57,17 +57,16 @@ struct devfreq_governor {
unsigned int event, void *data);
};
-extern void devfreq_monitor_start(struct devfreq *devfreq);
-extern void devfreq_monitor_stop(struct devfreq *devfreq);
-extern void devfreq_monitor_suspend(struct devfreq *devfreq);
-extern void devfreq_monitor_resume(struct devfreq *devfreq);
-extern void devfreq_interval_update(struct devfreq *devfreq,
- unsigned int *delay);
+void devfreq_monitor_start(struct devfreq *devfreq);
+void devfreq_monitor_stop(struct devfreq *devfreq);
+void devfreq_monitor_suspend(struct devfreq *devfreq);
+void devfreq_monitor_resume(struct devfreq *devfreq);
+void devfreq_update_interval(struct devfreq *devfreq, unsigned int *delay);
-extern int devfreq_add_governor(struct devfreq_governor *governor);
-extern int devfreq_remove_governor(struct devfreq_governor *governor);
+int devfreq_add_governor(struct devfreq_governor *governor);
+int devfreq_remove_governor(struct devfreq_governor *governor);
-extern int devfreq_update_status(struct devfreq *devfreq, unsigned long freq);
+int devfreq_update_status(struct devfreq *devfreq, unsigned long freq);
static inline int devfreq_update_stats(struct devfreq *df)
{
diff --git a/drivers/devfreq/governor_simpleondemand.c b/drivers/devfreq/governor_simpleondemand.c
index 3d809f228619..1b314e1df028 100644
--- a/drivers/devfreq/governor_simpleondemand.c
+++ b/drivers/devfreq/governor_simpleondemand.c
@@ -96,8 +96,8 @@ static int devfreq_simple_ondemand_handler(struct devfreq *devfreq,
devfreq_monitor_stop(devfreq);
break;
- case DEVFREQ_GOV_INTERVAL:
- devfreq_interval_update(devfreq, (unsigned int *)data);
+ case DEVFREQ_GOV_UPDATE_INTERVAL:
+ devfreq_update_interval(devfreq, (unsigned int *)data);
break;
case DEVFREQ_GOV_SUSPEND:
diff --git a/drivers/devfreq/governor_userspace.c b/drivers/devfreq/governor_userspace.c
index af94942fcf95..0fd6c4851071 100644
--- a/drivers/devfreq/governor_userspace.c
+++ b/drivers/devfreq/governor_userspace.c
@@ -131,7 +131,7 @@ static int devfreq_userspace_handler(struct devfreq *devfreq,
}
static struct devfreq_governor devfreq_userspace = {
- .name = "userspace",
+ .name = DEVFREQ_GOV_USERSPACE,
.get_target_freq = devfreq_userspace_func,
.event_handler = devfreq_userspace_handler,
};
diff --git a/drivers/devfreq/tegra30-devfreq.c b/drivers/devfreq/tegra30-devfreq.c
index 0b65f89d74d5..28b2c7ca416e 100644
--- a/drivers/devfreq/tegra30-devfreq.c
+++ b/drivers/devfreq/tegra30-devfreq.c
@@ -734,7 +734,7 @@ static int tegra_governor_event_handler(struct devfreq *devfreq,
devfreq_monitor_stop(devfreq);
break;
- case DEVFREQ_GOV_INTERVAL:
+ case DEVFREQ_GOV_UPDATE_INTERVAL:
/*
* ACTMON hardware supports up to 256 milliseconds for the
* sampling period.
@@ -745,7 +745,7 @@ static int tegra_governor_event_handler(struct devfreq *devfreq,
}
tegra_actmon_pause(tegra);
- devfreq_interval_update(devfreq, new_delay);
+ devfreq_update_interval(devfreq, new_delay);
ret = tegra_actmon_resume(tegra);
break;
diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
index c7424e2a04a3..208457005a11 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.c
+++ b/drivers/gpu/drm/i915/display/intel_dp.c
@@ -1360,7 +1360,7 @@ intel_dp_aux_xfer(struct intel_dp *intel_dp,
* lowest possible wakeup latency and so prevent the cpu from going into
* deep sleep states.
*/
- pm_qos_update_request(&i915->pm_qos, 0);
+ cpu_latency_qos_update_request(&i915->pm_qos, 0);
intel_dp_check_edp(intel_dp);
@@ -1488,7 +1488,7 @@ done:
ret = recv_bytes;
out:
- pm_qos_update_request(&i915->pm_qos, PM_QOS_DEFAULT_VALUE);
+ cpu_latency_qos_update_request(&i915->pm_qos, PM_QOS_DEFAULT_VALUE);
if (vdd)
edp_panel_vdd_off(intel_dp, false);
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 8410330ce4f0..eec1d6a6aa22 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -505,8 +505,7 @@ static int i915_driver_early_probe(struct drm_i915_private *dev_priv)
mutex_init(&dev_priv->backlight_lock);
mutex_init(&dev_priv->sb_lock);
- pm_qos_add_request(&dev_priv->sb_qos,
- PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);
+ cpu_latency_qos_add_request(&dev_priv->sb_qos, PM_QOS_DEFAULT_VALUE);
mutex_init(&dev_priv->av_mutex);
mutex_init(&dev_priv->wm.wm_mutex);
@@ -571,7 +570,7 @@ static void i915_driver_late_release(struct drm_i915_private *dev_priv)
vlv_free_s0ix_state(dev_priv);
i915_workqueues_cleanup(dev_priv);
- pm_qos_remove_request(&dev_priv->sb_qos);
+ cpu_latency_qos_remove_request(&dev_priv->sb_qos);
mutex_destroy(&dev_priv->sb_lock);
}
@@ -1229,8 +1228,7 @@ static int i915_driver_hw_probe(struct drm_i915_private *dev_priv)
}
}
- pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY,
- PM_QOS_DEFAULT_VALUE);
+ cpu_latency_qos_add_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE);
intel_gt_init_workarounds(dev_priv);
@@ -1276,7 +1274,7 @@ static int i915_driver_hw_probe(struct drm_i915_private *dev_priv)
err_msi:
if (pdev->msi_enabled)
pci_disable_msi(pdev);
- pm_qos_remove_request(&dev_priv->pm_qos);
+ cpu_latency_qos_remove_request(&dev_priv->pm_qos);
err_mem_regions:
intel_memory_regions_driver_release(dev_priv);
err_ggtt:
@@ -1299,7 +1297,7 @@ static void i915_driver_hw_remove(struct drm_i915_private *dev_priv)
if (pdev->msi_enabled)
pci_disable_msi(pdev);
- pm_qos_remove_request(&dev_priv->pm_qos);
+ cpu_latency_qos_remove_request(&dev_priv->pm_qos);
}
/**
diff --git a/drivers/gpu/drm/i915/intel_sideband.c b/drivers/gpu/drm/i915/intel_sideband.c
index cbfb7171d62d..0648eda309e4 100644
--- a/drivers/gpu/drm/i915/intel_sideband.c
+++ b/drivers/gpu/drm/i915/intel_sideband.c
@@ -60,7 +60,7 @@ static void __vlv_punit_get(struct drm_i915_private *i915)
* to the Valleyview P-unit and not all sideband communications.
*/
if (IS_VALLEYVIEW(i915)) {
- pm_qos_update_request(&i915->sb_qos, 0);
+ cpu_latency_qos_update_request(&i915->sb_qos, 0);
on_each_cpu(ping, NULL, 1);
}
}
@@ -68,7 +68,8 @@ static void __vlv_punit_get(struct drm_i915_private *i915)
static void __vlv_punit_put(struct drm_i915_private *i915)
{
if (IS_VALLEYVIEW(i915))
- pm_qos_update_request(&i915->sb_qos, PM_QOS_DEFAULT_VALUE);
+ cpu_latency_qos_update_request(&i915->sb_qos,
+ PM_QOS_DEFAULT_VALUE);
iosf_mbi_punit_release();
}
diff --git a/drivers/hsi/clients/cmt_speech.c b/drivers/hsi/clients/cmt_speech.c
index 9eec970cdfa5..89869c66fb9d 100644
--- a/drivers/hsi/clients/cmt_speech.c
+++ b/drivers/hsi/clients/cmt_speech.c
@@ -965,14 +965,13 @@ static int cs_hsi_buf_config(struct cs_hsi_iface *hi,
if (old_state != hi->iface_state) {
if (hi->iface_state == CS_STATE_CONFIGURED) {
- pm_qos_add_request(&hi->pm_qos_req,
- PM_QOS_CPU_DMA_LATENCY,
+ cpu_latency_qos_add_request(&hi->pm_qos_req,
CS_QOS_LATENCY_FOR_DATA_USEC);
local_bh_disable();
cs_hsi_read_on_data(hi);
local_bh_enable();
} else if (old_state == CS_STATE_CONFIGURED) {
- pm_qos_remove_request(&hi->pm_qos_req);
+ cpu_latency_qos_remove_request(&hi->pm_qos_req);
}
}
return r;
@@ -1075,8 +1074,8 @@ static void cs_hsi_stop(struct cs_hsi_iface *hi)
WARN_ON(!cs_state_idle(hi->control_state));
WARN_ON(!cs_state_idle(hi->data_state));
- if (pm_qos_request_active(&hi->pm_qos_req))
- pm_qos_remove_request(&hi->pm_qos_req);
+ if (cpu_latency_qos_request_active(&hi->pm_qos_req))
+ cpu_latency_qos_remove_request(&hi->pm_qos_req);
spin_lock_bh(&hi->lock);
cs_hsi_free_data(hi);
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
index d55606608ac8..65b84d5dc457 100644
--- a/drivers/idle/intel_idle.c
+++ b/drivers/idle/intel_idle.c
@@ -2,8 +2,9 @@
/*
* intel_idle.c - native hardware idle loop for modern Intel processors
*
- * Copyright (c) 2013, Intel Corporation.
+ * Copyright (c) 2013 - 2020, Intel Corporation.
* Len Brown <len.brown@intel.com>
+ * Rafael J. Wysocki <rafael.j.wysocki@intel.com>
*/
/*
@@ -25,11 +26,6 @@
/*
* Known limitations
*
- * The driver currently initializes for_each_online_cpu() upon modprobe.
- * It is unaware of subsequent processors hot-added to the system.
- * This means that if you boot with maxcpus=n and later online
- * processors above n, those processors will use C1 only.
- *
* ACPI has a .suspend hack to turn off deep c-states during suspend
* to avoid complications with the lapic timer workaround.
* Have not seen issues with suspend, but may need same workaround here.
@@ -55,7 +51,7 @@
#include <asm/mwait.h>
#include <asm/msr.h>
-#define INTEL_IDLE_VERSION "0.4.1"
+#define INTEL_IDLE_VERSION "0.5.1"
static struct cpuidle_driver intel_idle_driver = {
.name = "intel_idle",
@@ -65,11 +61,12 @@ static struct cpuidle_driver intel_idle_driver = {
static int max_cstate = CPUIDLE_STATE_MAX - 1;
static unsigned int disabled_states_mask;
-static unsigned int mwait_substates;
+static struct cpuidle_device __percpu *intel_idle_cpuidle_devices;
+
+static unsigned long auto_demotion_disable_flags;
+static bool disable_promotion_to_c1e;
-#define LAPIC_TIMER_ALWAYS_RELIABLE 0xFFFFFFFF
-/* Reliable LAPIC Timer States, bit 1 for C1 etc. */
-static unsigned int lapic_timer_reliable_states = (1 << 1); /* Default to only C1 */
+static bool lapic_timer_always_reliable;
struct idle_cpu {
struct cpuidle_state *state_table;
@@ -84,13 +81,10 @@ struct idle_cpu {
bool use_acpi;
};
-static const struct idle_cpu *icpu;
-static struct cpuidle_device __percpu *intel_idle_cpuidle_devices;
-static int intel_idle(struct cpuidle_device *dev,
- struct cpuidle_driver *drv, int index);
-static void intel_idle_s2idle(struct cpuidle_device *dev,
- struct cpuidle_driver *drv, int index);
-static struct cpuidle_state *cpuidle_state_table;
+static const struct idle_cpu *icpu __initdata;
+static struct cpuidle_state *cpuidle_state_table __initdata;
+
+static unsigned int mwait_substates __initdata;
/*
* Enable this state by default even if the ACPI _CST does not list it.
@@ -103,7 +97,7 @@ static struct cpuidle_state *cpuidle_state_table;
* If this flag is set, SW flushes the TLB, so even if the
* HW doesn't do the flushing, this flag is safe to use.
*/
-#define CPUIDLE_FLAG_TLB_FLUSHED 0x10000
+#define CPUIDLE_FLAG_TLB_FLUSHED BIT(16)
/*
* MWAIT takes an 8-bit "hint" in EAX "suggesting"
@@ -115,12 +109,87 @@ static struct cpuidle_state *cpuidle_state_table;
#define flg2MWAIT(flags) (((flags) >> 24) & 0xFF)
#define MWAIT2flg(eax) ((eax & 0xFF) << 24)
+/**
+ * intel_idle - Ask the processor to enter the given idle state.
+ * @dev: cpuidle device of the target CPU.
+ * @drv: cpuidle driver (assumed to point to intel_idle_driver).
+ * @index: Target idle state index.
+ *
+ * Use the MWAIT instruction to notify the processor that the CPU represented by
+ * @dev is idle and it can try to enter the idle state corresponding to @index.
+ *
+ * If the local APIC timer is not known to be reliable in the target idle state,
+ * enable one-shot tick broadcasting for the target CPU before executing MWAIT.
+ *
+ * Optionally call leave_mm() for the target CPU upfront to avoid wakeups due to
+ * flushing user TLBs.
+ *
+ * Must be called under local_irq_disable().
+ */
+static __cpuidle int intel_idle(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv, int index)
+{
+ struct cpuidle_state *state = &drv->states[index];
+ unsigned long eax = flg2MWAIT(state->flags);
+ unsigned long ecx = 1; /* break on interrupt flag */
+ bool uninitialized_var(tick);
+ int cpu = smp_processor_id();
+
+ /*
+ * leave_mm() to avoid costly and often unnecessary wakeups
+ * for flushing the user TLBs associated with the active mm.
+ */
+ if (state->flags & CPUIDLE_FLAG_TLB_FLUSHED)
+ leave_mm(cpu);
+
+ if (!static_cpu_has(X86_FEATURE_ARAT) && !lapic_timer_always_reliable) {
+ /*
+ * Switch over to one-shot tick broadcast if the target C-state
+ * is deeper than C1.
+ */
+ if ((eax >> MWAIT_SUBSTATE_SIZE) & MWAIT_CSTATE_MASK) {
+ tick = true;
+ tick_broadcast_enter();
+ } else {
+ tick = false;
+ }
+ }
+
+ mwait_idle_with_hints(eax, ecx);
+
+ if (!static_cpu_has(X86_FEATURE_ARAT) && tick)
+ tick_broadcast_exit();
+
+ return index;
+}
+
+/**
+ * intel_idle_s2idle - Ask the processor to enter the given idle state.
+ * @dev: cpuidle device of the target CPU.
+ * @drv: cpuidle driver (assumed to point to intel_idle_driver).
+ * @index: Target idle state index.
+ *
+ * Use the MWAIT instruction to notify the processor that the CPU represented by
+ * @dev is idle and it can try to enter the idle state corresponding to @index.
+ *
+ * Invoked as a suspend-to-idle callback routine with frozen user space, frozen
+ * scheduler tick and suspended scheduler clock on the target CPU.
+ */
+static __cpuidle void intel_idle_s2idle(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv, int index)
+{
+ unsigned long eax = flg2MWAIT(drv->states[index].flags);
+ unsigned long ecx = 1; /* break on interrupt flag */
+
+ mwait_idle_with_hints(eax, ecx);
+}
+
/*
* States are indexed by the cstate number,
* which is also the index into the MWAIT hint array.
* Thus C0 is a dummy.
*/
-static struct cpuidle_state nehalem_cstates[] = {
+static struct cpuidle_state nehalem_cstates[] __initdata = {
{
.name = "C1",
.desc = "MWAIT 0x00",
@@ -157,7 +226,7 @@ static struct cpuidle_state nehalem_cstates[] = {
.enter = NULL }
};
-static struct cpuidle_state snb_cstates[] = {
+static struct cpuidle_state snb_cstates[] __initdata = {
{
.name = "C1",
.desc = "MWAIT 0x00",
@@ -202,7 +271,7 @@ static struct cpuidle_state snb_cstates[] = {
.enter = NULL }
};
-static struct cpuidle_state byt_cstates[] = {
+static struct cpuidle_state byt_cstates[] __initdata = {
{
.name = "C1",
.desc = "MWAIT 0x00",
@@ -247,7 +316,7 @@ static struct cpuidle_state byt_cstates[] = {
.enter = NULL }
};
-static struct cpuidle_state cht_cstates[] = {
+static struct cpuidle_state cht_cstates[] __initdata = {
{
.name = "C1",
.desc = "MWAIT 0x00",
@@ -292,7 +361,7 @@ static struct cpuidle_state cht_cstates[] = {
.enter = NULL }
};
-static struct cpuidle_state ivb_cstates[] = {
+static struct cpuidle_state ivb_cstates[] __initdata = {
{
.name = "C1",
.desc = "MWAIT 0x00",
@@ -337,7 +406,7 @@ static struct cpuidle_state ivb_cstates[] = {
.enter = NULL }
};
-static struct cpuidle_state ivt_cstates[] = {
+static struct cpuidle_state ivt_cstates[] __initdata = {
{
.name = "C1",
.desc = "MWAIT 0x00",
@@ -374,7 +443,7 @@ static struct cpuidle_state ivt_cstates[] = {
.enter = NULL }
};
-static struct cpuidle_state ivt_cstates_4s[] = {
+static struct cpuidle_state ivt_cstates_4s[] __initdata = {
{
.name = "C1",
.desc = "MWAIT 0x00",
@@ -411,7 +480,7 @@ static struct cpuidle_state ivt_cstates_4s[] = {
.enter = NULL }
};
-static struct cpuidle_state ivt_cstates_8s[] = {
+static struct cpuidle_state ivt_cstates_8s[] __initdata = {
{
.name = "C1",
.desc = "MWAIT 0x00",
@@ -448,7 +517,7 @@ static struct cpuidle_state ivt_cstates_8s[] = {
.enter = NULL }
};
-static struct cpuidle_state hsw_cstates[] = {
+static struct cpuidle_state hsw_cstates[] __initdata = {
{
.name = "C1",
.desc = "MWAIT 0x00",
@@ -516,7 +585,7 @@ static struct cpuidle_state hsw_cstates[] = {
{
.enter = NULL }
};
-static struct cpuidle_state bdw_cstates[] = {
+static struct cpuidle_state bdw_cstates[] __initdata = {
{
.name = "C1",
.desc = "MWAIT 0x00",
@@ -585,7 +654,7 @@ static struct cpuidle_state bdw_cstates[] = {
.enter = NULL }
};
-static struct cpuidle_state skl_cstates[] = {
+static struct cpuidle_state skl_cstates[] __initdata = {
{
.name = "C1",
.desc = "MWAIT 0x00",
@@ -654,7 +723,7 @@ static struct cpuidle_state skl_cstates[] = {
.enter = NULL }
};
-static struct cpuidle_state skx_cstates[] = {
+static struct cpuidle_state skx_cstates[] __initdata = {
{
.name = "C1",
.desc = "MWAIT 0x00",
@@ -683,7 +752,7 @@ static struct cpuidle_state skx_cstates[] = {
.enter = NULL }
};
-static struct cpuidle_state atom_cstates[] = {
+static struct cpuidle_state atom_cstates[] __initdata = {
{
.name = "C1E",
.desc = "MWAIT 0x00",
@@ -719,7 +788,7 @@ static struct cpuidle_state atom_cstates[] = {
{
.enter = NULL }
};
-static struct cpuidle_state tangier_cstates[] = {
+static struct cpuidle_state tangier_cstates[] __initdata = {
{
.name = "C1",
.desc = "MWAIT 0x00",
@@ -763,7 +832,7 @@ static struct cpuidle_state tangier_cstates[] = {
{
.enter = NULL }
};
-static struct cpuidle_state avn_cstates[] = {
+static struct cpuidle_state avn_cstates[] __initdata = {
{
.name = "C1",
.desc = "MWAIT 0x00",
@@ -783,7 +852,7 @@ static struct cpuidle_state avn_cstates[] = {
{
.enter = NULL }
};
-static struct cpuidle_state knl_cstates[] = {
+static struct cpuidle_state knl_cstates[] __initdata = {
{
.name = "C1",
.desc = "MWAIT 0x00",
@@ -804,7 +873,7 @@ static struct cpuidle_state knl_cstates[] = {
.enter = NULL }
};
-static struct cpuidle_state bxt_cstates[] = {
+static struct cpuidle_state bxt_cstates[] __initdata = {
{
.name = "C1",
.desc = "MWAIT 0x00",
@@ -865,7 +934,7 @@ static struct cpuidle_state bxt_cstates[] = {
.enter = NULL }
};
-static struct cpuidle_state dnv_cstates[] = {
+static struct cpuidle_state dnv_cstates[] __initdata = {
{
.name = "C1",
.desc = "MWAIT 0x00",
@@ -894,174 +963,116 @@ static struct cpuidle_state dnv_cstates[] = {
.enter = NULL }
};
-/**
- * intel_idle
- * @dev: cpuidle_device
- * @drv: cpuidle driver
- * @index: index of cpuidle state
- *
- * Must be called under local_irq_disable().
- */
-static __cpuidle int intel_idle(struct cpuidle_device *dev,
- struct cpuidle_driver *drv, int index)
-{
- unsigned long ecx = 1; /* break on interrupt flag */
- struct cpuidle_state *state = &drv->states[index];
- unsigned long eax = flg2MWAIT(state->flags);
- unsigned int cstate;
- bool uninitialized_var(tick);
- int cpu = smp_processor_id();
-
- /*
- * leave_mm() to avoid costly and often unnecessary wakeups
- * for flushing the user TLBs associated with the active mm.
- */
- if (state->flags & CPUIDLE_FLAG_TLB_FLUSHED)
- leave_mm(cpu);
-
- if (!static_cpu_has(X86_FEATURE_ARAT)) {
- cstate = (((eax) >> MWAIT_SUBSTATE_SIZE) &
- MWAIT_CSTATE_MASK) + 1;
- tick = false;
- if (!(lapic_timer_reliable_states & (1 << (cstate)))) {
- tick = true;
- tick_broadcast_enter();
- }
- }
-
- mwait_idle_with_hints(eax, ecx);
-
- if (!static_cpu_has(X86_FEATURE_ARAT) && tick)
- tick_broadcast_exit();
-
- return index;
-}
-
-/**
- * intel_idle_s2idle - simplified "enter" callback routine for suspend-to-idle
- * @dev: cpuidle_device
- * @drv: cpuidle driver
- * @index: state index
- */
-static void intel_idle_s2idle(struct cpuidle_device *dev,
- struct cpuidle_driver *drv, int index)
-{
- unsigned long ecx = 1; /* break on interrupt flag */
- unsigned long eax = flg2MWAIT(drv->states[index].flags);
-
- mwait_idle_with_hints(eax, ecx);
-}
-
-static const struct idle_cpu idle_cpu_nehalem = {
+static const struct idle_cpu idle_cpu_nehalem __initconst = {
.state_table = nehalem_cstates,
.auto_demotion_disable_flags = NHM_C1_AUTO_DEMOTE | NHM_C3_AUTO_DEMOTE,
.disable_promotion_to_c1e = true,
};
-static const struct idle_cpu idle_cpu_nhx = {
+static const struct idle_cpu idle_cpu_nhx __initconst = {
.state_table = nehalem_cstates,
.auto_demotion_disable_flags = NHM_C1_AUTO_DEMOTE | NHM_C3_AUTO_DEMOTE,
.disable_promotion_to_c1e = true,
.use_acpi = true,
};
-static const struct idle_cpu idle_cpu_atom = {
+static const struct idle_cpu idle_cpu_atom __initconst = {
.state_table = atom_cstates,
};
-static const struct idle_cpu idle_cpu_tangier = {
+static const struct idle_cpu idle_cpu_tangier __initconst = {
.state_table = tangier_cstates,
};
-static const struct idle_cpu idle_cpu_lincroft = {
+static const struct idle_cpu idle_cpu_lincroft __initconst = {
.state_table = atom_cstates,
.auto_demotion_disable_flags = ATM_LNC_C6_AUTO_DEMOTE,
};
-static const struct idle_cpu idle_cpu_snb = {
+static const struct idle_cpu idle_cpu_snb __initconst = {
.state_table = snb_cstates,
.disable_promotion_to_c1e = true,
};
-static const struct idle_cpu idle_cpu_snx = {
+static const struct idle_cpu idle_cpu_snx __initconst = {
.state_table = snb_cstates,
.disable_promotion_to_c1e = true,
.use_acpi = true,
};
-static const struct idle_cpu idle_cpu_byt = {
+static const struct idle_cpu idle_cpu_byt __initconst = {
.state_table = byt_cstates,
.disable_promotion_to_c1e = true,
.byt_auto_demotion_disable_flag = true,
};
-static const struct idle_cpu idle_cpu_cht = {
+static const struct idle_cpu idle_cpu_cht __initconst = {
.state_table = cht_cstates,
.disable_promotion_to_c1e = true,
.byt_auto_demotion_disable_flag = true,
};
-static const struct idle_cpu idle_cpu_ivb = {
+static const struct idle_cpu idle_cpu_ivb __initconst = {
.state_table = ivb_cstates,
.disable_promotion_to_c1e = true,
};
-static const struct idle_cpu idle_cpu_ivt = {
+static const struct idle_cpu idle_cpu_ivt __initconst = {
.state_table = ivt_cstates,
.disable_promotion_to_c1e = true,
.use_acpi = true,
};
-static const struct idle_cpu idle_cpu_hsw = {
+static const struct idle_cpu idle_cpu_hsw __initconst = {
.state_table = hsw_cstates,
.disable_promotion_to_c1e = true,
};
-static const struct idle_cpu idle_cpu_hsx = {
+static const struct idle_cpu idle_cpu_hsx __initconst = {
.state_table = hsw_cstates,
.disable_promotion_to_c1e = true,
.use_acpi = true,
};
-static const struct idle_cpu idle_cpu_bdw = {
+static const struct idle_cpu idle_cpu_bdw __initconst = {
.state_table = bdw_cstates,
.disable_promotion_to_c1e = true,
};
-static const struct idle_cpu idle_cpu_bdx = {
+static const struct idle_cpu idle_cpu_bdx __initconst = {
.state_table = bdw_cstates,
.disable_promotion_to_c1e = true,
.use_acpi = true,
};
-static const struct idle_cpu idle_cpu_skl = {
+static const struct idle_cpu idle_cpu_skl __initconst = {
.state_table = skl_cstates,
.disable_promotion_to_c1e = true,
};
-static const struct idle_cpu idle_cpu_skx = {
+static const struct idle_cpu idle_cpu_skx __initconst = {
.state_table = skx_cstates,
.disable_promotion_to_c1e = true,
.use_acpi = true,
};
-static const struct idle_cpu idle_cpu_avn = {
+static const struct idle_cpu idle_cpu_avn __initconst = {
.state_table = avn_cstates,
.disable_promotion_to_c1e = true,
.use_acpi = true,
};
-static const struct idle_cpu idle_cpu_knl = {
+static const struct idle_cpu idle_cpu_knl __initconst = {
.state_table = knl_cstates,
.use_acpi = true,
};
-static const struct idle_cpu idle_cpu_bxt = {
+static const struct idle_cpu idle_cpu_bxt __initconst = {
.state_table = bxt_cstates,
.disable_promotion_to_c1e = true,
};
-static const struct idle_cpu idle_cpu_dnv = {
+static const struct idle_cpu idle_cpu_dnv __initconst = {
.state_table = dnv_cstates,
.disable_promotion_to_c1e = true,
.use_acpi = true,
@@ -1273,11 +1284,11 @@ static inline void intel_idle_init_cstates_acpi(struct cpuidle_driver *drv) { }
static inline bool intel_idle_off_by_default(u32 mwait_hint) { return false; }
#endif /* !CONFIG_ACPI_PROCESSOR_CSTATE */
-/*
- * ivt_idle_state_table_update(void)
+/**
+ * ivt_idle_state_table_update - Tune the idle states table for Ivy Town.
*
- * Tune IVT multi-socket targets
- * Assumption: num_sockets == (max_package_num + 1)
+ * Tune IVT multi-socket targets.
+ * Assumption: num_sockets == (max_package_num + 1).
*/
static void __init ivt_idle_state_table_update(void)
{
@@ -1323,11 +1334,11 @@ static unsigned long long __init irtl_2_usec(unsigned long long irtl)
return div_u64((irtl & 0x3FF) * ns, NSEC_PER_USEC);
}
-/*
- * bxt_idle_state_table_update(void)
+/**
+ * bxt_idle_state_table_update - Fix up the Broxton idle states table.
*
- * On BXT, we trust the IRTL to show the definitive maximum latency
- * We use the same value for target_residency.
+ * On BXT, trust the IRTL (Interrupt Response Time Limit) MSR to show the
+ * definitive maximum latency and use the same value for target_residency.
*/
static void __init bxt_idle_state_table_update(void)
{
@@ -1370,11 +1381,11 @@ static void __init bxt_idle_state_table_update(void)
}
}
-/*
- * sklh_idle_state_table_update(void)
+
+/**
+ * sklh_idle_state_table_update - Fix up the Sky Lake idle states table.
*
- * On SKL-H (model 0x5e) disable C8 and C9 if:
- * C10 is enabled and SGX disabled
+ * On SKL-H (model 0x5e) skip C8 and C9 if C10 is enabled and SGX disabled.
*/
static void __init sklh_idle_state_table_update(void)
{
@@ -1485,9 +1496,9 @@ static void __init intel_idle_init_cstates_icpu(struct cpuidle_driver *drv)
}
}
-/*
- * intel_idle_cpuidle_driver_init()
- * allocate, initialize cpuidle_states
+/**
+ * intel_idle_cpuidle_driver_init - Create the list of available idle states.
+ * @drv: cpuidle driver structure to initialize.
*/
static void __init intel_idle_cpuidle_driver_init(struct cpuidle_driver *drv)
{
@@ -1509,7 +1520,7 @@ static void auto_demotion_disable(void)
unsigned long long msr_bits;
rdmsrl(MSR_PKG_CST_CONFIG_CONTROL, msr_bits);
- msr_bits &= ~(icpu->auto_demotion_disable_flags);
+ msr_bits &= ~auto_demotion_disable_flags;
wrmsrl(MSR_PKG_CST_CONFIG_CONTROL, msr_bits);
}
@@ -1522,10 +1533,12 @@ static void c1e_promotion_disable(void)
wrmsrl(MSR_IA32_POWER_CTL, msr_bits);
}
-/*
- * intel_idle_cpu_init()
- * allocate, initialize, register cpuidle_devices
- * @cpu: cpu/core to initialize
+/**
+ * intel_idle_cpu_init - Register the target CPU with the cpuidle core.
+ * @cpu: CPU to initialize.
+ *
+ * Register a cpuidle device object for @cpu and update its MSRs in accordance
+ * with the processor model flags.
*/
static int intel_idle_cpu_init(unsigned int cpu)
{
@@ -1539,13 +1552,10 @@ static int intel_idle_cpu_init(unsigned int cpu)
return -EIO;
}
- if (!icpu)
- return 0;
-
- if (icpu->auto_demotion_disable_flags)
+ if (auto_demotion_disable_flags)
auto_demotion_disable();
- if (icpu->disable_promotion_to_c1e)
+ if (disable_promotion_to_c1e)
c1e_promotion_disable();
return 0;
@@ -1555,7 +1565,7 @@ static int intel_idle_cpu_online(unsigned int cpu)
{
struct cpuidle_device *dev;
- if (lapic_timer_reliable_states != LAPIC_TIMER_ALWAYS_RELIABLE)
+ if (!lapic_timer_always_reliable)
tick_broadcast_enable();
/*
@@ -1623,6 +1633,8 @@ static int __init intel_idle_init(void)
icpu = (const struct idle_cpu *)id->driver_data;
if (icpu) {
cpuidle_state_table = icpu->state_table;
+ auto_demotion_disable_flags = icpu->auto_demotion_disable_flags;
+ disable_promotion_to_c1e = icpu->disable_promotion_to_c1e;
if (icpu->use_acpi || force_use_acpi)
intel_idle_acpi_cst_extract();
} else if (!intel_idle_acpi_cst_extract()) {
@@ -1647,15 +1659,15 @@ static int __init intel_idle_init(void)
}
if (boot_cpu_has(X86_FEATURE_ARAT)) /* Always Reliable APIC Timer */
- lapic_timer_reliable_states = LAPIC_TIMER_ALWAYS_RELIABLE;
+ lapic_timer_always_reliable = true;
retval = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "idle/intel:online",
intel_idle_cpu_online, NULL);
if (retval < 0)
goto hp_setup_fail;
- pr_debug("lapic_timer_reliable_states 0x%x\n",
- lapic_timer_reliable_states);
+ pr_debug("Local APIC timer is reliable in %s\n",
+ lapic_timer_always_reliable ? "all C-states" : "C1");
return 0;
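The rewritten intel_idle() drops the per-C-state reliability bitmask and only asks whether the target state is deeper than C1, by decoding the MWAIT hint stored in the cpuidle state flags. A sketch of that decoding, with the macro values mirrored from the x86 headers for illustration (MWAIT_SUBSTATE_SIZE is 4 and MWAIT_CSTATE_MASK is 0xf there):

/* Hint layout in EAX: bits 7:4 select the C-state, bits 3:0 the sub-state. */
#define MWAIT_SUBSTATE_SIZE	4
#define MWAIT_CSTATE_MASK	0xf

/* The hint itself lives in bits 31:24 of the cpuidle state flags. */
#define flg2MWAIT(flags)	(((flags) >> 24) & 0xFF)

/*
 * C1 (hint 0x00) and C1E (hint 0x01) leave the C-state field zero, so a
 * nonzero result means a deeper state, for which the local APIC timer may
 * stop and one-shot tick broadcasting has to be enabled first.
 */
static inline int mwait_hint_deeper_than_c1(unsigned long eax)
{
	return (eax >> MWAIT_SUBSTATE_SIZE) & MWAIT_CSTATE_MASK;
}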
diff --git a/drivers/media/pci/saa7134/saa7134-video.c b/drivers/media/pci/saa7134/saa7134-video.c
index 342cabf48064..a8ac94fadc14 100644
--- a/drivers/media/pci/saa7134/saa7134-video.c
+++ b/drivers/media/pci/saa7134/saa7134-video.c
@@ -1008,8 +1008,7 @@ int saa7134_vb2_start_streaming(struct vb2_queue *vq, unsigned int count)
*/
if ((dmaq == &dev->video_q && !vb2_is_streaming(&dev->vbi_vbq)) ||
(dmaq == &dev->vbi_q && !vb2_is_streaming(&dev->video_vbq)))
- pm_qos_add_request(&dev->qos_request,
- PM_QOS_CPU_DMA_LATENCY, 20);
+ cpu_latency_qos_add_request(&dev->qos_request, 20);
dmaq->seq_nr = 0;
return 0;
@@ -1024,7 +1023,7 @@ void saa7134_vb2_stop_streaming(struct vb2_queue *vq)
if ((dmaq == &dev->video_q && !vb2_is_streaming(&dev->vbi_vbq)) ||
(dmaq == &dev->vbi_q && !vb2_is_streaming(&dev->video_vbq)))
- pm_qos_remove_request(&dev->qos_request);
+ cpu_latency_qos_remove_request(&dev->qos_request);
}
static const struct vb2_ops vb2_qops = {
diff --git a/drivers/media/platform/via-camera.c b/drivers/media/platform/via-camera.c
index c88fd3403eda..ed0ad68c5c48 100644
--- a/drivers/media/platform/via-camera.c
+++ b/drivers/media/platform/via-camera.c
@@ -646,7 +646,7 @@ static int viacam_vb2_start_streaming(struct vb2_queue *vq, unsigned int count)
* requirement which will keep the CPU out of the deeper sleep
* states.
*/
- pm_qos_add_request(&cam->qos_request, PM_QOS_CPU_DMA_LATENCY, 50);
+ cpu_latency_qos_add_request(&cam->qos_request, 50);
viacam_start_engine(cam);
return 0;
out:
@@ -662,7 +662,7 @@ static void viacam_vb2_stop_streaming(struct vb2_queue *vq)
struct via_camera *cam = vb2_get_drv_priv(vq);
struct via_buffer *buf, *tmp;
- pm_qos_remove_request(&cam->qos_request);
+ cpu_latency_qos_remove_request(&cam->qos_request);
viacam_stop_engine(cam);
list_for_each_entry_safe(buf, tmp, &cam->buffer_queue, queue) {
diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
index 382f25b2fa45..b2bdf5012c55 100644
--- a/drivers/mmc/host/sdhci-esdhc-imx.c
+++ b/drivers/mmc/host/sdhci-esdhc-imx.c
@@ -1452,8 +1452,7 @@ static int sdhci_esdhc_imx_probe(struct platform_device *pdev)
pdev->id_entry->driver_data;
if (imx_data->socdata->flags & ESDHC_FLAG_PMQOS)
- pm_qos_add_request(&imx_data->pm_qos_req,
- PM_QOS_CPU_DMA_LATENCY, 0);
+ cpu_latency_qos_add_request(&imx_data->pm_qos_req, 0);
imx_data->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
if (IS_ERR(imx_data->clk_ipg)) {
@@ -1572,7 +1571,7 @@ disable_per_clk:
clk_disable_unprepare(imx_data->clk_per);
free_sdhci:
if (imx_data->socdata->flags & ESDHC_FLAG_PMQOS)
- pm_qos_remove_request(&imx_data->pm_qos_req);
+ cpu_latency_qos_remove_request(&imx_data->pm_qos_req);
sdhci_pltfm_free(pdev);
return err;
}
@@ -1595,7 +1594,7 @@ static int sdhci_esdhc_imx_remove(struct platform_device *pdev)
clk_disable_unprepare(imx_data->clk_ahb);
if (imx_data->socdata->flags & ESDHC_FLAG_PMQOS)
- pm_qos_remove_request(&imx_data->pm_qos_req);
+ cpu_latency_qos_remove_request(&imx_data->pm_qos_req);
sdhci_pltfm_free(pdev);
@@ -1667,7 +1666,7 @@ static int sdhci_esdhc_runtime_suspend(struct device *dev)
clk_disable_unprepare(imx_data->clk_ahb);
if (imx_data->socdata->flags & ESDHC_FLAG_PMQOS)
- pm_qos_remove_request(&imx_data->pm_qos_req);
+ cpu_latency_qos_remove_request(&imx_data->pm_qos_req);
return ret;
}
@@ -1680,8 +1679,7 @@ static int sdhci_esdhc_runtime_resume(struct device *dev)
int err;
if (imx_data->socdata->flags & ESDHC_FLAG_PMQOS)
- pm_qos_add_request(&imx_data->pm_qos_req,
- PM_QOS_CPU_DMA_LATENCY, 0);
+ cpu_latency_qos_add_request(&imx_data->pm_qos_req, 0);
err = clk_prepare_enable(imx_data->clk_ahb);
if (err)
@@ -1714,7 +1712,7 @@ disable_ahb_clk:
clk_disable_unprepare(imx_data->clk_ahb);
remove_pm_qos_request:
if (imx_data->socdata->flags & ESDHC_FLAG_PMQOS)
- pm_qos_remove_request(&imx_data->pm_qos_req);
+ cpu_latency_qos_remove_request(&imx_data->pm_qos_req);
return err;
}
#endif
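The eSDHC changes keep the driver's coupling of the constraint to runtime PM: the request is dropped in runtime suspend and re-added in runtime resume, so C-states are only pinned while the controller is powered. A sketch of that pairing under hypothetical names (only the cpu_latency_qos_*() calls are the real API):

#include <linux/device.h>
#include <linux/pm_qos.h>

struct my_ctrl {
	struct pm_qos_request qos;
	bool needs_qos;		/* e.g. set from a SoC quirk flag */
};

static int my_runtime_suspend(struct device *dev)
{
	struct my_ctrl *c = dev_get_drvdata(dev);

	if (c->needs_qos)
		cpu_latency_qos_remove_request(&c->qos);
	return 0;
}

static int my_runtime_resume(struct device *dev)
{
	struct my_ctrl *c = dev_get_drvdata(dev);

	if (c->needs_qos)
		cpu_latency_qos_add_request(&c->qos, 0);
	return 0;
}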
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index db4ea58bac82..0f02c7a5ee9b 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -3280,10 +3280,10 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
dev_info(&adapter->pdev->dev,
"Some CPU C-states have been disabled in order to enable jumbo frames\n");
- pm_qos_update_request(&adapter->pm_qos_req, lat);
+ cpu_latency_qos_update_request(&adapter->pm_qos_req, lat);
} else {
- pm_qos_update_request(&adapter->pm_qos_req,
- PM_QOS_DEFAULT_VALUE);
+ cpu_latency_qos_update_request(&adapter->pm_qos_req,
+ PM_QOS_DEFAULT_VALUE);
}
/* Enable Receives */
@@ -4636,8 +4636,7 @@ int e1000e_open(struct net_device *netdev)
e1000_update_mng_vlan(adapter);
/* DMA latency requirement to workaround jumbo issue */
- pm_qos_add_request(&adapter->pm_qos_req, PM_QOS_CPU_DMA_LATENCY,
- PM_QOS_DEFAULT_VALUE);
+ cpu_latency_qos_add_request(&adapter->pm_qos_req, PM_QOS_DEFAULT_VALUE);
/* before we allocate an interrupt, we must be ready to handle it.
* Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
@@ -4679,7 +4678,7 @@ int e1000e_open(struct net_device *netdev)
return 0;
err_req_irq:
- pm_qos_remove_request(&adapter->pm_qos_req);
+ cpu_latency_qos_remove_request(&adapter->pm_qos_req);
e1000e_release_hw_control(adapter);
e1000_power_down_phy(adapter);
e1000e_free_rx_resources(adapter->rx_ring);
@@ -4743,7 +4742,7 @@ int e1000e_close(struct net_device *netdev)
!test_bit(__E1000_TESTING, &adapter->state))
e1000e_release_hw_control(adapter);
- pm_qos_remove_request(&adapter->pm_qos_req);
+ cpu_latency_qos_remove_request(&adapter->pm_qos_req);
pm_runtime_put_sync(&pdev->dev);
diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c
index 5ec16ce19b69..a202a4eea76a 100644
--- a/drivers/net/wireless/ath/ath10k/core.c
+++ b/drivers/net/wireless/ath/ath10k/core.c
@@ -1052,11 +1052,11 @@ static int ath10k_download_fw(struct ath10k *ar)
}
memset(&latency_qos, 0, sizeof(latency_qos));
- pm_qos_add_request(&latency_qos, PM_QOS_CPU_DMA_LATENCY, 0);
+ cpu_latency_qos_add_request(&latency_qos, 0);
ret = ath10k_bmi_fast_download(ar, address, data, data_len);
- pm_qos_remove_request(&latency_qos);
+ cpu_latency_qos_remove_request(&latency_qos);
return ret;
}
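ath10k scopes the constraint to a single operation: a zeroed, function-local request is added just before the BMI fast download and removed right after, so the zero-latency bound lives exactly as long as the transfer. A minimal sketch of that scoped pattern (my_do_transfer() is a hypothetical stand-in):

#include <linux/pm_qos.h>
#include <linux/string.h>
#include <linux/types.h>

/* Stand-in for the actual latency-sensitive transfer. */
static int my_do_transfer(const void *data, size_t len)
{
	return 0;
}

static int my_fw_download(const void *data, size_t len)
{
	struct pm_qos_request qos;
	int ret;

	memset(&qos, 0, sizeof(qos));	/* the request must start inactive */
	cpu_latency_qos_add_request(&qos, 0);

	ret = my_do_transfer(data, len);

	cpu_latency_qos_remove_request(&qos);
	return ret;
}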
diff --git a/drivers/net/wireless/intel/ipw2x00/ipw2100.c b/drivers/net/wireless/intel/ipw2x00/ipw2100.c
index 536cd729c086..5dfcce77d094 100644
--- a/drivers/net/wireless/intel/ipw2x00/ipw2100.c
+++ b/drivers/net/wireless/intel/ipw2x00/ipw2100.c
@@ -1730,7 +1730,7 @@ static int ipw2100_up(struct ipw2100_priv *priv, int deferred)
/* the ipw2100 hardware really doesn't want power management delays
* longer than 175usec
*/
- pm_qos_update_request(&ipw2100_pm_qos_req, 175);
+ cpu_latency_qos_update_request(&ipw2100_pm_qos_req, 175);
/* If the interrupt is enabled, turn it off... */
spin_lock_irqsave(&priv->low_lock, flags);
@@ -1875,7 +1875,8 @@ static void ipw2100_down(struct ipw2100_priv *priv)
ipw2100_disable_interrupts(priv);
spin_unlock_irqrestore(&priv->low_lock, flags);
- pm_qos_update_request(&ipw2100_pm_qos_req, PM_QOS_DEFAULT_VALUE);
+ cpu_latency_qos_update_request(&ipw2100_pm_qos_req,
+ PM_QOS_DEFAULT_VALUE);
/* We have to signal any supplicant if we are disassociating */
if (associated)
@@ -6566,8 +6567,7 @@ static int __init ipw2100_init(void)
printk(KERN_INFO DRV_NAME ": %s, %s\n", DRV_DESCRIPTION, DRV_VERSION);
printk(KERN_INFO DRV_NAME ": %s\n", DRV_COPYRIGHT);
- pm_qos_add_request(&ipw2100_pm_qos_req, PM_QOS_CPU_DMA_LATENCY,
- PM_QOS_DEFAULT_VALUE);
+ cpu_latency_qos_add_request(&ipw2100_pm_qos_req, PM_QOS_DEFAULT_VALUE);
ret = pci_register_driver(&ipw2100_pci_driver);
if (ret)
@@ -6594,7 +6594,7 @@ static void __exit ipw2100_exit(void)
&driver_attr_debug_level);
#endif
pci_unregister_driver(&ipw2100_pci_driver);
- pm_qos_remove_request(&ipw2100_pm_qos_req);
+ cpu_latency_qos_remove_request(&ipw2100_pm_qos_req);
}
module_init(ipw2100_init);
diff --git a/drivers/powercap/idle_inject.c b/drivers/powercap/idle_inject.c
index cd1270614cc6..e9bbd3c42eef 100644
--- a/drivers/powercap/idle_inject.c
+++ b/drivers/powercap/idle_inject.c
@@ -67,7 +67,7 @@ struct idle_inject_device {
struct hrtimer timer;
unsigned int idle_duration_us;
unsigned int run_duration_us;
- unsigned long int cpumask[0];
+ unsigned long cpumask[];
};
static DEFINE_PER_CPU(struct idle_inject_thread, idle_inject_thread);
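The idle_inject hunk replaces a GNU zero-length array with a C99 flexible array member, which lets the compiler catch misuse while keeping the single-allocation layout. A sketch of how such a structure is typically allocated (struct and function names are illustrative; cpumask_size() is the real helper):

#include <linux/cpumask.h>
#include <linux/slab.h>

struct my_inject_dev {
	unsigned int idle_duration_us;
	unsigned int run_duration_us;
	unsigned long cpumask[];	/* flexible array member, must be last */
};

static struct my_inject_dev *my_inject_alloc(void)
{
	/* One allocation covers the struct plus the trailing CPU mask. */
	return kzalloc(sizeof(struct my_inject_dev) + cpumask_size(),
		       GFP_KERNEL);
}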
diff --git a/drivers/spi/spi-fsl-qspi.c b/drivers/spi/spi-fsl-qspi.c
index e8a499cd1f13..02e5cba0a5bb 100644
--- a/drivers/spi/spi-fsl-qspi.c
+++ b/drivers/spi/spi-fsl-qspi.c
@@ -484,7 +484,7 @@ static int fsl_qspi_clk_prep_enable(struct fsl_qspi *q)
}
if (needs_wakeup_wait_mode(q))
- pm_qos_add_request(&q->pm_qos_req, PM_QOS_CPU_DMA_LATENCY, 0);
+ cpu_latency_qos_add_request(&q->pm_qos_req, 0);
return 0;
}
@@ -492,7 +492,7 @@ static int fsl_qspi_clk_prep_enable(struct fsl_qspi *q)
static void fsl_qspi_clk_disable_unprep(struct fsl_qspi *q)
{
if (needs_wakeup_wait_mode(q))
- pm_qos_remove_request(&q->pm_qos_req);
+ cpu_latency_qos_remove_request(&q->pm_qos_req);
clk_disable_unprepare(q->clk);
clk_disable_unprepare(q->clk_en);
diff --git a/drivers/tty/serial/8250/8250_omap.c b/drivers/tty/serial/8250/8250_omap.c
index 6f343ca08440..76fe72bfb8bb 100644
--- a/drivers/tty/serial/8250/8250_omap.c
+++ b/drivers/tty/serial/8250/8250_omap.c
@@ -569,7 +569,7 @@ static void omap8250_uart_qos_work(struct work_struct *work)
struct omap8250_priv *priv;
priv = container_of(work, struct omap8250_priv, qos_work);
- pm_qos_update_request(&priv->pm_qos_request, priv->latency);
+ cpu_latency_qos_update_request(&priv->pm_qos_request, priv->latency);
}
#ifdef CONFIG_SERIAL_8250_DMA
@@ -1222,10 +1222,9 @@ static int omap8250_probe(struct platform_device *pdev)
DEFAULT_CLK_SPEED);
}
- priv->latency = PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE;
- priv->calc_latency = PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE;
- pm_qos_add_request(&priv->pm_qos_request, PM_QOS_CPU_DMA_LATENCY,
- priv->latency);
+ priv->latency = PM_QOS_CPU_LATENCY_DEFAULT_VALUE;
+ priv->calc_latency = PM_QOS_CPU_LATENCY_DEFAULT_VALUE;
+ cpu_latency_qos_add_request(&priv->pm_qos_request, priv->latency);
INIT_WORK(&priv->qos_work, omap8250_uart_qos_work);
spin_lock_init(&priv->rx_dma_lock);
@@ -1295,7 +1294,7 @@ static int omap8250_remove(struct platform_device *pdev)
pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
serial8250_unregister_port(priv->line);
- pm_qos_remove_request(&priv->pm_qos_request);
+ cpu_latency_qos_remove_request(&priv->pm_qos_request);
device_init_wakeup(&pdev->dev, false);
return 0;
}
@@ -1445,7 +1444,7 @@ static int omap8250_runtime_suspend(struct device *dev)
if (up->dma && up->dma->rxchan)
omap_8250_rx_dma_flush(up);
- priv->latency = PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE;
+ priv->latency = PM_QOS_CPU_LATENCY_DEFAULT_VALUE;
schedule_work(&priv->qos_work);
return 0;
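Both OMAP UART drivers avoid touching the request from their runtime-PM callbacks, which may run in atomic context; they record the desired bound and let a work item apply it from process context. A sketch of that deferral pattern (names hypothetical; PM_QOS_CPU_LATENCY_DEFAULT_VALUE and the QoS calls are the real API):

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/pm_qos.h>
#include <linux/workqueue.h>

struct my_uart {
	struct pm_qos_request qos;
	unsigned int latency;		/* desired bound, in usec */
	struct work_struct qos_work;
};

/* INIT_WORK(&up->qos_work, my_qos_work) is assumed to run at probe time. */
static void my_qos_work(struct work_struct *work)
{
	struct my_uart *up = container_of(work, struct my_uart, qos_work);

	/* Process context: safe to take the locks the QoS update needs. */
	cpu_latency_qos_update_request(&up->qos, up->latency);
}

static int my_runtime_suspend(struct device *dev)
{
	struct my_uart *up = dev_get_drvdata(dev);

	up->latency = PM_QOS_CPU_LATENCY_DEFAULT_VALUE;
	schedule_work(&up->qos_work);	/* defer instead of updating here */
	return 0;
}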
diff --git a/drivers/tty/serial/omap-serial.c b/drivers/tty/serial/omap-serial.c
index 48017cec7f2f..e0b720ac754b 100644
--- a/drivers/tty/serial/omap-serial.c
+++ b/drivers/tty/serial/omap-serial.c
@@ -831,7 +831,7 @@ static void serial_omap_uart_qos_work(struct work_struct *work)
struct uart_omap_port *up = container_of(work, struct uart_omap_port,
qos_work);
- pm_qos_update_request(&up->pm_qos_request, up->latency);
+ cpu_latency_qos_update_request(&up->pm_qos_request, up->latency);
}
static void
@@ -1722,10 +1722,9 @@ static int serial_omap_probe(struct platform_device *pdev)
DEFAULT_CLK_SPEED);
}
- up->latency = PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE;
- up->calc_latency = PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE;
- pm_qos_add_request(&up->pm_qos_request,
- PM_QOS_CPU_DMA_LATENCY, up->latency);
+ up->latency = PM_QOS_CPU_LATENCY_DEFAULT_VALUE;
+ up->calc_latency = PM_QOS_CPU_LATENCY_DEFAULT_VALUE;
+ cpu_latency_qos_add_request(&up->pm_qos_request, up->latency);
INIT_WORK(&up->qos_work, serial_omap_uart_qos_work);
platform_set_drvdata(pdev, up);
@@ -1759,7 +1758,7 @@ err_add_port:
pm_runtime_dont_use_autosuspend(&pdev->dev);
pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
- pm_qos_remove_request(&up->pm_qos_request);
+ cpu_latency_qos_remove_request(&up->pm_qos_request);
device_init_wakeup(up->dev, false);
err_rs485:
err_port_line:
@@ -1777,7 +1776,7 @@ static int serial_omap_remove(struct platform_device *dev)
pm_runtime_dont_use_autosuspend(up->dev);
pm_runtime_put_sync(up->dev);
pm_runtime_disable(up->dev);
- pm_qos_remove_request(&up->pm_qos_request);
+ cpu_latency_qos_remove_request(&up->pm_qos_request);
device_init_wakeup(&dev->dev, false);
return 0;
@@ -1869,7 +1868,7 @@ static int serial_omap_runtime_suspend(struct device *dev)
serial_omap_enable_wakeup(up, true);
- up->latency = PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE;
+ up->latency = PM_QOS_CPU_LATENCY_DEFAULT_VALUE;
schedule_work(&up->qos_work);
return 0;
diff --git a/drivers/usb/chipidea/ci_hdrc_imx.c b/drivers/usb/chipidea/ci_hdrc_imx.c
index d8e7eb2f97b9..a479af3ae31d 100644
--- a/drivers/usb/chipidea/ci_hdrc_imx.c
+++ b/drivers/usb/chipidea/ci_hdrc_imx.c
@@ -393,8 +393,7 @@ static int ci_hdrc_imx_probe(struct platform_device *pdev)
}
if (pdata.flags & CI_HDRC_PMQOS)
- pm_qos_add_request(&data->pm_qos_req,
- PM_QOS_CPU_DMA_LATENCY, 0);
+ cpu_latency_qos_add_request(&data->pm_qos_req, 0);
ret = imx_get_clks(dev);
if (ret)
@@ -478,7 +477,7 @@ disable_hsic_regulator:
/* don't overwrite original ret (cf. EPROBE_DEFER) */
regulator_disable(data->hsic_pad_regulator);
if (pdata.flags & CI_HDRC_PMQOS)
- pm_qos_remove_request(&data->pm_qos_req);
+ cpu_latency_qos_remove_request(&data->pm_qos_req);
data->ci_pdev = NULL;
return ret;
}
@@ -499,7 +498,7 @@ static int ci_hdrc_imx_remove(struct platform_device *pdev)
if (data->ci_pdev) {
imx_disable_unprepare_clks(&pdev->dev);
if (data->plat_data->flags & CI_HDRC_PMQOS)
- pm_qos_remove_request(&data->pm_qos_req);
+ cpu_latency_qos_remove_request(&data->pm_qos_req);
if (data->hsic_pad_regulator)
regulator_disable(data->hsic_pad_regulator);
}
@@ -527,7 +526,7 @@ static int __maybe_unused imx_controller_suspend(struct device *dev)
imx_disable_unprepare_clks(dev);
if (data->plat_data->flags & CI_HDRC_PMQOS)
- pm_qos_remove_request(&data->pm_qos_req);
+ cpu_latency_qos_remove_request(&data->pm_qos_req);
data->in_lpm = true;
@@ -547,8 +546,7 @@ static int __maybe_unused imx_controller_resume(struct device *dev)
}
if (data->plat_data->flags & CI_HDRC_PMQOS)
- pm_qos_add_request(&data->pm_qos_req,
- PM_QOS_CPU_DMA_LATENCY, 0);
+ cpu_latency_qos_add_request(&data->pm_qos_req, 0);
ret = imx_prepare_enable_clks(dev);
if (ret)