From 14f1ba3af6209f0394192ef07fe2bd9bccdc755f Mon Sep 17 00:00:00 2001
From: "Jon Medhurst (Tixy)"
Date: Wed, 21 Oct 2015 10:55:33 +0100
Subject: cpufreq: arm_big_little: fix frequency check when bL switcher is active

The check for correct frequency being set in bL_cpufreq_set_rate is
broken when the big.LITTLE switcher is active, for two reasons.

1. The 'new_rate' variable gets overwritten before the test by the code
   calculating the frequency of the old cluster.

2. The frequency returned by bL_cpufreq_get_rate will be the virtual
   frequency, not the actual one the intended version of new_rate
   contains.

This means the function always returns an error, causing an endless
stream of:

  "cpufreq: __target_index: Failed to change cpu frequency: -5"

As the intent is to check for errors that clk_set_rate doesn't report,
let's move the check to immediately after that call and use clk_get_rate
directly, rather than the arm_big_little helpers, which only confuse
matters. Also, update the comment to be hopefully clearer about the
purpose of the code.

Fixes: 0a95e630b49a (cpufreq: arm_big_little: check if the frequency is set correctly)
Signed-off-by: Jon Medhurst
Acked-by: Sudeep Holla
Acked-by: Viresh Kumar
Reviewed-by: Michael Turquette
Signed-off-by: Rafael J. Wysocki
---
 drivers/cpufreq/arm_big_little.c | 22 +++++++++++++---------
 1 file changed, 13 insertions(+), 9 deletions(-)

(limited to 'drivers/cpufreq')

diff --git a/drivers/cpufreq/arm_big_little.c b/drivers/cpufreq/arm_big_little.c
index f1e42f8ce0fc..c5d256caa664 100644
--- a/drivers/cpufreq/arm_big_little.c
+++ b/drivers/cpufreq/arm_big_little.c
@@ -149,6 +149,19 @@ bL_cpufreq_set_rate(u32 cpu, u32 old_cluster, u32 new_cluster, u32 rate)
                  __func__, cpu, old_cluster, new_cluster, new_rate);
 
         ret = clk_set_rate(clk[new_cluster], new_rate * 1000);
+        if (!ret) {
+                /*
+                 * FIXME: clk_set_rate hasn't returned an error here however it
+                 * may be that clk_change_rate failed due to hardware or
+                 * firmware issues and wasn't able to report that due to the
+                 * current design of the clk core layer. To work around this
+                 * problem we will read back the clock rate and check it is
+                 * correct. This needs to be removed once clk core is fixed.
+                 */
+                if (clk_get_rate(clk[new_cluster]) != new_rate * 1000)
+                        ret = -EIO;
+        }
+
         if (WARN_ON(ret)) {
                 pr_err("clk_set_rate failed: %d, new cluster: %d\n", ret,
                        new_cluster);
@@ -189,15 +202,6 @@ bL_cpufreq_set_rate(u32 cpu, u32 old_cluster, u32 new_cluster, u32 rate)
                 mutex_unlock(&cluster_lock[old_cluster]);
         }
 
-        /*
-         * FIXME: clk_set_rate has to handle the case where clk_change_rate
-         * can fail due to hardware or firmware issues. Until the clk core
-         * layer is fixed, we can check here. In most of the cases we will
-         * be reading only the cached value anyway. This needs to be removed
-         * once clk core is fixed.
-         */
-        if (bL_cpufreq_get_rate(cpu) != new_rate)
-                return -EIO;
         return 0;
 }
 
--
cgit v1.2.3
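
The fix above boils down to a verify-after-set pattern against the generic
clk API: only when clk_set_rate() itself reports success, read the rate back
with clk_get_rate() and compare. A minimal sketch of that pattern in
isolation; the helper name and calling context are invented for illustration
and only the clk_set_rate()/clk_get_rate()/-EIO pattern mirrors the patch:

#include <linux/clk.h>
#include <linux/errno.h>

/* Illustrative helper, not part of the driver above. */
static int set_and_verify_rate(struct clk *clk, unsigned long rate_hz)
{
        int ret = clk_set_rate(clk, rate_hz);

        if (ret)
                return ret;     /* the clk core already reported the failure */

        /*
         * Firmware may have failed silently; read the rate back and treat a
         * mismatch as an I/O error, as the driver does.
         */
        if (clk_get_rate(clk) != rate_hz)
                return -EIO;

        return 0;
}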
From 539342f60b93c9f98c47f75b63fe5b8b13c1d226 Mon Sep 17 00:00:00 2001
From: Prarit Bhargava
Date: Thu, 22 Oct 2015 09:43:31 -0400
Subject: intel_pstate: decrease number of "HWP enabled" messages

When booting an HWP enabled system the kernel displays one "HWP enabled"
message for each cpu. The messages are superfluous since HWP is globally
enabled across all CPUs.

This patch also adds an informational message when HWP is disabled via
intel_pstate=no_hwp.

Signed-off-by: Prarit Bhargava
Reviewed-by: Viresh Kumar
Signed-off-by: Rafael J. Wysocki
---
 drivers/cpufreq/intel_pstate.c | 10 ++++++----
 1 file changed, 6 insertions(+), 4 deletions(-)

(limited to 'drivers/cpufreq')

diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 93a3c635ea27..2e31d097def6 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -684,8 +684,6 @@ static void __init intel_pstate_sysfs_expose_params(void)
 
 static void intel_pstate_hwp_enable(struct cpudata *cpudata)
 {
-        pr_info("intel_pstate: HWP enabled\n");
-
         wrmsrl_on_cpu(cpudata->cpu, MSR_PM_ENABLE, 0x1);
 }
 
@@ -1557,8 +1555,10 @@ static int __init intel_pstate_init(void)
         if (!all_cpu_data)
                 return -ENOMEM;
 
-        if (static_cpu_has_safe(X86_FEATURE_HWP) && !no_hwp)
+        if (static_cpu_has_safe(X86_FEATURE_HWP) && !no_hwp) {
+                pr_info("intel_pstate: HWP enabled\n");
                 hwp_active++;
+        }
 
         if (!hwp_active && hwp_only)
                 goto out;
@@ -1593,8 +1593,10 @@ static int __init intel_pstate_setup(char *str)
 
         if (!strcmp(str, "disable"))
                 no_load = 1;
-        if (!strcmp(str, "no_hwp"))
+        if (!strcmp(str, "no_hwp")) {
+                pr_info("intel_pstate: HWP disabled\n");
                 no_hwp = 1;
+        }
         if (!strcmp(str, "force"))
                 force_load = 1;
         if (!strcmp(str, "hwp_only"))
--
cgit v1.2.3

From 3a91b069eabf5dc8d4cd6f3e66dcd700536ef9f8 Mon Sep 17 00:00:00 2001
From: Viresh Kumar
Date: Thu, 29 Oct 2015 08:08:38 +0530
Subject: cpufreq: governor: Quit work-handlers early if governor is stopped

gov_queue_work() acquires cpufreq_governor_lock to allow
cpufreq_governor_stop() to drain delayed work items possibly scheduled
on CPUs that share the policy with a CPU being taken offline.

However, the same goal may be achieved in a more straightforward way if
the policy pointer in the struct cpu_dbs_info matching the policy CPU is
reset upfront by cpufreq_governor_stop() under the timer_mutex belonging
to it and checked against NULL, under the same lock, at the beginning of
dbs_timer().

In that case every instance of dbs_timer() run for a struct cpu_dbs_info
sharing the policy pointer in question after cpufreq_governor_stop() has
started will notice that that pointer is NULL and bail out immediately
without queuing up any new work items. In turn, gov_cancel_work() called
by cpufreq_governor_stop() before destroying timer_mutex will wait for
all of the delayed work items currently running on the CPUs sharing the
policy to drop the mutex, so it may be destroyed safely.

Make cpufreq_governor_stop() and dbs_timer() work as described and
modify gov_queue_work() so it does not acquire cpufreq_governor_lock
any more.

Signed-off-by: Viresh Kumar
Signed-off-by: Rafael J. Wysocki
---
 drivers/cpufreq/cpufreq_governor.c | 33 +++++++++++++++++++++++----------
 1 file changed, 23 insertions(+), 10 deletions(-)

(limited to 'drivers/cpufreq')

diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c
index 11258c4c1b17..b260576ddb12 100644
--- a/drivers/cpufreq/cpufreq_governor.c
+++ b/drivers/cpufreq/cpufreq_governor.c
@@ -171,10 +171,6 @@ void gov_queue_work(struct dbs_data *dbs_data, struct cpufreq_policy *policy,
 {
         int i;
 
-        mutex_lock(&cpufreq_governor_lock);
-        if (!policy->governor_enabled)
-                goto out_unlock;
-
         if (!all_cpus) {
                 /*
                  * Use raw_smp_processor_id() to avoid preemptible warnings.
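
The synchronization idea in the governor patch above can be shown in
isolation: the stop path clears a shared pointer under the same mutex that
every work handler takes, and the handler bails out as soon as it finds that
pointer NULL; draining the work afterwards guarantees the mutex is free
before it is destroyed. A stripped-down sketch with invented structure and
function names (only the pattern mirrors the patch, which uses timer_mutex,
shared->policy and gov_cancel_work()):

#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>

struct shared_info {
        struct mutex lock;              /* plays the role of timer_mutex */
        void *policy;                   /* set to NULL when the governor stops */
        struct delayed_work dwork;
};

static void sample_worker(struct work_struct *work)
{
        struct shared_info *shared =
                container_of(to_delayed_work(work), struct shared_info, dwork);

        mutex_lock(&shared->lock);
        if (!shared->policy)            /* governor already stopped, bail out */
                goto unlock;

        /* ... do the sampling work and queue the next delayed work ... */
unlock:
        mutex_unlock(&shared->lock);
}

static void sample_stop(struct shared_info *shared)
{
        /* Make any handler that runs from now on take the early exit. */
        mutex_lock(&shared->lock);
        shared->policy = NULL;
        mutex_unlock(&shared->lock);

        /* Drain handlers already in flight, then the mutex may be destroyed. */
        cancel_delayed_work_sync(&shared->dwork);
        mutex_destroy(&shared->lock);
}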
@@ -188,9 +184,6 @@ void gov_queue_work(struct dbs_data *dbs_data, struct cpufreq_policy *policy,
                 for_each_cpu(i, policy->cpus)
                         __gov_queue_work(i, dbs_data, delay);
         }
-
-out_unlock:
-        mutex_unlock(&cpufreq_governor_lock);
 }
 EXPORT_SYMBOL_GPL(gov_queue_work);
 
@@ -229,13 +222,24 @@ static void dbs_timer(struct work_struct *work)
         struct cpu_dbs_info *cdbs = container_of(work, struct cpu_dbs_info,
                                                   dwork.work);
         struct cpu_common_dbs_info *shared = cdbs->shared;
-        struct cpufreq_policy *policy = shared->policy;
-        struct dbs_data *dbs_data = policy->governor_data;
+        struct cpufreq_policy *policy;
+        struct dbs_data *dbs_data;
         unsigned int sampling_rate, delay;
         bool modify_all = true;
 
         mutex_lock(&shared->timer_mutex);
 
+        policy = shared->policy;
+
+        /*
+         * Governor might already be disabled and there is no point continuing
+         * with the work-handler.
+         */
+        if (!policy)
+                goto unlock;
+
+        dbs_data = policy->governor_data;
+
         if (dbs_data->cdata->governor == GOV_CONSERVATIVE) {
                 struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
 
@@ -252,6 +256,7 @@ static void dbs_timer(struct work_struct *work)
         delay = dbs_data->cdata->gov_dbs_timer(cdbs, dbs_data, modify_all);
         gov_queue_work(dbs_data, policy, delay, modify_all);
 
+unlock:
         mutex_unlock(&shared->timer_mutex);
 }
 
@@ -478,9 +483,17 @@ static int cpufreq_governor_stop(struct cpufreq_policy *policy,
         if (!shared || !shared->policy)
                 return -EBUSY;
 
+        /*
+         * Work-handler must see this updated, as it should not proceed any
+         * further after governor is disabled. And so timer_mutex is taken while
+         * updating this value.
+         */
+        mutex_lock(&shared->timer_mutex);
+        shared->policy = NULL;
+        mutex_unlock(&shared->timer_mutex);
+
         gov_cancel_work(dbs_data, policy);
 
-        shared->policy = NULL;
         mutex_destroy(&shared->timer_mutex);
         return 0;
 }
--
cgit v1.2.3

From d7e53e35f9f54cdfa09a8456ae8e9874ec66bb36 Mon Sep 17 00:00:00 2001
From: Nicolas Pitre
Date: Tue, 3 Nov 2015 17:13:57 -0500
Subject: cpufreq: s5pv210-cpufreq: fix wrong do_div() usage

It is wrong to use do_div() with 32-bit dividends (unsigned long is
32 bits on 32-bit architectures).

Signed-off-by: Nicolas Pitre
Reviewed-by: Krzysztof Kozlowski
Reviewed-by: Viresh Kumar
Signed-off-by: Rafael J. Wysocki
---
 drivers/cpufreq/s5pv210-cpufreq.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

(limited to 'drivers/cpufreq')

diff --git a/drivers/cpufreq/s5pv210-cpufreq.c b/drivers/cpufreq/s5pv210-cpufreq.c
index 9e231f52150c..051a8a8224cd 100644
--- a/drivers/cpufreq/s5pv210-cpufreq.c
+++ b/drivers/cpufreq/s5pv210-cpufreq.c
@@ -212,11 +212,11 @@ static void s5pv210_set_refresh(enum s5pv210_dmc_port ch, unsigned long freq)
         /* Find current DRAM frequency */
         tmp = s5pv210_dram_conf[ch].freq;
 
-        do_div(tmp, freq);
+        tmp /= freq;
 
         tmp1 = s5pv210_dram_conf[ch].refresh;
 
-        do_div(tmp1, tmp);
+        tmp1 /= tmp;
 
         __raw_writel(tmp1, reg);
 }
--
cgit v1.2.3
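
For context on the rule the s5pv210 patch applies: do_div() exists so 32-bit
kernels can divide a 64-bit value by a 32-bit divisor; it modifies its first
argument in place (which must be a 64-bit lvalue) and returns the remainder.
With a 32-bit dividend such as unsigned long on 32-bit ARM, plain C division
is both correct and cheaper, which is what the patch switches to. A small
illustrative sketch, with made-up values and function name:

#include <asm/div64.h>          /* do_div() */
#include <linux/printk.h>
#include <linux/types.h>

static void div_examples(void)
{
        u64 bytes = 10ULL * 1024 * 1024 * 1024;         /* 64-bit dividend */
        unsigned long refresh = 7800, freq = 200;       /* 32-bit on 32-bit ARM */
        u32 rem;

        /* do_div() divides the 64-bit lvalue in place and returns the remainder. */
        rem = do_div(bytes, 4096);                      /* bytes = bytes / 4096 */

        /*
         * A 32-bit dividend needs no helper at all: ordinary division is
         * correct, whereas do_div() expects a 64-bit object here.
         */
        refresh /= freq;

        pr_info("quotient=%llu rem=%u refresh=%lu\n",
                (unsigned long long)bytes, rem, refresh);
}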