-rw-r--r--  Documentation/admin-guide/kernel-parameters.txt  |  3
-rw-r--r--  drivers/cpufreq/cpufreq.c                         |  1
-rw-r--r--  drivers/cpufreq/intel_pstate.c                    | 67
3 files changed, 35 insertions(+), 36 deletions(-)
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index 986e44387dad..41851d42b84d 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -653,6 +653,9 @@
cpuidle.off=1 [CPU_IDLE]
disable the cpuidle sub-system
+ cpufreq.off=1 [CPU_FREQ]
+ disable the cpufreq sub-system
+
cpu_init_udelay=N
[X86] Delay for N microsec between assert and de-assert
of APIC INIT to start processors. This delay occurs
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index a47543281864..38b9fdf854a4 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -2532,4 +2532,5 @@ static int __init cpufreq_core_init(void)
return 0;
}
+module_param(off, int, 0444);
core_initcall(cpufreq_core_init);
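The new module_param() exposes the pre-existing "off" flag in cpufreq.c rather than adding any state of its own; with the cpufreq core built in, this is what makes the cpufreq.off=1 boot option documented above work, and mode 0444 also makes the value readable under /sys/module/cpufreq/parameters/off. For reference, the surrounding code looks roughly like this (a sketch of code that is not part of the hunk above):

	/* Pre-existing flag and helpers in cpufreq.c (sketch, not part of this patch). */
	static int off __read_mostly;

	static int cpufreq_disabled(void)
	{
		return off;
	}

	void disable_cpufreq(void)
	{
		off = 1;
	}

Entry points such as cpufreq_register_driver() bail out early when cpufreq_disabled() returns true, so booting with cpufreq.off=1 keeps any cpufreq driver from registering.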
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index b1fbaa30ae04..3d37219a0dd7 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -377,6 +377,7 @@ static void intel_pstate_set_performance_limits(struct perf_limits *limits)
intel_pstate_init_limits(limits);
limits->min_perf_pct = 100;
limits->min_perf = int_ext_tofp(1);
+ limits->min_sysfs_pct = 100;
}
static DEFINE_MUTEX(intel_pstate_driver_lock);
@@ -968,11 +969,20 @@ static int intel_pstate_resume(struct cpufreq_policy *policy)
}
static void intel_pstate_update_policies(void)
+ __releases(&intel_pstate_limits_lock)
+ __acquires(&intel_pstate_limits_lock)
{
+ struct perf_limits *saved_limits = limits;
int cpu;
+ mutex_unlock(&intel_pstate_limits_lock);
+
for_each_possible_cpu(cpu)
cpufreq_update_policy(cpu);
+
+ mutex_lock(&intel_pstate_limits_lock);
+
+ limits = saved_limits;
}
/************************** debugfs begin ************************/
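For readability, intel_pstate_update_policies() as it reads after this hunk: the sysfs store handlers below now call it with intel_pstate_limits_lock held, and the function drops that lock itself because cpufreq_update_policy() ends up in ->set_policy, which takes the same lock:

	static void intel_pstate_update_policies(void)
		__releases(&intel_pstate_limits_lock)
		__acquires(&intel_pstate_limits_lock)
	{
		struct perf_limits *saved_limits = limits;
		int cpu;

		/* ->set_policy takes intel_pstate_limits_lock, so drop it here. */
		mutex_unlock(&intel_pstate_limits_lock);

		for_each_possible_cpu(cpu)
			cpufreq_update_policy(cpu);

		mutex_lock(&intel_pstate_limits_lock);

		/*
		 * ->set_policy may have switched the global limits pointer;
		 * go back to the set the caller was updating.
		 */
		limits = saved_limits;
	}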
@@ -1180,10 +1190,10 @@ static ssize_t store_no_turbo(struct kobject *a, struct attribute *b,
limits->no_turbo = clamp_t(int, input, 0, 1);
- mutex_unlock(&intel_pstate_limits_lock);
-
intel_pstate_update_policies();
+ mutex_unlock(&intel_pstate_limits_lock);
+
mutex_unlock(&intel_pstate_driver_lock);
return count;
@@ -1217,10 +1227,10 @@ static ssize_t store_max_perf_pct(struct kobject *a, struct attribute *b,
limits->max_perf_pct);
limits->max_perf = div_ext_fp(limits->max_perf_pct, 100);
- mutex_unlock(&intel_pstate_limits_lock);
-
intel_pstate_update_policies();
+ mutex_unlock(&intel_pstate_limits_lock);
+
mutex_unlock(&intel_pstate_driver_lock);
return count;
@@ -1254,10 +1264,10 @@ static ssize_t store_min_perf_pct(struct kobject *a, struct attribute *b,
limits->min_perf_pct);
limits->min_perf = div_ext_fp(limits->min_perf_pct, 100);
- mutex_unlock(&intel_pstate_limits_lock);
-
intel_pstate_update_policies();
+ mutex_unlock(&intel_pstate_limits_lock);
+
mutex_unlock(&intel_pstate_driver_lock);
return count;
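All three store handlers now follow the same pattern: keep intel_pstate_limits_lock held across intel_pstate_update_policies() (which drops and re-takes it internally, as shown above) and release it only afterwards. The tail of store_no_turbo(), for example, ends up roughly as:

	limits->no_turbo = clamp_t(int, input, 0, 1);

	/* Runs with intel_pstate_limits_lock held on entry and exit. */
	intel_pstate_update_policies();

	mutex_unlock(&intel_pstate_limits_lock);

	mutex_unlock(&intel_pstate_driver_lock);

	return count;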
@@ -1874,13 +1884,11 @@ static int intel_pstate_prepare_request(struct cpudata *cpu, int pstate)
intel_pstate_get_min_max(cpu, &min_perf, &max_perf);
pstate = clamp_t(int, pstate, min_perf, max_perf);
- trace_cpu_frequency(pstate * cpu->pstate.scaling, cpu->cpu);
return pstate;
}
static void intel_pstate_update_pstate(struct cpudata *cpu, int pstate)
{
- pstate = intel_pstate_prepare_request(cpu, pstate);
if (pstate == cpu->pstate.current_pstate)
return;
@@ -1900,6 +1908,8 @@ static inline void intel_pstate_adjust_busy_pstate(struct cpudata *cpu)
update_turbo_state();
+ target_pstate = intel_pstate_prepare_request(cpu, target_pstate);
+ trace_cpu_frequency(target_pstate * cpu->pstate.scaling, cpu->cpu);
intel_pstate_update_pstate(cpu, target_pstate);
sample = &cpu->sample;
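With these two hunks, intel_pstate_update_pstate() no longer clamps the requested P-state or emits the cpu_frequency tracepoint on its own; callers are expected to clamp via intel_pstate_prepare_request() first. In the active-mode path both now happen in intel_pstate_adjust_busy_pstate():

	update_turbo_state();

	/* Clamp to the current limits and trace the value actually requested. */
	target_pstate = intel_pstate_prepare_request(cpu, target_pstate);
	trace_cpu_frequency(target_pstate * cpu->pstate.scaling, cpu->cpu);
	intel_pstate_update_pstate(cpu, target_pstate);

The apparent point of the split is that the passive-mode paths (further below) can keep calling intel_pstate_prepare_request() without emitting an event that the cpufreq core already reports through the transition notifiers in the regular ->target case.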
@@ -2132,16 +2142,11 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
mutex_lock(&intel_pstate_limits_lock);
if (policy->policy == CPUFREQ_POLICY_PERFORMANCE) {
+ pr_debug("set performance\n");
if (!perf_limits) {
limits = &performance_limits;
perf_limits = limits;
}
- if (policy->max >= policy->cpuinfo.max_freq &&
- !limits->no_turbo) {
- pr_debug("set performance\n");
- intel_pstate_set_performance_limits(perf_limits);
- goto out;
- }
} else {
pr_debug("set powersave\n");
if (!perf_limits) {
@@ -2152,7 +2157,7 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
}
intel_pstate_update_perf_limits(policy, perf_limits);
- out:
+
if (cpu->policy == CPUFREQ_POLICY_PERFORMANCE) {
/*
* NOHZ_FULL CPUs need this as the governor callback may not
@@ -2198,9 +2203,9 @@ static int intel_pstate_verify_policy(struct cpufreq_policy *policy)
unsigned int max_freq, min_freq;
max_freq = policy->cpuinfo.max_freq *
- limits->max_sysfs_pct / 100;
+ perf_limits->max_sysfs_pct / 100;
min_freq = policy->cpuinfo.max_freq *
- limits->min_sysfs_pct / 100;
+ perf_limits->min_sysfs_pct / 100;
cpufreq_verify_within_limits(policy, min_freq, max_freq);
}
@@ -2243,13 +2248,8 @@ static int __intel_pstate_cpu_init(struct cpufreq_policy *policy)
cpu = all_cpu_data[policy->cpu];
- /*
- * We need sane value in the cpu->perf_limits, so inherit from global
- * perf_limits limits, which are seeded with values based on the
- * CONFIG_CPU_FREQ_DEFAULT_GOV_*, during boot up.
- */
if (per_cpu_limits)
- memcpy(cpu->perf_limits, limits, sizeof(struct perf_limits));
+ intel_pstate_init_limits(cpu->perf_limits);
policy->min = cpu->pstate.min_pstate * cpu->pstate.scaling;
policy->max = cpu->pstate.turbo_pstate * cpu->pstate.scaling;
@@ -2301,7 +2301,6 @@ static struct cpufreq_driver intel_pstate = {
static int intel_cpufreq_verify_policy(struct cpufreq_policy *policy)
{
struct cpudata *cpu = all_cpu_data[policy->cpu];
- struct perf_limits *perf_limits = limits;
update_turbo_state();
policy->cpuinfo.max_freq = limits->turbo_disabled ?
@@ -2309,15 +2308,6 @@ static int intel_cpufreq_verify_policy(struct cpufreq_policy *policy)
cpufreq_verify_within_cpu_limits(policy);
- if (per_cpu_limits)
- perf_limits = cpu->perf_limits;
-
- mutex_lock(&intel_pstate_limits_lock);
-
- intel_pstate_update_perf_limits(policy, perf_limits);
-
- mutex_unlock(&intel_pstate_limits_lock);
-
return 0;
}
@@ -2370,6 +2360,7 @@ static int intel_cpufreq_target(struct cpufreq_policy *policy,
wrmsrl_on_cpu(policy->cpu, MSR_IA32_PERF_CTL,
pstate_funcs.get_val(cpu, target_pstate));
}
+ freqs.new = target_pstate * cpu->pstate.scaling;
cpufreq_freq_transition_end(policy, &freqs, false);
return 0;
@@ -2383,8 +2374,9 @@ static unsigned int intel_cpufreq_fast_switch(struct cpufreq_policy *policy,
target_freq = intel_cpufreq_turbo_update(cpu, policy, target_freq);
target_pstate = DIV_ROUND_UP(target_freq, cpu->pstate.scaling);
+ target_pstate = intel_pstate_prepare_request(cpu, target_pstate);
intel_pstate_update_pstate(cpu, target_pstate);
- return target_freq;
+ return target_pstate * cpu->pstate.scaling;
}
static int intel_cpufreq_cpu_init(struct cpufreq_policy *policy)
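On the passive-mode side, both update paths now report the frequency corresponding to the clamped P-state rather than the one that was asked for: intel_cpufreq_target() recomputes freqs.new before ending the transition, and the tail of intel_cpufreq_fast_switch() becomes roughly:

	target_freq = intel_cpufreq_turbo_update(cpu, policy, target_freq);
	target_pstate = DIV_ROUND_UP(target_freq, cpu->pstate.scaling);
	/* Clamp to the current limits before programming the P-state. */
	target_pstate = intel_pstate_prepare_request(cpu, target_pstate);
	intel_pstate_update_pstate(cpu, target_pstate);
	/* Return the frequency actually requested, not the incoming one. */
	return target_pstate * cpu->pstate.scaling;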
@@ -2437,8 +2429,11 @@ static int intel_pstate_register_driver(void)
intel_pstate_init_limits(&powersave_limits);
intel_pstate_set_performance_limits(&performance_limits);
- limits = IS_ENABLED(CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE) ?
- &performance_limits : &powersave_limits;
+ if (IS_ENABLED(CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE) &&
+ intel_pstate_driver == &intel_pstate)
+ limits = &performance_limits;
+ else
+ limits = &powersave_limits;
ret = cpufreq_register_driver(intel_pstate_driver);
if (ret) {
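Finally, the default limits set is chosen per operation mode instead of from Kconfig alone: &performance_limits is picked only when the active-mode driver (intel_pstate) is being registered, so the passive-mode driver (intel_cpufreq, selected e.g. with the intel_pstate=passive boot option) always starts from &powersave_limits. Condensed, the selection in intel_pstate_register_driver() now reads:

	intel_pstate_init_limits(&powersave_limits);
	intel_pstate_set_performance_limits(&performance_limits);

	/* Default to performance limits only in the active mode. */
	if (IS_ENABLED(CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE) &&
	    intel_pstate_driver == &intel_pstate)
		limits = &performance_limits;
	else
		limits = &powersave_limits;

	ret = cpufreq_register_driver(intel_pstate_driver);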