-rw-r--r--  drivers/cpufreq/cpufreq_conservative.c  26
-rw-r--r--  drivers/cpufreq/cpufreq_governor.c      15
-rw-r--r--  drivers/cpufreq/cpufreq_governor.h       3
-rw-r--r--  drivers/cpufreq/cpufreq_ondemand.c      15
4 files changed, 27 insertions, 32 deletions
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index 6243502ce24d..2e9040e8640c 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -44,20 +44,20 @@ static inline unsigned int get_freq_target(struct cs_dbs_tuners *cs_tuners,
* Any frequency increase takes it to the maximum frequency. Frequency reduction
* happens at minimum steps of 5% (default) of maximum frequency
*/
-static void cs_check_cpu(int cpu, unsigned int load)
+static unsigned int cs_dbs_timer(struct cpufreq_policy *policy)
{
- struct cs_cpu_dbs_info_s *dbs_info = &per_cpu(cs_cpu_dbs_info, cpu);
- struct cpufreq_policy *policy = dbs_info->cdbs.policy_dbs->policy;
+ struct cs_cpu_dbs_info_s *dbs_info = &per_cpu(cs_cpu_dbs_info, policy->cpu);
struct policy_dbs_info *policy_dbs = policy->governor_data;
struct dbs_data *dbs_data = policy_dbs->dbs_data;
struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
+ unsigned int load = dbs_update(policy);
/*
* break out if we 'cannot' reduce the speed as the user might
* want freq_step to be zero
*/
if (cs_tuners->freq_step == 0)
- return;
+ goto out;
/* Check for frequency increase */
if (load > dbs_data->up_threshold) {
@@ -65,7 +65,7 @@ static void cs_check_cpu(int cpu, unsigned int load)
/* if we are already at full speed then break out early */
if (dbs_info->requested_freq == policy->max)
- return;
+ goto out;
dbs_info->requested_freq += get_freq_target(cs_tuners, policy);
@@ -74,12 +74,12 @@ static void cs_check_cpu(int cpu, unsigned int load)
__cpufreq_driver_target(policy, dbs_info->requested_freq,
CPUFREQ_RELATION_H);
- return;
+ goto out;
}
/* if sampling_down_factor is active break out early */
if (++dbs_info->down_skip < dbs_data->sampling_down_factor)
- return;
+ goto out;
dbs_info->down_skip = 0;
/* Check for frequency decrease */
@@ -89,7 +89,7 @@ static void cs_check_cpu(int cpu, unsigned int load)
* if we cannot reduce the frequency anymore, break out early
*/
if (policy->cur == policy->min)
- return;
+ goto out;
freq_target = get_freq_target(cs_tuners, policy);
if (dbs_info->requested_freq > freq_target)
@@ -99,16 +99,9 @@ static void cs_check_cpu(int cpu, unsigned int load)
__cpufreq_driver_target(policy, dbs_info->requested_freq,
CPUFREQ_RELATION_L);
- return;
}
-}
-
-static unsigned int cs_dbs_timer(struct cpufreq_policy *policy)
-{
- struct policy_dbs_info *policy_dbs = policy->governor_data;
- struct dbs_data *dbs_data = policy_dbs->dbs_data;
- dbs_check_cpu(policy);
+ out:
return delay_for_sampling_rate(dbs_data->sampling_rate);
}
@@ -300,7 +293,6 @@ static struct dbs_governor cs_dbs_gov = {
.get_cpu_cdbs = get_cpu_cdbs,
.get_cpu_dbs_info_s = get_cpu_dbs_info_s,
.gov_dbs_timer = cs_dbs_timer,
- .gov_check_cpu = cs_check_cpu,
.init = cs_init,
.exit = cs_exit,
};
diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c
index 1f580cb62902..99d25af6485b 100644
--- a/drivers/cpufreq/cpufreq_governor.c
+++ b/drivers/cpufreq/cpufreq_governor.c
@@ -140,9 +140,8 @@ static const struct sysfs_ops governor_sysfs_ops = {
.store = governor_store,
};
-void dbs_check_cpu(struct cpufreq_policy *policy)
+unsigned int dbs_update(struct cpufreq_policy *policy)
{
- int cpu = policy->cpu;
struct dbs_governor *gov = dbs_governor_of(policy);
struct policy_dbs_info *policy_dbs = policy->governor_data;
struct dbs_data *dbs_data = policy_dbs->dbs_data;
@@ -154,7 +153,7 @@ void dbs_check_cpu(struct cpufreq_policy *policy)
if (gov->governor == GOV_ONDEMAND) {
struct od_cpu_dbs_info_s *od_dbs_info =
- gov->get_cpu_dbs_info_s(cpu);
+ gov->get_cpu_dbs_info_s(policy->cpu);
/*
* Sometimes, the ondemand governor uses an additional
@@ -250,10 +249,9 @@ void dbs_check_cpu(struct cpufreq_policy *policy)
if (load > max_load)
max_load = load;
}
-
- gov->gov_check_cpu(cpu, max_load);
+ return max_load;
}
-EXPORT_SYMBOL_GPL(dbs_check_cpu);
+EXPORT_SYMBOL_GPL(dbs_update);
void gov_set_update_util(struct policy_dbs_info *policy_dbs,
unsigned int delay_us)
@@ -601,11 +599,14 @@ static int cpufreq_governor_limits(struct cpufreq_policy *policy)
struct policy_dbs_info *policy_dbs = policy->governor_data;
mutex_lock(&policy_dbs->timer_mutex);
+
if (policy->max < policy->cur)
__cpufreq_driver_target(policy, policy->max, CPUFREQ_RELATION_H);
else if (policy->min > policy->cur)
__cpufreq_driver_target(policy, policy->min, CPUFREQ_RELATION_L);
- dbs_check_cpu(policy);
+
+ gov_update_sample_delay(policy_dbs, 0);
+
mutex_unlock(&policy_dbs->timer_mutex);
return 0;
diff --git a/drivers/cpufreq/cpufreq_governor.h b/drivers/cpufreq/cpufreq_governor.h
index 521daac38ba5..38b9512820b0 100644
--- a/drivers/cpufreq/cpufreq_governor.h
+++ b/drivers/cpufreq/cpufreq_governor.h
@@ -202,7 +202,6 @@ struct dbs_governor {
struct cpu_dbs_info *(*get_cpu_cdbs)(int cpu);
void *(*get_cpu_dbs_info_s)(int cpu);
unsigned int (*gov_dbs_timer)(struct cpufreq_policy *policy);
- void (*gov_check_cpu)(int cpu, unsigned int load);
int (*init)(struct dbs_data *dbs_data, bool notify);
void (*exit)(struct dbs_data *dbs_data, bool notify);
@@ -235,7 +234,7 @@ static inline int delay_for_sampling_rate(unsigned int sampling_rate)
}
extern struct mutex dbs_data_mutex;
-void dbs_check_cpu(struct cpufreq_policy *policy);
+unsigned int dbs_update(struct cpufreq_policy *policy);
int cpufreq_governor_dbs(struct cpufreq_policy *policy, unsigned int event);
void od_register_powersave_bias_handler(unsigned int (*f)
(struct cpufreq_policy *, unsigned int, unsigned int),
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index 0b79f1488be4..707c017f4e67 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -150,13 +150,13 @@ static void dbs_freq_increase(struct cpufreq_policy *policy, unsigned int freq)
* (default), then we try to increase frequency. Else, we adjust the frequency
* proportional to load.
*/
-static void od_check_cpu(int cpu, unsigned int load)
+static void od_update(struct cpufreq_policy *policy)
{
- struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, cpu);
+ struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, policy->cpu);
struct policy_dbs_info *policy_dbs = dbs_info->cdbs.policy_dbs;
- struct cpufreq_policy *policy = policy_dbs->policy;
struct dbs_data *dbs_data = policy_dbs->dbs_data;
struct od_dbs_tuners *od_tuners = dbs_data->tuners;
+ unsigned int load = dbs_update(policy);
dbs_info->freq_lo = 0;
@@ -198,12 +198,16 @@ static unsigned int od_dbs_timer(struct cpufreq_policy *policy)
/* Common NORMAL_SAMPLE setup */
dbs_info->sample_type = OD_NORMAL_SAMPLE;
- if (sample_type == OD_SUB_SAMPLE) {
+ /*
+ * OD_SUB_SAMPLE doesn't make sense if sample_delay_ns is 0, so ignore
+ * it then.
+ */
+ if (sample_type == OD_SUB_SAMPLE && policy_dbs->sample_delay_ns > 0) {
delay = dbs_info->freq_lo_jiffies;
__cpufreq_driver_target(policy, dbs_info->freq_lo,
CPUFREQ_RELATION_H);
} else {
- dbs_check_cpu(policy);
+ od_update(policy);
if (dbs_info->freq_lo) {
/* Setup timer for SUB_SAMPLE */
dbs_info->sample_type = OD_SUB_SAMPLE;
@@ -428,7 +432,6 @@ static struct dbs_governor od_dbs_gov = {
.get_cpu_cdbs = get_cpu_cdbs,
.get_cpu_dbs_info_s = get_cpu_dbs_info_s,
.gov_dbs_timer = od_dbs_timer,
- .gov_check_cpu = od_check_cpu,
.gov_ops = &od_ops,
.init = od_init,
.exit = od_exit,
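
Taken together, the patch folds the per-governor ->gov_check_cpu() hook into the ->gov_dbs_timer() callbacks: dbs_update() now only computes and returns the maximum load, and each governor (conservative, ondemand) applies its frequency decision directly in its own timer function. The standalone C sketch below uses hypothetical names and deliberately simplified policy logic, not the kernel's actual API; it only illustrates the callback-consolidation pattern the diff implements.

    #include <stdio.h>

    /* Hypothetical, simplified mirror of the refactor in this patch:
     * instead of a separate gov_check_cpu(cpu, load) hook, the timer
     * callback fetches the load from a common update helper and applies
     * the governor-specific policy itself. */

    struct policy {
            unsigned int cur;
            unsigned int min;
            unsigned int max;
            unsigned int load;      /* stand-in for the sampled CPU load */
    };

    /* Corresponds to dbs_update(): it now just returns the maximum load
     * instead of invoking a gov_check_cpu() callback. */
    static unsigned int dbs_update_sketch(struct policy *p)
    {
            return p->load;
    }

    /* Corresponds to cs_dbs_timer()/od_dbs_timer() after the patch:
     * the timer consumes the returned load directly. */
    static unsigned int timer_sketch(struct policy *p, unsigned int up_threshold)
    {
            unsigned int load = dbs_update_sketch(p);

            if (load > up_threshold && p->cur < p->max)
                    p->cur = p->max;        /* ondemand-style jump to max */
            else if (load < up_threshold && p->cur > p->min)
                    p->cur = p->min;        /* simplified decrease path */

            return 10000;                   /* pretend sampling delay, in us */
    }

    int main(void)
    {
            struct policy p = { .cur = 800, .min = 800, .max = 2400, .load = 95 };

            timer_sketch(&p, 80);
            printf("new frequency: %u MHz\n", p.cur);
            return 0;
    }

Collapsing the two callbacks removes an indirection and lets the timer path reuse the sampled load without a second per-CPU lookup; it is also what allows cpufreq_governor_limits() in the diff above to simply schedule an immediate sample with gov_update_sample_delay(policy_dbs, 0) rather than calling the old dbs_check_cpu() synchronously.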