Diffstat (limited to 'kernel/sched')
-rw-r--r--  kernel/sched/core.c               19
-rw-r--r--  kernel/sched/cpufreq_schedutil.c  29
-rw-r--r--  kernel/sched/cputime.c             4
3 files changed, 24 insertions(+), 28 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 4a0668acd876..9143163fa678 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -5470,25 +5470,25 @@ void sched_dynamic_update(int mode)
 	switch (mode) {
 	case preempt_dynamic_none:
 		static_call_update(cond_resched, __cond_resched);
-		static_call_update(might_resched, (typeof(&__cond_resched)) __static_call_return0);
-		static_call_update(preempt_schedule, (typeof(&preempt_schedule)) NULL);
-		static_call_update(preempt_schedule_notrace, (typeof(&preempt_schedule_notrace)) NULL);
-		static_call_update(irqentry_exit_cond_resched, (typeof(&irqentry_exit_cond_resched)) NULL);
+		static_call_update(might_resched, (void *)&__static_call_return0);
+		static_call_update(preempt_schedule, NULL);
+		static_call_update(preempt_schedule_notrace, NULL);
+		static_call_update(irqentry_exit_cond_resched, NULL);
 		pr_info("Dynamic Preempt: none\n");
 		break;
 	case preempt_dynamic_voluntary:
 		static_call_update(cond_resched, __cond_resched);
 		static_call_update(might_resched, __cond_resched);
-		static_call_update(preempt_schedule, (typeof(&preempt_schedule)) NULL);
-		static_call_update(preempt_schedule_notrace, (typeof(&preempt_schedule_notrace)) NULL);
-		static_call_update(irqentry_exit_cond_resched, (typeof(&irqentry_exit_cond_resched)) NULL);
+		static_call_update(preempt_schedule, NULL);
+		static_call_update(preempt_schedule_notrace, NULL);
+		static_call_update(irqentry_exit_cond_resched, NULL);
 		pr_info("Dynamic Preempt: voluntary\n");
 		break;
 	case preempt_dynamic_full:
-		static_call_update(cond_resched, (typeof(&__cond_resched)) __static_call_return0);
-		static_call_update(might_resched, (typeof(&__cond_resched)) __static_call_return0);
+		static_call_update(cond_resched, (void *)&__static_call_return0);
+		static_call_update(might_resched, (void *)&__static_call_return0);
 		static_call_update(preempt_schedule, __preempt_schedule_func);
 		static_call_update(preempt_schedule_notrace, __preempt_schedule_notrace_func);
 		static_call_update(irqentry_exit_cond_resched, irqentry_exit_cond_resched);
 		pr_info("Dynamic Preempt: full\n");
 		break;
@@ -6389,6 +6389,7 @@ int sched_setattr_nocheck(struct task_struct *p, const struct sched_attr *attr)
 {
 	return __sched_setscheduler(p, attr, false, true);
 }
+EXPORT_SYMBOL_GPL(sched_setattr_nocheck);
 
 /**
  * sched_setscheduler_nocheck - change the scheduling policy and/or RT priority of a thread from kernelspace.
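
The core.c hunks above do two things: they drop the per-site function casts, since static_call_update() can now take NULL and a pointer to __static_call_return0 directly, and they export sched_setattr_nocheck() for GPL modules. What follows is a minimal userspace sketch of the mode table, using plain function pointers in place of static calls; the printf stubs and pointer plumbing are illustrative assumptions, not kernel code.

#include <stdio.h>

/* Stand-in for the kernel's real rescheduling point. */
static int __cond_resched(void)
{
	printf("rescheduling point hit\n");
	return 1;
}

/* Plays the role of __static_call_return0: do nothing, report "no resched". */
static int return0_stub(void)
{
	return 0;
}

/* Function pointers model the static calls being retargeted. */
static int (*cond_resched_call)(void)  = __cond_resched;
static int (*might_resched_call)(void) = return0_stub;

enum { preempt_dynamic_none, preempt_dynamic_voluntary, preempt_dynamic_full };

static void sched_dynamic_update(int mode)
{
	switch (mode) {
	case preempt_dynamic_none:      /* cond_resched() live, might_resched() a NOP */
		cond_resched_call  = __cond_resched;
		might_resched_call = return0_stub;
		break;
	case preempt_dynamic_voluntary: /* both are live rescheduling points */
		cond_resched_call  = __cond_resched;
		might_resched_call = __cond_resched;
		break;
	case preempt_dynamic_full:      /* both NOPs; preemption happens on its own */
		cond_resched_call  = return0_stub;
		might_resched_call = return0_stub;
		break;
	}
}

int main(void)
{
	sched_dynamic_update(preempt_dynamic_voluntary);
	might_resched_call();                 /* prints the message */
	sched_dynamic_update(preempt_dynamic_full);
	printf("%d\n", cond_resched_call());  /* prints 0, no message */
	return 0;
}

In the kernel the retargeting patches the call sites themselves, so the indirection modeled by the pointers above has no runtime cost, and on architectures with inline static-call support a NULL target can be patched down to a plain NOP.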
diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
index 7cc2e115a5c4..4f09afd2f321 100644
--- a/kernel/sched/cpufreq_schedutil.c
+++ b/kernel/sched/cpufreq_schedutil.c
@@ -114,19 +114,8 @@ static bool sugov_update_next_freq(struct sugov_policy *sg_policy, u64 time,
 	return true;
 }
 
-static void sugov_fast_switch(struct sugov_policy *sg_policy, u64 time,
-			      unsigned int next_freq)
+static void sugov_deferred_update(struct sugov_policy *sg_policy)
 {
-	if (sugov_update_next_freq(sg_policy, time, next_freq))
-		cpufreq_driver_fast_switch(sg_policy->policy, next_freq);
-}
-
-static void sugov_deferred_update(struct sugov_policy *sg_policy, u64 time,
-				  unsigned int next_freq)
-{
-	if (!sugov_update_next_freq(sg_policy, time, next_freq))
-		return;
-
 	if (!sg_policy->work_in_progress) {
 		sg_policy->work_in_progress = true;
 		irq_work_queue(&sg_policy->irq_work);
@@ -366,16 +355,19 @@ static void sugov_update_single_freq(struct update_util_data *hook, u64 time,
 		sg_policy->cached_raw_freq = cached_freq;
 	}
 
+	if (!sugov_update_next_freq(sg_policy, time, next_f))
+		return;
+
 	/*
 	 * This code runs under rq->lock for the target CPU, so it won't run
 	 * concurrently on two different CPUs for the same target and it is not
 	 * necessary to acquire the lock in the fast switch case.
 	 */
 	if (sg_policy->policy->fast_switch_enabled) {
-		sugov_fast_switch(sg_policy, time, next_f);
+		cpufreq_driver_fast_switch(sg_policy->policy, next_f);
 	} else {
 		raw_spin_lock(&sg_policy->update_lock);
-		sugov_deferred_update(sg_policy, time, next_f);
+		sugov_deferred_update(sg_policy);
 		raw_spin_unlock(&sg_policy->update_lock);
 	}
 }
@@ -454,12 +446,15 @@ sugov_update_shared(struct update_util_data *hook, u64 time, unsigned int flags)
 
 	if (sugov_should_update_freq(sg_policy, time)) {
 		next_f = sugov_next_freq_shared(sg_cpu, time);
 
+		if (!sugov_update_next_freq(sg_policy, time, next_f))
+			goto unlock;
+
 		if (sg_policy->policy->fast_switch_enabled)
-			sugov_fast_switch(sg_policy, time, next_f);
+			cpufreq_driver_fast_switch(sg_policy->policy, next_f);
 		else
-			sugov_deferred_update(sg_policy, time, next_f);
+			sugov_deferred_update(sg_policy);
 	}
-
+unlock:
 	raw_spin_unlock(&sg_policy->update_lock);
 }
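
The schedutil hunks hoist the sugov_update_next_freq() check out of the commit helpers and into the two callers, which makes sugov_fast_switch() redundant: once a caller knows the frequency really changed, fast switching is a single cpufreq_driver_fast_switch() call, and sugov_deferred_update() shrinks to just queueing the irq_work. Below is a self-contained model of the resulting flow; the locking, the u64 time argument, and rate limiting are omitted, and printf stands in for the driver and irq_work calls.

#include <stdbool.h>
#include <stdio.h>

struct sugov_policy {
	unsigned int next_freq;         /* last requested frequency */
	bool work_in_progress;          /* deferred kworker already queued? */
	bool fast_switch_enabled;       /* can the driver switch in this context? */
};

/* Returns false when next_freq is unchanged, so callers can bail out early. */
static bool sugov_update_next_freq(struct sugov_policy *sg_policy,
				   unsigned int next_freq)
{
	if (sg_policy->next_freq == next_freq)
		return false;

	sg_policy->next_freq = next_freq;
	return true;
}

/* Slow path: only queue the (modeled) irq_work; no freq argument needed now. */
static void sugov_deferred_update(struct sugov_policy *sg_policy)
{
	if (!sg_policy->work_in_progress) {
		sg_policy->work_in_progress = true;
		printf("irq_work queued, worker will switch to %u kHz\n",
		       sg_policy->next_freq);
	}
}

/* Caller-side shape after the patch: one check, then the bare commit step. */
static void sugov_update(struct sugov_policy *sg_policy, unsigned int next_f)
{
	if (!sugov_update_next_freq(sg_policy, next_f))
		return;

	if (sg_policy->fast_switch_enabled)
		printf("fast switch to %u kHz\n", next_f); /* cpufreq_driver_fast_switch() */
	else
		sugov_deferred_update(sg_policy);
}

int main(void)
{
	struct sugov_policy sg_policy = { .fast_switch_enabled = false };

	sugov_update(&sg_policy, 1200000);  /* change: queues the deferred update */
	sugov_update(&sg_policy, 1200000);  /* no change: returns before committing */
	return 0;
}

Note that the shared-policy caller cannot simply return on an unchanged frequency because update_lock is already held at that point, hence the new unlock: label and goto in the hunk above.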
diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c
index 3b36644fa80a..872e481d5098 100644
--- a/kernel/sched/cputime.c
+++ b/kernel/sched/cputime.c
@@ -60,7 +60,7 @@ void irqtime_account_irq(struct task_struct *curr, unsigned int offset)
 	cpu = smp_processor_id();
 	delta = sched_clock_cpu(cpu) - irqtime->irq_start_time;
 	irqtime->irq_start_time += delta;
-	pc = preempt_count() - offset;
+	pc = irq_count() - offset;
 
 	/*
 	 * We do not account for softirq time from ksoftirqd here.
@@ -421,7 +421,7 @@ void vtime_task_switch(struct task_struct *prev)
 
 void vtime_account_irq(struct task_struct *tsk, unsigned int offset)
 {
-	unsigned int pc = preempt_count() - offset;
+	unsigned int pc = irq_count() - offset;
 
 	if (pc & HARDIRQ_OFFSET) {
 		vtime_account_hardirq(tsk);
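
The cputime.c hunks replace preempt_count() with irq_count() when deriving pc. Only the hardirq, softirq, and NMI context bits matter to this accounting code, and irq_count() is preempt_count() masked down to exactly those bits, so a nonzero preemption-disable depth in the low byte cannot perturb the HARDIRQ_OFFSET/SOFTIRQ_OFFSET tests. Here is a standalone illustration of the masking, with the bit layout copied from include/linux/preempt.h; the widths shown are the upstream defaults and are an assumption here, not part of this diff.

#include <stdio.h>

#define PREEMPT_BITS	8
#define SOFTIRQ_BITS	8
#define HARDIRQ_BITS	4
#define NMI_BITS	4

#define PREEMPT_SHIFT	0
#define SOFTIRQ_SHIFT	(PREEMPT_SHIFT + PREEMPT_BITS)
#define HARDIRQ_SHIFT	(SOFTIRQ_SHIFT + SOFTIRQ_BITS)
#define NMI_SHIFT	(HARDIRQ_SHIFT + HARDIRQ_BITS)

#define __IRQ_MASK(x)	((1U << (x)) - 1)
#define SOFTIRQ_MASK	(__IRQ_MASK(SOFTIRQ_BITS) << SOFTIRQ_SHIFT)
#define HARDIRQ_MASK	(__IRQ_MASK(HARDIRQ_BITS) << HARDIRQ_SHIFT)
#define NMI_MASK	(__IRQ_MASK(NMI_BITS) << NMI_SHIFT)

#define HARDIRQ_OFFSET	(1U << HARDIRQ_SHIFT)

/* irq_count(): preempt_count with the preemption-depth bits stripped. */
static unsigned int irq_count(unsigned int preempt_count)
{
	return preempt_count & (NMI_MASK | HARDIRQ_MASK | SOFTIRQ_MASK);
}

int main(void)
{
	/* preempt_disable() depth of 2 plus one hardirq nesting level. */
	unsigned int pc = 2 | HARDIRQ_OFFSET;

	/* The low preemption bits are noise for irqtime accounting... */
	printf("preempt_count = %#x\n", pc);
	/* ...irq_count() strips them, leaving only the context bits. */
	printf("irq_count     = %#x\n", irq_count(pc));
	return 0;
}

Compiled and run, this prints preempt_count = 0x10002 and irq_count = 0x10000: the hardirq bit survives, the preemption depth does not.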