author    Peter Zijlstra <peterz@infradead.org>  2020-11-18 02:19:43 +0300
committer Peter Zijlstra <peterz@infradead.org>  2021-05-12 12:43:30 +0300
commit    d2dfa17bc7de67e99685c4d6557837bf801a102c (patch)
tree      6583db24d9515ffcd05dd6369b5094ceb7e6b8ff /kernel/sched/sched.h
parent    c6047c2e3af68dae23ad884249e0d42ff28d2d1b (diff)
sched: Trivial forced-newidle balancer
When a sibling is forced-idle to match the core-cookie, search for matching
tasks to fill the core.

rcu_read_unlock() can incur an infrequent deadlock in sched_core_balance();
fix this by using the RCU-sched flavor instead.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Tested-by: Don Hiatt <dhiatt@digitalocean.com>
Tested-by: Hongyu Ning <hongyu.ning@linux.intel.com>
Tested-by: Vincent Guittot <vincent.guittot@linaro.org>
Link: https://lkml.kernel.org/r/20210422123308.800048269@infradead.org
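The deadlock referenced above is a preemptible-RCU artifact: under
CONFIG_PREEMPT_RCU, rcu_read_unlock() may have to report a deferred
quiescent state through scheduler-internal locks, which can deadlock if the
caller is holding a runqueue lock. A minimal sketch contrasting the two
read-side flavors follows; it is illustrative only and is not the
sched_core_balance() body, which lives outside this sched.h hunk:

#include <linux/rcupdate.h>
#include <linux/preempt.h>

/* Illustrative only: the flavor swap named in the commit message. */
static void reader_preemptible(void)
{
	rcu_read_lock();		/* preemptible RCU read side */
	/* ... walk RCU-protected scheduler state ... */
	rcu_read_unlock();		/*
					 * May take scheduler locks to report
					 * a deferred quiescent state: unsafe
					 * if a runqueue lock is held here.
					 */
}

static void reader_rcu_sched(void)
{
	rcu_read_lock_sched();		/* RCU-sched: preempt_disable() */
	/* ... same traversal ... */
	rcu_read_unlock_sched();	/*
					 * Plain preempt_enable(); no RCU
					 * bookkeeping that needs scheduler
					 * locks.
					 */
}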
Diffstat (limited to 'kernel/sched/sched.h')
-rw-r--r--  kernel/sched/sched.h | 6
1 file changed, 6 insertions, 0 deletions
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 4a898abc60ce..91ca1fee9fec 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1170,6 +1170,8 @@ static inline raw_spinlock_t *__rq_lockp(struct rq *rq)
bool cfs_prio_less(struct task_struct *a, struct task_struct *b, bool fi);
+extern void queue_core_balance(struct rq *rq);
+
#else /* !CONFIG_SCHED_CORE */
static inline bool sched_core_enabled(struct rq *rq)
@@ -1192,6 +1194,10 @@ static inline raw_spinlock_t *__rq_lockp(struct rq *rq)
return &rq->__lock;
}
+static inline void queue_core_balance(struct rq *rq)
+{
+}
+
#endif /* CONFIG_SCHED_CORE */
static inline void lockdep_assert_rq_held(struct rq *rq)
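The two hunks follow the usual pattern for optional kernel features: a real
declaration under CONFIG_SCHED_CORE paired with an empty static inline stub
in the #else branch, so call sites can stay #ifdef-free. A hypothetical
caller, with the function name invented purely for illustration (the real
call site is not part of this sched.h diff):

/* Hypothetical call site; the name went_idle() is made up. */
static void went_idle(struct rq *rq)
{
	/*
	 * With CONFIG_SCHED_CORE=y this queues the forced-newidle
	 * balance work; otherwise the empty static inline stub
	 * compiles away to nothing.
	 */
	queue_core_balance(rq);
}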