author    Linus Torvalds <torvalds@linux-foundation.org>  2012-05-23 05:27:32 +0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2012-05-23 05:27:32 +0400
commit    d79ee93de909dfb252279b9a95978bbda9a814a9 (patch)
tree      bfccca60fd36259ff4bcc5e78a2c272fbd680065 /include/linux/sched.h
parent    2ff2b289a695807e291e1ed9f639d8a3ba5f4254 (diff)
parent    1c2927f18576d65631d8e0ddd19e1d023183222e (diff)
download  linux-d79ee93de909dfb252279b9a95978bbda9a814a9.tar.xz
Merge branch 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull scheduler changes from Ingo Molnar:
 "The biggest change is the cleanup/simplification of the load-balancer:
  instead of the current practice of architectures twiddling scheduler
  internal data structures and providing the scheduler domains in
  colorfully inconsistent ways, we now have generic scheduler code in
  kernel/sched/core.c:sched_init_numa() that looks at the architecture's
  node_distance() parameters and (while not fully trusting it) deduces a
  NUMA topology from it.

  This inevitably changes balancing behavior - hopefully for the better.

  There are various smaller optimizations, cleanups and fixlets as well."

* 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  sched: Taint kernel with TAINT_WARN after sleep-in-atomic bug
  sched: Remove stale power aware scheduling remnants and dysfunctional knobs
  sched/debug: Fix printing large integers on 32-bit platforms
  sched/fair: Improve the ->group_imb logic
  sched/nohz: Fix rq->cpu_load[] calculations
  sched/numa: Don't scale the imbalance
  sched/fair: Revert sched-domain iteration breakage
  sched/x86: Rewrite set_cpu_sibling_map()
  sched/numa: Fix the new NUMA topology bits
  sched/numa: Rewrite the CONFIG_NUMA sched domain support
  sched/fair: Propagate 'struct lb_env' usage into find_busiest_group
  sched/fair: Add some serialization to the sched_domain load-balance walk
  sched/fair: Let minimally loaded cpu balance the group
  sched: Change rq->nr_running to unsigned int
  x86/numa: Check for nonsensical topologies on real hw as well
  x86/numa: Hard partition cpu topology masks on node boundaries
  x86/numa: Allow specifying node_distance() for numa=fake
  x86/sched: Make mwait_usable() heed to "idle=" kernel parameters properly
  sched: Update documentation and comments
  sched_rt: Avoid unnecessary dequeue and enqueue of pushable tasks in set_cpus_allowed_rt()
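For illustration only, a minimal userspace sketch (not the kernel's implementation) of the idea behind sched_init_numa(): collect the set of distinct node_distance() values, each of which becomes one NUMA scheduling-domain level. The 4-node distance table below is a hypothetical stand-in for what an architecture would report.

/*
 * Sketch: deduce NUMA topology levels from a node_distance()-style
 * table, in the spirit of kernel/sched/core.c:sched_init_numa().
 * NR_NODES and dist[][] are hypothetical example data.
 */
#include <stdbool.h>
#include <stdio.h>

#define NR_NODES 4

/* Hypothetical distance table; the diagonal is the local distance (10). */
static const int dist[NR_NODES][NR_NODES] = {
	{ 10, 20, 30, 30 },
	{ 20, 10, 30, 30 },
	{ 30, 30, 10, 20 },
	{ 30, 30, 20, 10 },
};

int main(void)
{
	int levels[NR_NODES * NR_NODES];
	int nr_levels = 0;

	/* Collect the set of unique distances: one domain level each. */
	for (int i = 0; i < NR_NODES; i++) {
		for (int j = 0; j < NR_NODES; j++) {
			bool seen = false;

			for (int k = 0; k < nr_levels; k++) {
				if (levels[k] == dist[i][j])
					seen = true;
			}
			if (!seen)
				levels[nr_levels++] = dist[i][j];
		}
	}

	/* The local distance (node to itself) adds no balancing level. */
	printf("%d distinct distances -> %d NUMA topology levels\n",
	       nr_levels, nr_levels - 1);
	return 0;
}

On this example table the sketch finds the distances {10, 20, 30}, i.e. two non-local NUMA levels, which is the kind of deduced topology the generic code builds sched_domains from instead of trusting architecture-provided domain setups.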
Diffstat (limited to 'include/linux/sched.h')
-rw-r--r--  include/linux/sched.h | 49
1 file changed, 1 insertion(+), 48 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index f774d88cd0aa..28fa9d02fd59 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -855,61 +855,14 @@ enum cpu_idle_type {
#define SD_WAKE_AFFINE 0x0020 /* Wake task to waking CPU */
#define SD_PREFER_LOCAL 0x0040 /* Prefer to keep tasks local to this domain */
#define SD_SHARE_CPUPOWER 0x0080 /* Domain members share cpu power */
-#define SD_POWERSAVINGS_BALANCE 0x0100 /* Balance for power savings */
#define SD_SHARE_PKG_RESOURCES 0x0200 /* Domain members share cpu pkg resources */
#define SD_SERIALIZE 0x0400 /* Only a single load balancing instance */
#define SD_ASYM_PACKING 0x0800 /* Place busy groups earlier in the domain */
#define SD_PREFER_SIBLING 0x1000 /* Prefer to place tasks in a sibling domain */
#define SD_OVERLAP 0x2000 /* sched_domains of this level overlap */
-enum powersavings_balance_level {
- POWERSAVINGS_BALANCE_NONE = 0, /* No power saving load balance */
- POWERSAVINGS_BALANCE_BASIC, /* Fill one thread/core/package
- * first for long running threads
- */
- POWERSAVINGS_BALANCE_WAKEUP, /* Also bias task wakeups to semi-idle
- * cpu package for power savings
- */
- MAX_POWERSAVINGS_BALANCE_LEVELS
-};
-
-extern int sched_mc_power_savings, sched_smt_power_savings;
-
-static inline int sd_balance_for_mc_power(void)
-{
- if (sched_smt_power_savings)
- return SD_POWERSAVINGS_BALANCE;
-
- if (!sched_mc_power_savings)
- return SD_PREFER_SIBLING;
-
- return 0;
-}
-
-static inline int sd_balance_for_package_power(void)
-{
- if (sched_mc_power_savings | sched_smt_power_savings)
- return SD_POWERSAVINGS_BALANCE;
-
- return SD_PREFER_SIBLING;
-}
-
extern int __weak arch_sd_sibiling_asym_packing(void);
-/*
- * Optimise SD flags for power savings:
- * SD_BALANCE_NEWIDLE helps aggressive task consolidation and power savings.
- * Keep default SD flags if sched_{smt,mc}_power_saving=0
- */
-
-static inline int sd_power_saving_flags(void)
-{
- if (sched_mc_power_savings | sched_smt_power_savings)
- return SD_BALANCE_NEWIDLE;
-
- return 0;
-}
-
struct sched_group_power {
atomic_t ref;
/*
@@ -1962,7 +1915,7 @@ static inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
*/
extern unsigned long long notrace sched_clock(void);
/*
- * See the comment in kernel/sched_clock.c
+ * See the comment in kernel/sched/clock.c
*/
extern u64 cpu_clock(int cpu);
extern u64 local_clock(void);
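As a hedged usage sketch of the interfaces declared above (an assumption for illustration, not part of this patch): cpu_clock() and local_clock() both return nanosecond timestamps, and local_clock() is the cheap choice for timing a short section on the current CPU. The helper name measure_section() is hypothetical.

#include <linux/sched.h>
#include <linux/printk.h>

/* Hypothetical helper: time a short section of kernel code. */
static void measure_section(void)
{
	u64 t0 = local_clock();		/* ns timestamp on this CPU */

	/* ... the work being timed ... */

	pr_info("section took %llu ns\n",
		(unsigned long long)(local_clock() - t0));
}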