summaryrefslogtreecommitdiff
path: root/kernel/time
diff options
context:
space:
mode:
Diffstat (limited to 'kernel/time')
-rw-r--r--kernel/time/Kconfig19
-rw-r--r--kernel/time/tick-sched.c62
2 files changed, 81 insertions, 0 deletions
diff --git a/kernel/time/Kconfig b/kernel/time/Kconfig
index 24510d84efd7..5a87c03e45ad 100644
--- a/kernel/time/Kconfig
+++ b/kernel/time/Kconfig
@@ -79,6 +79,25 @@ config NO_HZ
only trigger on an as-needed basis both when the system is
busy and when the system is idle.
+config NO_HZ_EXTENDED
+ bool "Full dynticks system"
+ depends on NO_HZ && RCU_USER_QS && VIRT_CPU_ACCOUNTING_GEN && RCU_NOCB_CPU && SMP
+ select CONTEXT_TRACKING_FORCE
+ help
+ Adaptively try to shutdown the tick whenever possible, even when
+ the CPU is running tasks. Typically this requires running a single
+ task on the CPU. Chances for running tickless are maximized when
+ the task mostly runs in userspace and has little kernel activity.
+
+ You need to set the nohz_extended boot parameter to the
+ desired range of dynticks CPUs.
+
+ This is implemented at the expense of some overhead in user <-> kernel
+ transitions: syscalls, exceptions and interrupts. Even when it's
+ dynamically off.
+
+ Say N.
+
config HIGH_RES_TIMERS
bool "High Resolution Timer Support"
depends on !ARCH_USES_GETTIMEOFFSET && GENERIC_CLOCKEVENTS
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index a19a39952c1b..79c275f08b7d 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -142,6 +142,68 @@ static void tick_sched_handle(struct tick_sched *ts, struct pt_regs *regs)
profile_tick(CPU_PROFILING);
}
+#ifdef CONFIG_NO_HZ_EXTENDED
+/* Set of CPUs requested to run full dynticks, parsed from "nohz_extended=" */
+static cpumask_var_t nohz_extended_mask;
+/* True once a valid "nohz_extended=" cpumask has been parsed at boot */
+bool have_nohz_extended_mask;
+
+/*
+ * tick_nohz_extended_cpu - check whether @cpu is in the full-dynticks range
+ *
+ * Returns non-zero if @cpu was listed in the "nohz_extended=" boot
+ * parameter, 0 otherwise (including when no mask was given at boot,
+ * in which case nohz_extended_mask was never allocated and must not
+ * be dereferenced).
+ */
+int tick_nohz_extended_cpu(int cpu)
+{
+ if (!have_nohz_extended_mask)
+ return 0;
+
+ return cpumask_test_cpu(cpu, nohz_extended_mask);
+}
+
+/*
+ * Parse the boot-time nohz CPU list from the kernel parameters.
+ *
+ * Allocates the cpumask from bootmem (this runs before the normal
+ * allocators are up) and sets have_nohz_extended_mask only when the
+ * list parsed cleanly; on a malformed list the feature stays disabled.
+ * NOTE(review): returns 1 even on parse failure — presumably to mark
+ * the option as consumed per __setup() convention; confirm intended.
+ */
+static int __init tick_nohz_extended_setup(char *str)
+{
+ alloc_bootmem_cpumask_var(&nohz_extended_mask);
+ if (cpulist_parse(str, nohz_extended_mask) < 0)
+ pr_warning("NOHZ: Incorrect nohz_extended cpumask\n");
+ else
+ have_nohz_extended_mask = true;
+ return 1;
+}
+__setup("nohz_extended=", tick_nohz_extended_setup);
+
+/*
+ * init_tick_nohz_extended - sanity-check the boot-requested dynticks mask
+ *
+ * Runs as a core initcall. Ensures at least one online CPU remains
+ * outside the full-dynticks range so that it can keep handling
+ * timekeeping duties; if the user's mask covers every online CPU,
+ * one arbitrary CPU is evicted from the mask with a warning.
+ *
+ * Returns 0 on success, -ENOMEM if the scratch cpumask allocation
+ * fails (in which case the mask is left unvalidated).
+ */
+static int __init init_tick_nohz_extended(void)
+{
+ cpumask_var_t online_nohz;
+ int cpu;
+
+ /* Nothing to validate if no "nohz_extended=" mask was given */
+ if (!have_nohz_extended_mask)
+ return 0;
+
+ if (!zalloc_cpumask_var(&online_nohz, GFP_KERNEL)) {
+ pr_warning("NO_HZ: Not enough memory to check extended nohz mask\n");
+ return -ENOMEM;
+ }
+
+ /*
+ * CPUs can probably not be concurrently offlined on initcall time.
+ * But we are paranoid, aren't we?
+ */
+ get_online_cpus();
+
+ /* Ensure we keep a CPU outside the dynticks range for timekeeping */
+ cpumask_and(online_nohz, cpu_online_mask, nohz_extended_mask);
+ if (cpumask_equal(online_nohz, cpu_online_mask)) {
+ cpu = cpumask_any(cpu_online_mask);
+ pr_warning("NO_HZ: Must keep at least one online CPU "
+ "out of nohz_extended range\n");
+ pr_warning("NO_HZ: Clearing %d from nohz_extended range\n", cpu);
+ cpumask_clear_cpu(cpu, nohz_extended_mask);
+ }
+ put_online_cpus();
+ free_cpumask_var(online_nohz);
+
+ return 0;
+}
+core_initcall(init_tick_nohz_extended);
+#else
+/* !CONFIG_NO_HZ_EXTENDED: feature compiled out, flag constant-folds to 0 */
+#define have_nohz_extended_mask (0)
+#endif
+
/*
* NOHZ - aka dynamic tick functionality
*/