author     Linus Torvalds <torvalds@linux-foundation.org>   2008-10-21 00:19:56 +0400
committer  Linus Torvalds <torvalds@linux-foundation.org>   2008-10-21 00:19:56 +0400
commit     99ebcf8285df28f32fd2d1c19a7166e70f00309c (patch)
tree       caf45f39a77026b2fae2413c145067a1e5164701 /kernel
parent     72558dde738b06cc01e16b3247a9659ca739e22d (diff)
parent     c465a76af658b443075d6efee1c3131257643020 (diff)
download   linux-99ebcf8285df28f32fd2d1c19a7166e70f00309c.tar.xz
Merge branch 'v28-timers-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'v28-timers-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (36 commits)
  fix documentation of sysrq-q really
  Fix documentation of sysrq-q
  timer_list: add base address to clock base
  timer_list: print cpu number of clockevents device
  timer_list: print real timer address
  NOHZ: restart tick device from irq_enter()
  NOHZ: split tick_nohz_restart_sched_tick()
  NOHZ: unify the nohz function calls in irq_enter()
  timers: fix itimer/many thread hang, fix
  timers: fix itimer/many thread hang, v3
  ntp: improve adjtimex frequency rounding
  timekeeping: fix rounding problem during clock update
  ntp: let update_persistent_clock() sleep
  hrtimer: reorder struct hrtimer to save 8 bytes on 64bit builds
  posix-timers: lock_timer: make it readable
  posix-timers: lock_timer: kill the bogus ->it_id check
  posix-timers: kill ->it_sigev_signo and ->it_sigev_value
  posix-timers: sys_timer_create: cleanup the error handling
  posix-timers: move the initialization of timer->sigq from send to create path
  posix-timers: sys_timer_create: simplify and s/tasklist/rcu/
  ...

Fix trivial conflicts due to sysrq-q description clashes in Documentation/sysrq.txt
and drivers/char/sysrq.c
Diffstat (limited to 'kernel')
 -rw-r--r--   kernel/compat.c                |  53
 -rw-r--r--   kernel/exit.c                  |  19
 -rw-r--r--   kernel/fork.c                  |  92
 -rw-r--r--   kernel/hrtimer.c               |  15
 -rw-r--r--   kernel/itimer.c                |  33
 -rw-r--r--   kernel/posix-cpu-timers.c      | 512
 -rw-r--r--   kernel/posix-timers.c          | 153
 -rw-r--r--   kernel/sched.c                 |  19
 -rw-r--r--   kernel/sched_fair.c            |   1
 -rw-r--r--   kernel/sched_rt.c              |   4
 -rw-r--r--   kernel/sched_stats.h           |  86
 -rw-r--r--   kernel/signal.c                |   8
 -rw-r--r--   kernel/softirq.c               |  10
 -rw-r--r--   kernel/sys.c                   |  75
 -rw-r--r--   kernel/time/clocksource.c      |   3
 -rw-r--r--   kernel/time/jiffies.c          |   1
 -rw-r--r--   kernel/time/ntp.c              |  93
 -rw-r--r--   kernel/time/tick-broadcast.c   |  13
 -rw-r--r--   kernel/time/tick-internal.h    |   2
 -rw-r--r--   kernel/time/tick-sched.c       |  93
 -rw-r--r--   kernel/time/timekeeping.c      | 122
 -rw-r--r--   kernel/time/timer_list.c       |  20
 -rw-r--r--   kernel/timer.c                 |  11
23 files changed, 778 insertions, 660 deletions
diff --git a/kernel/compat.c b/kernel/compat.c
index 143990e48cb9..8eafe3eb50d9 100644
--- a/kernel/compat.c
+++ b/kernel/compat.c
@@ -23,6 +23,7 @@
#include <linux/timex.h>
#include <linux/migrate.h>
#include <linux/posix-timers.h>
+#include <linux/times.h>
#include <asm/uaccess.h>
@@ -208,49 +209,23 @@ asmlinkage long compat_sys_setitimer(int which,
return 0;
}
+static compat_clock_t clock_t_to_compat_clock_t(clock_t x)
+{
+ return compat_jiffies_to_clock_t(clock_t_to_jiffies(x));
+}
+
asmlinkage long compat_sys_times(struct compat_tms __user *tbuf)
{
- /*
- * In the SMP world we might just be unlucky and have one of
- * the times increment as we use it. Since the value is an
- * atomically safe type this is just fine. Conceptually its
- * as if the syscall took an instant longer to occur.
- */
if (tbuf) {
+ struct tms tms;
struct compat_tms tmp;
- struct task_struct *tsk = current;
- struct task_struct *t;
- cputime_t utime, stime, cutime, cstime;
-
- read_lock(&tasklist_lock);
- utime = tsk->signal->utime;
- stime = tsk->signal->stime;
- t = tsk;
- do {
- utime = cputime_add(utime, t->utime);
- stime = cputime_add(stime, t->stime);
- t = next_thread(t);
- } while (t != tsk);
-
- /*
- * While we have tasklist_lock read-locked, no dying thread
- * can be updating current->signal->[us]time. Instead,
- * we got their counts included in the live thread loop.
- * However, another thread can come in right now and
- * do a wait call that updates current->signal->c[us]time.
- * To make sure we always see that pair updated atomically,
- * we take the siglock around fetching them.
- */
- spin_lock_irq(&tsk->sighand->siglock);
- cutime = tsk->signal->cutime;
- cstime = tsk->signal->cstime;
- spin_unlock_irq(&tsk->sighand->siglock);
- read_unlock(&tasklist_lock);
-
- tmp.tms_utime = compat_jiffies_to_clock_t(cputime_to_jiffies(utime));
- tmp.tms_stime = compat_jiffies_to_clock_t(cputime_to_jiffies(stime));
- tmp.tms_cutime = compat_jiffies_to_clock_t(cputime_to_jiffies(cutime));
- tmp.tms_cstime = compat_jiffies_to_clock_t(cputime_to_jiffies(cstime));
+
+ do_sys_times(&tms);
+ /* Convert our struct tms to the compat version. */
+ tmp.tms_utime = clock_t_to_compat_clock_t(tms.tms_utime);
+ tmp.tms_stime = clock_t_to_compat_clock_t(tms.tms_stime);
+ tmp.tms_cutime = clock_t_to_compat_clock_t(tms.tms_cutime);
+ tmp.tms_cstime = clock_t_to_compat_clock_t(tms.tms_cstime);
if (copy_to_user(tbuf, &tmp, sizeof(tmp)))
return -EFAULT;
}
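
The compat path above now just calls do_sys_times() and converts units afterwards. As an illustration of the interface being consolidated (not the kernel code itself), a minimal user-space sketch of times(2) using only standard libc calls:

#include <stdio.h>
#include <sys/times.h>
#include <unistd.h>

int main(void)
{
	struct tms t;
	long tck = sysconf(_SC_CLK_TCK);	/* clock ticks per second */

	if (times(&t) == (clock_t)-1)
		return 1;
	/* tms_utime/tms_stime cover the whole thread group,
	 * tms_cutime/tms_cstime cover waited-for children. */
	printf("utime  %.2fs\n", (double)t.tms_utime  / tck);
	printf("stime  %.2fs\n", (double)t.tms_stime  / tck);
	printf("cutime %.2fs\n", (double)t.tms_cutime / tck);
	printf("cstime %.2fs\n", (double)t.tms_cstime / tck);
	return 0;
}

Both the native and compat syscalls keep filling these four fields with thread-group-wide totals; only the width of the returned clock_t differs.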
diff --git a/kernel/exit.c b/kernel/exit.c
index 0ef4673e351b..059b38cae384 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -112,8 +112,6 @@ static void __exit_signal(struct task_struct *tsk)
* We won't ever get here for the group leader, since it
* will have been the last reference on the signal_struct.
*/
- sig->utime = cputime_add(sig->utime, task_utime(tsk));
- sig->stime = cputime_add(sig->stime, task_stime(tsk));
sig->gtime = cputime_add(sig->gtime, task_gtime(tsk));
sig->min_flt += tsk->min_flt;
sig->maj_flt += tsk->maj_flt;
@@ -122,7 +120,6 @@ static void __exit_signal(struct task_struct *tsk)
sig->inblock += task_io_get_inblock(tsk);
sig->oublock += task_io_get_oublock(tsk);
task_io_accounting_add(&sig->ioac, &tsk->ioac);
- sig->sum_sched_runtime += tsk->se.sum_exec_runtime;
sig = NULL; /* Marker for below. */
}
@@ -1301,6 +1298,7 @@ static int wait_task_zombie(struct task_struct *p, int options,
if (likely(!traced)) {
struct signal_struct *psig;
struct signal_struct *sig;
+ struct task_cputime cputime;
/*
* The resource counters for the group leader are in its
@@ -1316,20 +1314,23 @@ static int wait_task_zombie(struct task_struct *p, int options,
* need to protect the access to p->parent->signal fields,
* as other threads in the parent group can be right
* here reaping other children at the same time.
+ *
+ * We use thread_group_cputime() to get times for the thread
+ * group, which consolidates times for all threads in the
+ * group including the group leader.
*/
spin_lock_irq(&p->parent->sighand->siglock);
psig = p->parent->signal;
sig = p->signal;
+ thread_group_cputime(p, &cputime);
psig->cutime =
cputime_add(psig->cutime,
- cputime_add(p->utime,
- cputime_add(sig->utime,
- sig->cutime)));
+ cputime_add(cputime.utime,
+ sig->cutime));
psig->cstime =
cputime_add(psig->cstime,
- cputime_add(p->stime,
- cputime_add(sig->stime,
- sig->cstime)));
+ cputime_add(cputime.stime,
+ sig->cstime));
psig->cgtime =
cputime_add(psig->cgtime,
cputime_add(p->gtime,
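
The hunk above folds the exiting group's CPU time into the parent's cutime/cstime via thread_group_cputime(). A user-space sketch of where that accumulation becomes visible — fork a child, reap it, then read RUSAGE_CHILDREN (standard POSIX calls, illustrative only):

#include <stdio.h>
#include <sys/resource.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	struct rusage ru;
	pid_t pid = fork();

	if (pid == 0) {			/* child: burn a little CPU, then exit */
		volatile unsigned long i;
		for (i = 0; i < 100000000UL; i++)
			;
		_exit(0);
	}
	if (pid < 0)
		return 1;
	waitpid(pid, NULL, 0);		/* reaping folds the child's totals into ours */

	getrusage(RUSAGE_CHILDREN, &ru);
	printf("children: utime %ld.%06lds stime %ld.%06lds\n",
	       (long)ru.ru_utime.tv_sec, (long)ru.ru_utime.tv_usec,
	       (long)ru.ru_stime.tv_sec, (long)ru.ru_stime.tv_usec);
	return 0;
}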
diff --git a/kernel/fork.c b/kernel/fork.c
index 30de644a40c4..44e64d7ba29b 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -759,15 +759,44 @@ void __cleanup_sighand(struct sighand_struct *sighand)
kmem_cache_free(sighand_cachep, sighand);
}
+
+/*
+ * Initialize POSIX timer handling for a thread group.
+ */
+static void posix_cpu_timers_init_group(struct signal_struct *sig)
+{
+ /* Thread group counters. */
+ thread_group_cputime_init(sig);
+
+ /* Expiration times and increments. */
+ sig->it_virt_expires = cputime_zero;
+ sig->it_virt_incr = cputime_zero;
+ sig->it_prof_expires = cputime_zero;
+ sig->it_prof_incr = cputime_zero;
+
+ /* Cached expiration times. */
+ sig->cputime_expires.prof_exp = cputime_zero;
+ sig->cputime_expires.virt_exp = cputime_zero;
+ sig->cputime_expires.sched_exp = 0;
+
+ /* The timer lists. */
+ INIT_LIST_HEAD(&sig->cpu_timers[0]);
+ INIT_LIST_HEAD(&sig->cpu_timers[1]);
+ INIT_LIST_HEAD(&sig->cpu_timers[2]);
+}
+
static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
{
struct signal_struct *sig;
int ret;
if (clone_flags & CLONE_THREAD) {
- atomic_inc(&current->signal->count);
- atomic_inc(&current->signal->live);
- return 0;
+ ret = thread_group_cputime_clone_thread(current);
+ if (likely(!ret)) {
+ atomic_inc(&current->signal->count);
+ atomic_inc(&current->signal->live);
+ }
+ return ret;
}
sig = kmem_cache_alloc(signal_cachep, GFP_KERNEL);
tsk->signal = sig;
@@ -795,40 +824,25 @@ static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
sig->it_real_incr.tv64 = 0;
sig->real_timer.function = it_real_fn;
- sig->it_virt_expires = cputime_zero;
- sig->it_virt_incr = cputime_zero;
- sig->it_prof_expires = cputime_zero;
- sig->it_prof_incr = cputime_zero;
-
sig->leader = 0; /* session leadership doesn't inherit */
sig->tty_old_pgrp = NULL;
sig->tty = NULL;
- sig->utime = sig->stime = sig->cutime = sig->cstime = cputime_zero;
+ sig->cutime = sig->cstime = cputime_zero;
sig->gtime = cputime_zero;
sig->cgtime = cputime_zero;
sig->nvcsw = sig->nivcsw = sig->cnvcsw = sig->cnivcsw = 0;
sig->min_flt = sig->maj_flt = sig->cmin_flt = sig->cmaj_flt = 0;
sig->inblock = sig->oublock = sig->cinblock = sig->coublock = 0;
task_io_accounting_init(&sig->ioac);
- sig->sum_sched_runtime = 0;
- INIT_LIST_HEAD(&sig->cpu_timers[0]);
- INIT_LIST_HEAD(&sig->cpu_timers[1]);
- INIT_LIST_HEAD(&sig->cpu_timers[2]);
taskstats_tgid_init(sig);
task_lock(current->group_leader);
memcpy(sig->rlim, current->signal->rlim, sizeof sig->rlim);
task_unlock(current->group_leader);
- if (sig->rlim[RLIMIT_CPU].rlim_cur != RLIM_INFINITY) {
- /*
- * New sole thread in the process gets an expiry time
- * of the whole CPU time limit.
- */
- tsk->it_prof_expires =
- secs_to_cputime(sig->rlim[RLIMIT_CPU].rlim_cur);
- }
+ posix_cpu_timers_init_group(sig);
+
acct_init_pacct(&sig->pacct);
tty_audit_fork(sig);
@@ -838,6 +852,7 @@ static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
void __cleanup_signal(struct signal_struct *sig)
{
+ thread_group_cputime_free(sig);
exit_thread_group_keys(sig);
tty_kref_put(sig->tty);
kmem_cache_free(signal_cachep, sig);
@@ -888,6 +903,19 @@ void mm_init_owner(struct mm_struct *mm, struct task_struct *p)
#endif /* CONFIG_MM_OWNER */
/*
+ * Initialize POSIX timer handling for a single task.
+ */
+static void posix_cpu_timers_init(struct task_struct *tsk)
+{
+ tsk->cputime_expires.prof_exp = cputime_zero;
+ tsk->cputime_expires.virt_exp = cputime_zero;
+ tsk->cputime_expires.sched_exp = 0;
+ INIT_LIST_HEAD(&tsk->cpu_timers[0]);
+ INIT_LIST_HEAD(&tsk->cpu_timers[1]);
+ INIT_LIST_HEAD(&tsk->cpu_timers[2]);
+}
+
+/*
* This creates a new process as a copy of the old one,
* but does not actually start it yet.
*
@@ -997,12 +1025,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
task_io_accounting_init(&p->ioac);
acct_clear_integrals(p);
- p->it_virt_expires = cputime_zero;
- p->it_prof_expires = cputime_zero;
- p->it_sched_expires = 0;
- INIT_LIST_HEAD(&p->cpu_timers[0]);
- INIT_LIST_HEAD(&p->cpu_timers[1]);
- INIT_LIST_HEAD(&p->cpu_timers[2]);
+ posix_cpu_timers_init(p);
p->lock_depth = -1; /* -1 = no lock */
do_posix_clock_monotonic_gettime(&p->start_time);
@@ -1203,21 +1226,6 @@ static struct task_struct *copy_process(unsigned long clone_flags,
if (clone_flags & CLONE_THREAD) {
p->group_leader = current->group_leader;
list_add_tail_rcu(&p->thread_group, &p->group_leader->thread_group);
-
- if (!cputime_eq(current->signal->it_virt_expires,
- cputime_zero) ||
- !cputime_eq(current->signal->it_prof_expires,
- cputime_zero) ||
- current->signal->rlim[RLIMIT_CPU].rlim_cur != RLIM_INFINITY ||
- !list_empty(&current->signal->cpu_timers[0]) ||
- !list_empty(&current->signal->cpu_timers[1]) ||
- !list_empty(&current->signal->cpu_timers[2])) {
- /*
- * Have child wake up on its first tick to check
- * for process CPU timers.
- */
- p->it_prof_expires = jiffies_to_cputime(1);
- }
}
if (likely(p->pid)) {
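
With copy_signal()/copy_process() no longer arming a per-thread expiry at clone time, the group shares one accounting structure from the start. The observable property this preserves — process CPU time is the sum over all threads of the group — can be sketched from user space with pthreads and CLOCK_PROCESS_CPUTIME_ID (illustrative, not kernel code; build with -pthread, plus -lrt on older glibc):

#include <pthread.h>
#include <stdio.h>
#include <time.h>

static void *spin(void *arg)
{
	volatile unsigned long i;

	(void)arg;
	for (i = 0; i < 100000000UL; i++)
		;
	return NULL;
}

int main(void)
{
	pthread_t t[4];
	struct timespec ts;
	int i;

	for (i = 0; i < 4; i++)
		pthread_create(&t[i], NULL, spin, NULL);
	for (i = 0; i < 4; i++)
		pthread_join(t[i], NULL);

	/* The process CPU clock is the sum over every thread in the group. */
	clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &ts);
	printf("group cpu time: %ld.%09lds\n", (long)ts.tv_sec, ts.tv_nsec);
	return 0;
}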
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index cdec83e722fa..95978f48e039 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -1403,9 +1403,7 @@ void hrtimer_run_queues(void)
if (!base->first)
continue;
- if (base->get_softirq_time)
- base->softirq_time = base->get_softirq_time();
- else if (gettime) {
+ if (gettime) {
hrtimer_get_softirq_time(cpu_base);
gettime = 0;
}
@@ -1688,9 +1686,11 @@ static void migrate_hrtimers(int cpu)
new_base = &get_cpu_var(hrtimer_bases);
tick_cancel_sched_timer(cpu);
-
- local_irq_disable();
- spin_lock(&new_base->lock);
+ /*
+ * The caller is globally serialized and nobody else
+ * takes two locks at once, deadlock is not possible.
+ */
+ spin_lock_irq(&new_base->lock);
spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);
for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
@@ -1703,8 +1703,7 @@ static void migrate_hrtimers(int cpu)
raise = 1;
spin_unlock(&old_base->lock);
- spin_unlock(&new_base->lock);
- local_irq_enable();
+ spin_unlock_irq(&new_base->lock);
put_cpu_var(hrtimer_bases);
if (raise)
diff --git a/kernel/itimer.c b/kernel/itimer.c
index ab982747d9bd..db7c358b9a02 100644
--- a/kernel/itimer.c
+++ b/kernel/itimer.c
@@ -55,17 +55,15 @@ int do_getitimer(int which, struct itimerval *value)
spin_unlock_irq(&tsk->sighand->siglock);
break;
case ITIMER_VIRTUAL:
- read_lock(&tasklist_lock);
spin_lock_irq(&tsk->sighand->siglock);
cval = tsk->signal->it_virt_expires;
cinterval = tsk->signal->it_virt_incr;
if (!cputime_eq(cval, cputime_zero)) {
- struct task_struct *t = tsk;
- cputime_t utime = tsk->signal->utime;
- do {
- utime = cputime_add(utime, t->utime);
- t = next_thread(t);
- } while (t != tsk);
+ struct task_cputime cputime;
+ cputime_t utime;
+
+ thread_group_cputime(tsk, &cputime);
+ utime = cputime.utime;
if (cputime_le(cval, utime)) { /* about to fire */
cval = jiffies_to_cputime(1);
} else {
@@ -73,25 +71,19 @@ int do_getitimer(int which, struct itimerval *value)
}
}
spin_unlock_irq(&tsk->sighand->siglock);
- read_unlock(&tasklist_lock);
cputime_to_timeval(cval, &value->it_value);
cputime_to_timeval(cinterval, &value->it_interval);
break;
case ITIMER_PROF:
- read_lock(&tasklist_lock);
spin_lock_irq(&tsk->sighand->siglock);
cval = tsk->signal->it_prof_expires;
cinterval = tsk->signal->it_prof_incr;
if (!cputime_eq(cval, cputime_zero)) {
- struct task_struct *t = tsk;
- cputime_t ptime = cputime_add(tsk->signal->utime,
- tsk->signal->stime);
- do {
- ptime = cputime_add(ptime,
- cputime_add(t->utime,
- t->stime));
- t = next_thread(t);
- } while (t != tsk);
+ struct task_cputime times;
+ cputime_t ptime;
+
+ thread_group_cputime(tsk, &times);
+ ptime = cputime_add(times.utime, times.stime);
if (cputime_le(cval, ptime)) { /* about to fire */
cval = jiffies_to_cputime(1);
} else {
@@ -99,7 +91,6 @@ int do_getitimer(int which, struct itimerval *value)
}
}
spin_unlock_irq(&tsk->sighand->siglock);
- read_unlock(&tasklist_lock);
cputime_to_timeval(cval, &value->it_value);
cputime_to_timeval(cinterval, &value->it_interval);
break;
@@ -185,7 +176,6 @@ again:
case ITIMER_VIRTUAL:
nval = timeval_to_cputime(&value->it_value);
ninterval = timeval_to_cputime(&value->it_interval);
- read_lock(&tasklist_lock);
spin_lock_irq(&tsk->sighand->siglock);
cval = tsk->signal->it_virt_expires;
cinterval = tsk->signal->it_virt_incr;
@@ -200,7 +190,6 @@ again:
tsk->signal->it_virt_expires = nval;
tsk->signal->it_virt_incr = ninterval;
spin_unlock_irq(&tsk->sighand->siglock);
- read_unlock(&tasklist_lock);
if (ovalue) {
cputime_to_timeval(cval, &ovalue->it_value);
cputime_to_timeval(cinterval, &ovalue->it_interval);
@@ -209,7 +198,6 @@ again:
case ITIMER_PROF:
nval = timeval_to_cputime(&value->it_value);
ninterval = timeval_to_cputime(&value->it_interval);
- read_lock(&tasklist_lock);
spin_lock_irq(&tsk->sighand->siglock);
cval = tsk->signal->it_prof_expires;
cinterval = tsk->signal->it_prof_incr;
@@ -224,7 +212,6 @@ again:
tsk->signal->it_prof_expires = nval;
tsk->signal->it_prof_incr = ninterval;
spin_unlock_irq(&tsk->sighand->siglock);
- read_unlock(&tasklist_lock);
if (ovalue) {
cputime_to_timeval(cval, &ovalue->it_value);
cputime_to_timeval(cinterval, &ovalue->it_interval);
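
do_getitimer()/do_setitimer() now sample group time through thread_group_cputime() under siglock alone instead of walking every thread under tasklist_lock. The user-visible contract is unchanged; a minimal sketch of that interface (standard setitimer(2)/getitimer(2), nothing kernel-internal):

#include <stdio.h>
#include <sys/time.h>

int main(void)
{
	struct itimerval set = {
		.it_value    = { .tv_sec = 5, .tv_usec = 0 },	/* first expiry */
		.it_interval = { .tv_sec = 1, .tv_usec = 0 },	/* then every second */
	};
	struct itimerval left;

	if (setitimer(ITIMER_PROF, &set, NULL) < 0)
		return 1;
	/* Reads back the value minus the CPU already consumed by the whole group. */
	if (getitimer(ITIMER_PROF, &left) < 0)
		return 1;
	printf("remaining: %ld.%06lds\n",
	       (long)left.it_value.tv_sec, (long)left.it_value.tv_usec);
	return 0;
}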
diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
index c42a03aef36f..153dcb2639c3 100644
--- a/kernel/posix-cpu-timers.c
+++ b/kernel/posix-cpu-timers.c
@@ -7,6 +7,93 @@
#include <linux/errno.h>
#include <linux/math64.h>
#include <asm/uaccess.h>
+#include <linux/kernel_stat.h>
+
+/*
+ * Allocate the thread_group_cputime structure appropriately and fill in the
+ * current values of the fields. Called from copy_signal() via
+ * thread_group_cputime_clone_thread() when adding a second or subsequent
+ * thread to a thread group. Assumes interrupts are enabled when called.
+ */
+int thread_group_cputime_alloc(struct task_struct *tsk)
+{
+ struct signal_struct *sig = tsk->signal;
+ struct task_cputime *cputime;
+
+ /*
+ * If we have multiple threads and we don't already have a
+ * per-CPU task_cputime struct (checked in the caller), allocate
+ * one and fill it in with the times accumulated so far. We may
+ * race with another thread so recheck after we pick up the sighand
+ * lock.
+ */
+ cputime = alloc_percpu(struct task_cputime);
+ if (cputime == NULL)
+ return -ENOMEM;
+ spin_lock_irq(&tsk->sighand->siglock);
+ if (sig->cputime.totals) {
+ spin_unlock_irq(&tsk->sighand->siglock);
+ free_percpu(cputime);
+ return 0;
+ }
+ sig->cputime.totals = cputime;
+ cputime = per_cpu_ptr(sig->cputime.totals, smp_processor_id());
+ cputime->utime = tsk->utime;
+ cputime->stime = tsk->stime;
+ cputime->sum_exec_runtime = tsk->se.sum_exec_runtime;
+ spin_unlock_irq(&tsk->sighand->siglock);
+ return 0;
+}
+
+/**
+ * thread_group_cputime - Sum the thread group time fields across all CPUs.
+ *
+ * @tsk: The task we use to identify the thread group.
+ * @times: task_cputime structure in which we return the summed fields.
+ *
+ * Walk the list of CPUs to sum the per-CPU time fields in the thread group
+ * time structure.
+ */
+void thread_group_cputime(
+ struct task_struct *tsk,
+ struct task_cputime *times)
+{
+ struct signal_struct *sig;
+ int i;
+ struct task_cputime *tot;
+
+ sig = tsk->signal;
+ if (unlikely(!sig) || !sig->cputime.totals) {
+ times->utime = tsk->utime;
+ times->stime = tsk->stime;
+ times->sum_exec_runtime = tsk->se.sum_exec_runtime;
+ return;
+ }
+ times->stime = times->utime = cputime_zero;
+ times->sum_exec_runtime = 0;
+ for_each_possible_cpu(i) {
+ tot = per_cpu_ptr(tsk->signal->cputime.totals, i);
+ times->utime = cputime_add(times->utime, tot->utime);
+ times->stime = cputime_add(times->stime, tot->stime);
+ times->sum_exec_runtime += tot->sum_exec_runtime;
+ }
+}
+
+/*
+ * Called after updating RLIMIT_CPU to set timer expiration if necessary.
+ */
+void update_rlimit_cpu(unsigned long rlim_new)
+{
+ cputime_t cputime;
+
+ cputime = secs_to_cputime(rlim_new);
+ if (cputime_eq(current->signal->it_prof_expires, cputime_zero) ||
+ cputime_lt(current->signal->it_prof_expires, cputime)) {
+ spin_lock_irq(&current->sighand->siglock);
+ set_process_cpu_timer(current, CPUCLOCK_PROF, &cputime, NULL);
+ spin_unlock_irq(&current->sighand->siglock);
+ }
+}
static int check_clock(const clockid_t which_clock)
{
@@ -158,10 +245,6 @@ static inline cputime_t virt_ticks(struct task_struct *p)
{
return p->utime;
}
-static inline unsigned long long sched_ns(struct task_struct *p)
-{
- return task_sched_runtime(p);
-}
int posix_cpu_clock_getres(const clockid_t which_clock, struct timespec *tp)
{
@@ -211,7 +294,7 @@ static int cpu_clock_sample(const clockid_t which_clock, struct task_struct *p,
cpu->cpu = virt_ticks(p);
break;
case CPUCLOCK_SCHED:
- cpu->sched = sched_ns(p);
+ cpu->sched = p->se.sum_exec_runtime + task_delta_exec(p);
break;
}
return 0;
@@ -220,59 +303,30 @@ static int cpu_clock_sample(const clockid_t which_clock, struct task_struct *p,
/*
* Sample a process (thread group) clock for the given group_leader task.
* Must be called with tasklist_lock held for reading.
- * Must be called with tasklist_lock held for reading, and p->sighand->siglock.
*/
-static int cpu_clock_sample_group_locked(unsigned int clock_idx,
- struct task_struct *p,
- union cpu_time_count *cpu)
+static int cpu_clock_sample_group(const clockid_t which_clock,
+ struct task_struct *p,
+ union cpu_time_count *cpu)
{
- struct task_struct *t = p;
- switch (clock_idx) {
+ struct task_cputime cputime;
+
+ thread_group_cputime(p, &cputime);
+ switch (which_clock) {
default:
return -EINVAL;
case CPUCLOCK_PROF:
- cpu->cpu = cputime_add(p->signal->utime, p->signal->stime);
- do {
- cpu->cpu = cputime_add(cpu->cpu, prof_ticks(t));
- t = next_thread(t);
- } while (t != p);
+ cpu->cpu = cputime_add(cputime.utime, cputime.stime);
break;
case CPUCLOCK_VIRT:
- cpu->cpu = p->signal->utime;
- do {
- cpu->cpu = cputime_add(cpu->cpu, virt_ticks(t));
- t = next_thread(t);
- } while (t != p);
+ cpu->cpu = cputime.utime;
break;
case CPUCLOCK_SCHED:
- cpu->sched = p->signal->sum_sched_runtime;
- /* Add in each other live thread. */
- while ((t = next_thread(t)) != p) {
- cpu->sched += t->se.sum_exec_runtime;
- }
- cpu->sched += sched_ns(p);
+ cpu->sched = cputime.sum_exec_runtime + task_delta_exec(p);
break;
}
return 0;
}
-/*
- * Sample a process (thread group) clock for the given group_leader task.
- * Must be called with tasklist_lock held for reading.
- */
-static int cpu_clock_sample_group(const clockid_t which_clock,
- struct task_struct *p,
- union cpu_time_count *cpu)
-{
- int ret;
- unsigned long flags;
- spin_lock_irqsave(&p->sighand->siglock, flags);
- ret = cpu_clock_sample_group_locked(CPUCLOCK_WHICH(which_clock), p,
- cpu);
- spin_unlock_irqrestore(&p->sighand->siglock, flags);
- return ret;
-}
-
int posix_cpu_clock_get(const clockid_t which_clock, struct timespec *tp)
{
@@ -471,80 +525,11 @@ void posix_cpu_timers_exit(struct task_struct *tsk)
}
void posix_cpu_timers_exit_group(struct task_struct *tsk)
{
- cleanup_timers(tsk->signal->cpu_timers,
- cputime_add(tsk->utime, tsk->signal->utime),
- cputime_add(tsk->stime, tsk->signal->stime),
- tsk->se.sum_exec_runtime + tsk->signal->sum_sched_runtime);
-}
+ struct task_cputime cputime;
-
-/*
- * Set the expiry times of all the threads in the process so one of them
- * will go off before the process cumulative expiry total is reached.
- */
-static void process_timer_rebalance(struct task_struct *p,
- unsigned int clock_idx,
- union cpu_time_count expires,
- union cpu_time_count val)
-{
- cputime_t ticks, left;
- unsigned long long ns, nsleft;
- struct task_struct *t = p;
- unsigned int nthreads = atomic_read(&p->signal->live);
-
- if (!nthreads)
- return;
-
- switch (clock_idx) {
- default:
- BUG();
- break;
- case CPUCLOCK_PROF:
- left = cputime_div_non_zero(cputime_sub(expires.cpu, val.cpu),
- nthreads);
- do {
- if (likely(!(t->flags & PF_EXITING))) {
- ticks = cputime_add(prof_ticks(t), left);
- if (cputime_eq(t->it_prof_expires,
- cputime_zero) ||
- cputime_gt(t->it_prof_expires, ticks)) {
- t->it_prof_expires = ticks;
- }
- }
- t = next_thread(t);
- } while (t != p);
- break;
- case CPUCLOCK_VIRT:
- left = cputime_div_non_zero(cputime_sub(expires.cpu, val.cpu),
- nthreads);
- do {
- if (likely(!(t->flags & PF_EXITING))) {
- ticks = cputime_add(virt_ticks(t), left);
- if (cputime_eq(t->it_virt_expires,
- cputime_zero) ||
- cputime_gt(t->it_virt_expires, ticks)) {
- t->it_virt_expires = ticks;
- }
- }
- t = next_thread(t);
- } while (t != p);
- break;
- case CPUCLOCK_SCHED:
- nsleft = expires.sched - val.sched;
- do_div(nsleft, nthreads);
- nsleft = max_t(unsigned long long, nsleft, 1);
- do {
- if (likely(!(t->flags & PF_EXITING))) {
- ns = t->se.sum_exec_runtime + nsleft;
- if (t->it_sched_expires == 0 ||
- t->it_sched_expires > ns) {
- t->it_sched_expires = ns;
- }
- }
- t = next_thread(t);
- } while (t != p);
- break;
- }
+ thread_group_cputime(tsk, &cputime);
+ cleanup_timers(tsk->signal->cpu_timers,
+ cputime.utime, cputime.stime, cputime.sum_exec_runtime);
}
static void clear_dead_task(struct k_itimer *timer, union cpu_time_count now)
@@ -608,29 +593,32 @@ static void arm_timer(struct k_itimer *timer, union cpu_time_count now)
default:
BUG();
case CPUCLOCK_PROF:
- if (cputime_eq(p->it_prof_expires,
+ if (cputime_eq(p->cputime_expires.prof_exp,
cputime_zero) ||
- cputime_gt(p->it_prof_expires,
+ cputime_gt(p->cputime_expires.prof_exp,
nt->expires.cpu))
- p->it_prof_expires = nt->expires.cpu;
+ p->cputime_expires.prof_exp =
+ nt->expires.cpu;
break;
case CPUCLOCK_VIRT:
- if (cputime_eq(p->it_virt_expires,
+ if (cputime_eq(p->cputime_expires.virt_exp,
cputime_zero) ||
- cputime_gt(p->it_virt_expires,
+ cputime_gt(p->cputime_expires.virt_exp,
nt->expires.cpu))
- p->it_virt_expires = nt->expires.cpu;
+ p->cputime_expires.virt_exp =
+ nt->expires.cpu;
break;
case CPUCLOCK_SCHED:
- if (p->it_sched_expires == 0 ||
- p->it_sched_expires > nt->expires.sched)
- p->it_sched_expires = nt->expires.sched;
+ if (p->cputime_expires.sched_exp == 0 ||
+ p->cputime_expires.sched_exp >
+ nt->expires.sched)
+ p->cputime_expires.sched_exp =
+ nt->expires.sched;
break;
}
} else {
/*
- * For a process timer, we must balance
- * all the live threads' expirations.
+ * For a process timer, set the cached expiration time.
*/
switch (CPUCLOCK_WHICH(timer->it_clock)) {
default:
@@ -641,7 +629,9 @@ static void arm_timer(struct k_itimer *timer, union cpu_time_count now)
cputime_lt(p->signal->it_virt_expires,
timer->it.cpu.expires.cpu))
break;
- goto rebalance;
+ p->signal->cputime_expires.virt_exp =
+ timer->it.cpu.expires.cpu;
+ break;
case CPUCLOCK_PROF:
if (!cputime_eq(p->signal->it_prof_expires,
cputime_zero) &&
@@ -652,13 +642,12 @@ static void arm_timer(struct k_itimer *timer, union cpu_time_count now)
if (i != RLIM_INFINITY &&
i <= cputime_to_secs(timer->it.cpu.expires.cpu))
break;
- goto rebalance;
+ p->signal->cputime_expires.prof_exp =
+ timer->it.cpu.expires.cpu;
+ break;
case CPUCLOCK_SCHED:
- rebalance:
- process_timer_rebalance(
- timer->it.cpu.task,
- CPUCLOCK_WHICH(timer->it_clock),
- timer->it.cpu.expires, now);
+ p->signal->cputime_expires.sched_exp =
+ timer->it.cpu.expires.sched;
break;
}
}
@@ -969,13 +958,13 @@ static void check_thread_timers(struct task_struct *tsk,
struct signal_struct *const sig = tsk->signal;
maxfire = 20;
- tsk->it_prof_expires = cputime_zero;
+ tsk->cputime_expires.prof_exp = cputime_zero;
while (!list_empty(timers)) {
struct cpu_timer_list *t = list_first_entry(timers,
struct cpu_timer_list,
entry);
if (!--maxfire || cputime_lt(prof_ticks(tsk), t->expires.cpu)) {
- tsk->it_prof_expires = t->expires.cpu;
+ tsk->cputime_expires.prof_exp = t->expires.cpu;
break;
}
t->firing = 1;
@@ -984,13 +973,13 @@ static void check_thread_timers(struct task_struct *tsk,
++timers;
maxfire = 20;
- tsk->it_virt_expires = cputime_zero;
+ tsk->cputime_expires.virt_exp = cputime_zero;
while (!list_empty(timers)) {
struct cpu_timer_list *t = list_first_entry(timers,
struct cpu_timer_list,
entry);
if (!--maxfire || cputime_lt(virt_ticks(tsk), t->expires.cpu)) {
- tsk->it_virt_expires = t->expires.cpu;
+ tsk->cputime_expires.virt_exp = t->expires.cpu;
break;
}
t->firing = 1;
@@ -999,13 +988,13 @@ static void check_thread_timers(struct task_struct *tsk,
++timers;
maxfire = 20;
- tsk->it_sched_expires = 0;
+ tsk->cputime_expires.sched_exp = 0;
while (!list_empty(timers)) {
struct cpu_timer_list *t = list_first_entry(timers,
struct cpu_timer_list,
entry);
if (!--maxfire || tsk->se.sum_exec_runtime < t->expires.sched) {
- tsk->it_sched_expires = t->expires.sched;
+ tsk->cputime_expires.sched_exp = t->expires.sched;
break;
}
t->firing = 1;
@@ -1055,10 +1044,10 @@ static void check_process_timers(struct task_struct *tsk,
{
int maxfire;
struct signal_struct *const sig = tsk->signal;
- cputime_t utime, stime, ptime, virt_expires, prof_expires;
+ cputime_t utime, ptime, virt_expires, prof_expires;
unsigned long long sum_sched_runtime, sched_expires;
- struct task_struct *t;
struct list_head *timers = sig->cpu_timers;
+ struct task_cputime cputime;
/*
* Don't sample the current process CPU clocks if there are no timers.
@@ -1074,18 +1063,10 @@ static void check_process_timers(struct task_struct *tsk,
/*
* Collect the current process totals.
*/
- utime = sig->utime;
- stime = sig->stime;
- sum_sched_runtime = sig->sum_sched_runtime;
- t = tsk;
- do {
- utime = cputime_add(utime, t->utime);
- stime = cputime_add(stime, t->stime);
- sum_sched_runtime += t->se.sum_exec_runtime;
- t = next_thread(t);
- } while (t != tsk);
- ptime = cputime_add(utime, stime);
-
+ thread_group_cputime(tsk, &cputime);
+ utime = cputime.utime;
+ ptime = cputime_add(utime, cputime.stime);
+ sum_sched_runtime = cputime.sum_exec_runtime;
maxfire = 20;
prof_expires = cputime_zero;
while (!list_empty(timers)) {
@@ -1193,60 +1174,18 @@ static void check_process_timers(struct task_struct *tsk,
}
}
- if (!cputime_eq(prof_expires, cputime_zero) ||
- !cputime_eq(virt_expires, cputime_zero) ||
- sched_expires != 0) {
- /*
- * Rebalance the threads' expiry times for the remaining
- * process CPU timers.
- */
-
- cputime_t prof_left, virt_left, ticks;
- unsigned long long sched_left, sched;
- const unsigned int nthreads = atomic_read(&sig->live);
-
- if (!nthreads)
- return;
-
- prof_left = cputime_sub(prof_expires, utime);
- prof_left = cputime_sub(prof_left, stime);
- prof_left = cputime_div_non_zero(prof_left, nthreads);
- virt_left = cputime_sub(virt_expires, utime);
- virt_left = cputime_div_non_zero(virt_left, nthreads);
- if (sched_expires) {
- sched_left = sched_expires - sum_sched_runtime;
- do_div(sched_left, nthreads);
- sched_left = max_t(unsigned long long, sched_left, 1);
- } else {
- sched_left = 0;
- }
- t = tsk;
- do {
- if (unlikely(t->flags & PF_EXITING))
- continue;
-
- ticks = cputime_add(cputime_add(t->utime, t->stime),
- prof_left);
- if (!cputime_eq(prof_expires, cputime_zero) &&
- (cputime_eq(t->it_prof_expires, cputime_zero) ||
- cputime_gt(t->it_prof_expires, ticks))) {
- t->it_prof_expires = ticks;
- }
-
- ticks = cputime_add(t->utime, virt_left);
- if (!cputime_eq(virt_expires, cputime_zero) &&
- (cputime_eq(t->it_virt_expires, cputime_zero) ||
- cputime_gt(t->it_virt_expires, ticks))) {
- t->it_virt_expires = ticks;
- }
-
- sched = t->se.sum_exec_runtime + sched_left;
- if (sched_expires && (t->it_sched_expires == 0 ||
- t->it_sched_expires > sched)) {
- t->it_sched_expires = sched;
- }
- } while ((t = next_thread(t)) != tsk);
- }
+ if (!cputime_eq(prof_expires, cputime_zero) &&
+ (cputime_eq(sig->cputime_expires.prof_exp, cputime_zero) ||
+ cputime_gt(sig->cputime_expires.prof_exp, prof_expires)))
+ sig->cputime_expires.prof_exp = prof_expires;
+ if (!cputime_eq(virt_expires, cputime_zero) &&
+ (cputime_eq(sig->cputime_expires.virt_exp, cputime_zero) ||
+ cputime_gt(sig->cputime_expires.virt_exp, virt_expires)))
+ sig->cputime_expires.virt_exp = virt_expires;
+ if (sched_expires != 0 &&
+ (sig->cputime_expires.sched_exp == 0 ||
+ sig->cputime_expires.sched_exp > sched_expires))
+ sig->cputime_expires.sched_exp = sched_expires;
}
/*
@@ -1314,6 +1253,86 @@ out:
++timer->it_requeue_pending;
}
+/**
+ * task_cputime_zero - Check a task_cputime struct for all zero fields.
+ *
+ * @cputime: The struct to compare.
+ *
+ * Checks @cputime to see if all fields are zero. Returns true if all fields
+ * are zero, false if any field is nonzero.
+ */
+static inline int task_cputime_zero(const struct task_cputime *cputime)
+{
+ if (cputime_eq(cputime->utime, cputime_zero) &&
+ cputime_eq(cputime->stime, cputime_zero) &&
+ cputime->sum_exec_runtime == 0)
+ return 1;
+ return 0;
+}
+
+/**
+ * task_cputime_expired - Compare two task_cputime entities.
+ *
+ * @sample: The task_cputime structure to be checked for expiration.
+ * @expires: Expiration times, against which @sample will be checked.
+ *
+ * Checks @sample against @expires to see if any field of @sample has expired.
+ * Returns true if any field of the former is greater than the corresponding
+ * field of the latter if the latter field is set. Otherwise returns false.
+ */
+static inline int task_cputime_expired(const struct task_cputime *sample,
+ const struct task_cputime *expires)
+{
+ if (!cputime_eq(expires->utime, cputime_zero) &&
+ cputime_ge(sample->utime, expires->utime))
+ return 1;
+ if (!cputime_eq(expires->stime, cputime_zero) &&
+ cputime_ge(cputime_add(sample->utime, sample->stime),
+ expires->stime))
+ return 1;
+ if (expires->sum_exec_runtime != 0 &&
+ sample->sum_exec_runtime >= expires->sum_exec_runtime)
+ return 1;
+ return 0;
+}
+
+/**
+ * fastpath_timer_check - POSIX CPU timers fast path.
+ *
+ * @tsk: The task (thread) being checked.
+ *
+ * Check the task and thread group timers. If both are zero (there are no
+ * timers set) return false. Otherwise snapshot the task and thread group
+ * timers and compare them with the corresponding expiration times. Return
+ * true if a timer has expired, else return false.
+ */
+static inline int fastpath_timer_check(struct task_struct *tsk)
+{
+ struct signal_struct *sig = tsk->signal;
+
+ if (unlikely(!sig))
+ return 0;
+
+ if (!task_cputime_zero(&tsk->cputime_expires)) {
+ struct task_cputime task_sample = {
+ .utime = tsk->utime,
+ .stime = tsk->stime,
+ .sum_exec_runtime = tsk->se.sum_exec_runtime
+ };
+
+ if (task_cputime_expired(&task_sample, &tsk->cputime_expires))
+ return 1;
+ }
+ if (!task_cputime_zero(&sig->cputime_expires)) {
+ struct task_cputime group_sample;
+
+ thread_group_cputime(tsk, &group_sample);
+ if (task_cputime_expired(&group_sample, &sig->cputime_expires))
+ return 1;
+ }
+ return 0;
+}
+
/*
* This is called from the timer interrupt handler. The irq handler has
* already updated our counts. We need to check if any timers fire now.
@@ -1326,42 +1345,31 @@ void run_posix_cpu_timers(struct task_struct *tsk)
BUG_ON(!irqs_disabled());
-#define UNEXPIRED(clock) \
- (cputime_eq(tsk->it_##clock##_expires, cputime_zero) || \
- cputime_lt(clock##_ticks(tsk), tsk->it_##clock##_expires))
-
- if (UNEXPIRED(prof) && UNEXPIRED(virt) &&
- (tsk->it_sched_expires == 0 ||
- tsk->se.sum_exec_runtime < tsk->it_sched_expires))
+ /*
+ * The fast path checks that there are no expired thread or thread
+ * group timers. If that's so, just return.
+ */
+ if (!fastpath_timer_check(tsk))
return;
-#undef UNEXPIRED
-
+ spin_lock(&tsk->sighand->siglock);
/*
- * Double-check with locks held.
+ * Here we take off tsk->signal->cpu_timers[N] and
+ * tsk->cpu_timers[N] all the timers that are firing, and
+ * put them on the firing list.
*/
- read_lock(&tasklist_lock);
- if (likely(tsk->signal != NULL)) {
- spin_lock(&tsk->sighand->siglock);
+ check_thread_timers(tsk, &firing);
+ check_process_timers(tsk, &firing);
- /*
- * Here we take off tsk->cpu_timers[N] and tsk->signal->cpu_timers[N]
- * all the timers that are firing, and put them on the firing list.
- */
- check_thread_timers(tsk, &firing);
- check_process_timers(tsk, &firing);
-
- /*
- * We must release these locks before taking any timer's lock.
- * There is a potential race with timer deletion here, as the
- * siglock now protects our private firing list. We have set
- * the firing flag in each timer, so that a deletion attempt
- * that gets the timer lock before we do will give it up and
- * spin until we've taken care of that timer below.
- */
- spin_unlock(&tsk->sighand->siglock);
- }
- read_unlock(&tasklist_lock);
+ /*
+ * We must release these locks before taking any timer's lock.
+ * There is a potential race with timer deletion here, as the
+ * siglock now protects our private firing list. We have set
+ * the firing flag in each timer, so that a deletion attempt
+ * that gets the timer lock before we do will give it up and
+ * spin until we've taken care of that timer below.
+ */
+ spin_unlock(&tsk->sighand->siglock);
/*
* Now that all the timers on our list have the firing flag,
@@ -1389,10 +1397,9 @@ void run_posix_cpu_timers(struct task_struct *tsk)
/*
* Set one of the process-wide special case CPU timers.
- * The tasklist_lock and tsk->sighand->siglock must be held by the caller.
- * The oldval argument is null for the RLIMIT_CPU timer, where *newval is
- * absolute; non-null for ITIMER_*, where *newval is relative and we update
- * it to be absolute, *oldval is absolute and we update it to be relative.
+ * The tsk->sighand->siglock must be held by the caller.
+ * The *newval argument is relative and we update it to be absolute, *oldval
+ * is absolute and we update it to be relative.
*/
void set_process_cpu_timer(struct task_struct *tsk, unsigned int clock_idx,
cputime_t *newval, cputime_t *oldval)
@@ -1401,7 +1408,7 @@ void set_process_cpu_timer(struct task_struct *tsk, unsigned int clock_idx,
struct list_head *head;
BUG_ON(clock_idx == CPUCLOCK_SCHED);
- cpu_clock_sample_group_locked(clock_idx, tsk, &now);
+ cpu_clock_sample_group(clock_idx, tsk, &now);
if (oldval) {
if (!cputime_eq(*oldval, cputime_zero)) {
@@ -1435,13 +1442,14 @@ void set_process_cpu_timer(struct task_struct *tsk, unsigned int clock_idx,
cputime_ge(list_first_entry(head,
struct cpu_timer_list, entry)->expires.cpu,
*newval)) {
- /*
- * Rejigger each thread's expiry time so that one will
- * notice before we hit the process-cumulative expiry time.
- */
- union cpu_time_count expires = { .sched = 0 };
- expires.cpu = *newval;
- process_timer_rebalance(tsk, clock_idx, expires, now);
+ switch (clock_idx) {
+ case CPUCLOCK_PROF:
+ tsk->signal->cputime_expires.prof_exp = *newval;
+ break;
+ case CPUCLOCK_VIRT:
+ tsk->signal->cputime_expires.virt_exp = *newval;
+ break;
+ }
}
}
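
The core of the new tick-time fast path is task_cputime_expired(): a plain field-by-field comparison of a sampled task_cputime against cached expiry values, where a zero expiry means "no timer armed on that clock". A standalone sketch of that comparison logic, with uint64_t nanosecond counters standing in for cputime_t (illustrative; the names below mirror the hunk but are not the kernel types):

#include <stdint.h>
#include <stdio.h>

struct cputime_sample {
	uint64_t utime;			/* user time, ns */
	uint64_t stime;			/* system time, ns */
	uint64_t sum_exec_runtime;	/* scheduler runtime, ns */
};

/* An expiry field of zero means "no timer armed on that clock". */
static int sample_expired(const struct cputime_sample *sample,
			  const struct cputime_sample *expires)
{
	if (expires->utime && sample->utime >= expires->utime)
		return 1;				/* virtual (user-only) clock */
	if (expires->stime &&
	    sample->utime + sample->stime >= expires->stime)
		return 1;				/* profiling clock: user + system */
	if (expires->sum_exec_runtime &&
	    sample->sum_exec_runtime >= expires->sum_exec_runtime)
		return 1;				/* sched runtime clock */
	return 0;
}

int main(void)
{
	struct cputime_sample now  = { 900, 250, 2000 };
	struct cputime_sample caps = { 0, 1000, 0 };	/* only a profiling cap set */

	/* 900 + 250 >= 1000, so the slow path would be taken. */
	printf("expired: %d\n", sample_expired(&now, &caps));
	return 0;
}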
diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c
index 5131e5471169..b931d7cedbfa 100644
--- a/kernel/posix-timers.c
+++ b/kernel/posix-timers.c
@@ -223,6 +223,15 @@ static int posix_ktime_get_ts(clockid_t which_clock, struct timespec *tp)
}
/*
+ * Get monotonic time for posix timers
+ */
+static int posix_get_monotonic_raw(clockid_t which_clock, struct timespec *tp)
+{
+ getrawmonotonic(tp);
+ return 0;
+}
+
+/*
* Initialize everything, well, just everything in Posix clocks/timers ;)
*/
static __init int init_posix_timers(void)
@@ -235,9 +244,15 @@ static __init int init_posix_timers(void)
.clock_get = posix_ktime_get_ts,
.clock_set = do_posix_clock_nosettime,
};
+ struct k_clock clock_monotonic_raw = {
+ .clock_getres = hrtimer_get_res,
+ .clock_get = posix_get_monotonic_raw,
+ .clock_set = do_posix_clock_nosettime,
+ };
register_posix_clock(CLOCK_REALTIME, &clock_realtime);
register_posix_clock(CLOCK_MONOTONIC, &clock_monotonic);
+ register_posix_clock(CLOCK_MONOTONIC_RAW, &clock_monotonic_raw);
posix_timers_cache = kmem_cache_create("posix_timers_cache",
sizeof (struct k_itimer), 0, SLAB_PANIC,
@@ -298,6 +313,7 @@ void do_schedule_next_timer(struct siginfo *info)
int posix_timer_event(struct k_itimer *timr, int si_private)
{
+ int shared, ret;
/*
* FIXME: if ->sigq is queued we can race with
* dequeue_signal()->do_schedule_next_timer().
@@ -311,25 +327,10 @@ int posix_timer_event(struct k_itimer *timr, int si_private)
*/
timr->sigq->info.si_sys_private = si_private;
- timr->sigq->info.si_signo = timr->it_sigev_signo;
- timr->sigq->info.si_code = SI_TIMER;
- timr->sigq->info.si_tid = timr->it_id;
- timr->sigq->info.si_value = timr->it_sigev_value;
-
- if (timr->it_sigev_notify & SIGEV_THREAD_ID) {
- struct task_struct *leader;
- int ret = send_sigqueue(timr->sigq, timr->it_process, 0);
-
- if (likely(ret >= 0))
- return ret;
-
- timr->it_sigev_notify = SIGEV_SIGNAL;
- leader = timr->it_process->group_leader;
- put_task_struct(timr->it_process);
- timr->it_process = leader;
- }
-
- return send_sigqueue(timr->sigq, timr->it_process, 1);
+ shared = !(timr->it_sigev_notify & SIGEV_THREAD_ID);
+ ret = send_sigqueue(timr->sigq, timr->it_process, shared);
+ /* If we failed to send the signal the timer stops. */
+ return ret > 0;
}
EXPORT_SYMBOL_GPL(posix_timer_event);
@@ -468,11 +469,9 @@ sys_timer_create(const clockid_t which_clock,
struct sigevent __user *timer_event_spec,
timer_t __user * created_timer_id)
{
- int error = 0;
- struct k_itimer *new_timer = NULL;
- int new_timer_id;
- struct task_struct *process = NULL;
- unsigned long flags;
+ struct k_itimer *new_timer;
+ int error, new_timer_id;
+ struct task_struct *process;
sigevent_t event;
int it_id_set = IT_ID_NOT_SET;
@@ -490,12 +489,11 @@ sys_timer_create(const clockid_t which_clock,
goto out;
}
spin_lock_irq(&idr_lock);
- error = idr_get_new(&posix_timers_id, (void *) new_timer,
- &new_timer_id);
+ error = idr_get_new(&posix_timers_id, new_timer, &new_timer_id);
spin_unlock_irq(&idr_lock);
- if (error == -EAGAIN)
- goto retry;
- else if (error) {
+ if (error) {
+ if (error == -EAGAIN)
+ goto retry;
/*
* Weird looking, but we return EAGAIN if the IDR is
* full (proper POSIX return value for this)
@@ -526,67 +524,43 @@ sys_timer_create(const clockid_t which_clock,
error = -EFAULT;
goto out;
}
- new_timer->it_sigev_notify = event.sigev_notify;
- new_timer->it_sigev_signo = event.sigev_signo;
- new_timer->it_sigev_value = event.sigev_value;
-
- read_lock(&tasklist_lock);
- if ((process = good_sigevent(&event))) {
- /*
- * We may be setting up this process for another
- * thread. It may be exiting. To catch this
- * case the we check the PF_EXITING flag. If
- * the flag is not set, the siglock will catch
- * him before it is too late (in exit_itimers).
- *
- * The exec case is a bit more invloved but easy
- * to code. If the process is in our thread
- * group (and it must be or we would not allow
- * it here) and is doing an exec, it will cause
- * us to be killed. In this case it will wait
- * for us to die which means we can finish this
- * linkage with our last gasp. I.e. no code :)
- */
- spin_lock_irqsave(&process->sighand->siglock, flags);
- if (!(process->flags & PF_EXITING)) {
- new_timer->it_process = process;
- list_add(&new_timer->list,
- &process->signal->posix_timers);
- if (new_timer->it_sigev_notify == (SIGEV_SIGNAL|SIGEV_THREAD_ID))
- get_task_struct(process);
- spin_unlock_irqrestore(&process->sighand->siglock, flags);
- } else {
- spin_unlock_irqrestore(&process->sighand->siglock, flags);
- process = NULL;
- }
- }
- read_unlock(&tasklist_lock);
+ rcu_read_lock();
+ process = good_sigevent(&event);
+ if (process)
+ get_task_struct(process);
+ rcu_read_unlock();
if (!process) {
error = -EINVAL;
goto out;
}
} else {
- new_timer->it_sigev_notify = SIGEV_SIGNAL;
- new_timer->it_sigev_signo = SIGALRM;
- new_timer->it_sigev_value.sival_int = new_timer->it_id;
+ event.sigev_notify = SIGEV_SIGNAL;
+ event.sigev_signo = SIGALRM;
+ event.sigev_value.sival_int = new_timer->it_id;
process = current->group_leader;
- spin_lock_irqsave(&process->sighand->siglock, flags);
- new_timer->it_process = process;
- list_add(&new_timer->list, &process->signal->posix_timers);
- spin_unlock_irqrestore(&process->sighand->siglock, flags);
+ get_task_struct(process);
}
+ new_timer->it_sigev_notify = event.sigev_notify;
+ new_timer->sigq->info.si_signo = event.sigev_signo;
+ new_timer->sigq->info.si_value = event.sigev_value;
+ new_timer->sigq->info.si_tid = new_timer->it_id;
+ new_timer->sigq->info.si_code = SI_TIMER;
+
+ spin_lock_irq(&current->sighand->siglock);
+ new_timer->it_process = process;
+ list_add(&new_timer->list, &current->signal->posix_timers);
+ spin_unlock_irq(&current->sighand->siglock);
+
+ return 0;
/*
* In the case of the timer belonging to another task, after
* the task is unlocked, the timer is owned by the other task
* and may cease to exist at any time. Don't use or modify
* new_timer after the unlock call.
*/
-
out:
- if (error)
- release_posix_timer(new_timer, it_id_set);
-
+ release_posix_timer(new_timer, it_id_set);
return error;
}
@@ -597,7 +571,7 @@ out:
* the find to the timer lock. To avoid a dead lock, the timer id MUST
* be release with out holding the timer lock.
*/
-static struct k_itimer * lock_timer(timer_t timer_id, unsigned long *flags)
+static struct k_itimer *lock_timer(timer_t timer_id, unsigned long *flags)
{
struct k_itimer *timr;
/*
@@ -605,23 +579,20 @@ static struct k_itimer * lock_timer(timer_t timer_id, unsigned long *flags)
* flags part over to the timer lock. Must not let interrupts in
* while we are moving the lock.
*/
-
spin_lock_irqsave(&idr_lock, *flags);
- timr = (struct k_itimer *) idr_find(&posix_timers_id, (int) timer_id);
+ timr = idr_find(&posix_timers_id, (int)timer_id);
if (timr) {
spin_lock(&timr->it_lock);
-
- if ((timr->it_id != timer_id) || !(timr->it_process) ||
- !same_thread_group(timr->it_process, current)) {
- spin_unlock(&timr->it_lock);
- spin_unlock_irqrestore(&idr_lock, *flags);
- timr = NULL;
- } else
+ if (timr->it_process &&
+ same_thread_group(timr->it_process, current)) {
spin_unlock(&idr_lock);
- } else
- spin_unlock_irqrestore(&idr_lock, *flags);
+ return timr;
+ }
+ spin_unlock(&timr->it_lock);
+ }
+ spin_unlock_irqrestore(&idr_lock, *flags);
- return timr;
+ return NULL;
}
/*
@@ -862,8 +833,7 @@ retry_delete:
* This keeps any tasks waiting on the spin lock from thinking
* they got something (see the lock code above).
*/
- if (timer->it_sigev_notify == (SIGEV_SIGNAL|SIGEV_THREAD_ID))
- put_task_struct(timer->it_process);
+ put_task_struct(timer->it_process);
timer->it_process = NULL;
unlock_timer(timer, flags);
@@ -890,8 +860,7 @@ retry_delete:
* This keeps any tasks waiting on the spin lock from thinking
* they got something (see the lock code above).
*/
- if (timer->it_sigev_notify == (SIGEV_SIGNAL|SIGEV_THREAD_ID))
- put_task_struct(timer->it_process);
+ put_task_struct(timer->it_process);
timer->it_process = NULL;
unlock_timer(timer, flags);
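
sys_timer_create() now fills the siginfo template (si_signo, si_value, si_tid, si_code) once at creation and always takes a reference on the target task. From user space the contract is unchanged; a minimal sketch of the timer_create()/timer_settime() path whose kernel side is reworked above (standard POSIX calls; link with -lrt on older glibc):

#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <time.h>
#include <unistd.h>

static volatile sig_atomic_t fired_value;

static void on_timer(int sig, siginfo_t *si, void *ctx)
{
	(void)sig; (void)ctx;
	fired_value = si->si_value.sival_int;	/* value chosen below at create time */
}

int main(void)
{
	struct sigaction sa;
	struct sigevent sev;
	struct itimerspec its;
	timer_t tid;

	memset(&sa, 0, sizeof(sa));
	sa.sa_sigaction = on_timer;
	sa.sa_flags = SA_SIGINFO;
	sigaction(SIGRTMIN, &sa, NULL);

	memset(&sev, 0, sizeof(sev));
	sev.sigev_notify = SIGEV_SIGNAL;	/* deliver a signal to the process */
	sev.sigev_signo = SIGRTMIN;
	sev.sigev_value.sival_int = 42;
	if (timer_create(CLOCK_MONOTONIC, &sev, &tid) < 0)
		return 1;

	memset(&its, 0, sizeof(its));
	its.it_value.tv_sec = 1;		/* one-shot, fires after 1s */
	timer_settime(tid, 0, &its, NULL);

	pause();				/* returns once the handler has run */
	printf("timer fired, si_value = %d\n", (int)fired_value);
	timer_delete(tid);
	return 0;
}

The si_value read in the handler is the same value the kernel now copies into timr->sigq->info at create time rather than at each send.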
diff --git a/kernel/sched.c b/kernel/sched.c
index 6f230596bd0c..09a8c15748f1 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -4052,23 +4052,26 @@ DEFINE_PER_CPU(struct kernel_stat, kstat);
EXPORT_PER_CPU_SYMBOL(kstat);
/*
- * Return p->sum_exec_runtime plus any more ns on the sched_clock
- * that have not yet been banked in case the task is currently running.
+ * Return any ns on the sched_clock that have not yet been banked in
+ * @p in case that task is currently running.
*/
-unsigned long long task_sched_runtime(struct task_struct *p)
+unsigned long long task_delta_exec(struct task_struct *p)
{
unsigned long flags;
- u64 ns, delta_exec;
struct rq *rq;
+ u64 ns = 0;
rq = task_rq_lock(p, &flags);
- ns = p->se.sum_exec_runtime;
+
if (task_current(rq, p)) {
+ u64 delta_exec;
+
update_rq_clock(rq);
delta_exec = rq->clock - p->se.exec_start;
if ((s64)delta_exec > 0)
- ns += delta_exec;
+ ns = delta_exec;
}
+
task_rq_unlock(rq, &flags);
return ns;
@@ -4085,6 +4088,7 @@ void account_user_time(struct task_struct *p, cputime_t cputime)
cputime64_t tmp;
p->utime = cputime_add(p->utime, cputime);
+ account_group_user_time(p, cputime);
/* Add user time to cpustat. */
tmp = cputime_to_cputime64(cputime);
@@ -4109,6 +4113,7 @@ static void account_guest_time(struct task_struct *p, cputime_t cputime)
tmp = cputime_to_cputime64(cputime);
p->utime = cputime_add(p->utime, cputime);
+ account_group_user_time(p, cputime);
p->gtime = cputime_add(p->gtime, cputime);
cpustat->user = cputime64_add(cpustat->user, tmp);
@@ -4144,6 +4149,7 @@ void account_system_time(struct task_struct *p, int hardirq_offset,
}
p->stime = cputime_add(p->stime, cputime);
+ account_group_system_time(p, cputime);
/* Add system time to cpustat. */
tmp = cputime_to_cputime64(cputime);
@@ -4185,6 +4191,7 @@ void account_steal_time(struct task_struct *p, cputime_t steal)
if (p == rq->idle) {
p->stime = cputime_add(p->stime, steal);
+ account_group_system_time(p, steal);
if (atomic_read(&rq->nr_iowait) > 0)
cpustat->iowait = cputime64_add(cpustat->iowait, tmp);
else
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 18fd17172eb6..f604dae71316 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -449,6 +449,7 @@ static void update_curr(struct cfs_rq *cfs_rq)
struct task_struct *curtask = task_of(curr);
cpuacct_charge(curtask, delta_exec);
+ account_group_exec_runtime(curtask, delta_exec);
}
}
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index cdf5740ab03e..b446dc87494f 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -526,6 +526,8 @@ static void update_curr_rt(struct rq *rq)
schedstat_set(curr->se.exec_max, max(curr->se.exec_max, delta_exec));
curr->se.sum_exec_runtime += delta_exec;
+ account_group_exec_runtime(curr, delta_exec);
+
curr->se.exec_start = rq->clock;
cpuacct_charge(curr, delta_exec);
@@ -1458,7 +1460,7 @@ static void watchdog(struct rq *rq, struct task_struct *p)
p->rt.timeout++;
next = DIV_ROUND_UP(min(soft, hard), USEC_PER_SEC/HZ);
if (p->rt.timeout > next)
- p->it_sched_expires = p->se.sum_exec_runtime;
+ p->cputime_expires.sched_exp = p->se.sum_exec_runtime;
}
}
diff --git a/kernel/sched_stats.h b/kernel/sched_stats.h
index 8385d43987e2..b8c156979cf2 100644
--- a/kernel/sched_stats.h
+++ b/kernel/sched_stats.h
@@ -270,3 +270,89 @@ sched_info_switch(struct task_struct *prev, struct task_struct *next)
#define sched_info_switch(t, next) do { } while (0)
#endif /* CONFIG_SCHEDSTATS || CONFIG_TASK_DELAY_ACCT */
+/*
+ * The following are functions that support scheduler-internal time accounting.
+ * These functions are generally called at the timer tick. None of this depends
+ * on CONFIG_SCHEDSTATS.
+ */
+
+/**
+ * account_group_user_time - Maintain utime for a thread group.
+ *
+ * @tsk: Pointer to task structure.
+ * @cputime: Time value by which to increment the utime field of the
+ * thread_group_cputime structure.
+ *
+ * If thread group time is being maintained, get the structure for the
+ * running CPU and update the utime field there.
+ */
+static inline void account_group_user_time(struct task_struct *tsk,
+ cputime_t cputime)
+{
+ struct signal_struct *sig;
+
+ sig = tsk->signal;
+ if (unlikely(!sig))
+ return;
+ if (sig->cputime.totals) {
+ struct task_cputime *times;
+
+ times = per_cpu_ptr(sig->cputime.totals, get_cpu());
+ times->utime = cputime_add(times->utime, cputime);
+ put_cpu_no_resched();
+ }
+}
+
+/**
+ * account_group_system_time - Maintain stime for a thread group.
+ *
+ * @tsk: Pointer to task structure.
+ * @cputime: Time value by which to increment the stime field of the
+ * thread_group_cputime structure.
+ *
+ * If thread group time is being maintained, get the structure for the
+ * running CPU and update the stime field there.
+ */
+static inline void account_group_system_time(struct task_struct *tsk,
+ cputime_t cputime)
+{
+ struct signal_struct *sig;
+
+ sig = tsk->signal;
+ if (unlikely(!sig))
+ return;
+ if (sig->cputime.totals) {
+ struct task_cputime *times;
+
+ times = per_cpu_ptr(sig->cputime.totals, get_cpu());
+ times->stime = cputime_add(times->stime, cputime);
+ put_cpu_no_resched();
+ }
+}
+
+/**
+ * account_group_exec_runtime - Maintain exec runtime for a thread group.
+ *
+ * @tsk: Pointer to task structure.
+ * @ns: Time value by which to increment the sum_exec_runtime field
+ * of the thread_group_cputime structure.
+ *
+ * If thread group time is being maintained, get the structure for the
+ * running CPU and update the sum_exec_runtime field there.
+ */
+static inline void account_group_exec_runtime(struct task_struct *tsk,
+ unsigned long long ns)
+{
+ struct signal_struct *sig;
+
+ sig = tsk->signal;
+ if (unlikely(!sig))
+ return;
+ if (sig->cputime.totals) {
+ struct task_cputime *times;
+
+ times = per_cpu_ptr(sig->cputime.totals, get_cpu());
+ times->sum_exec_runtime += ns;
+ put_cpu_no_resched();
+ }
+}
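
Each account_group_*() helper bumps only the slot for the CPU it is running on; readers such as thread_group_cputime() sum every slot. A standalone pthread sketch of that write-cheap/read-summing pattern (plain array slots and GCC atomics stand in for alloc_percpu storage and the exclusivity get_cpu() provides; illustrative only, build with -pthread):

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

#define NSLOTS 8	/* stands in for the number of possible CPUs */

static uint64_t slot[NSLOTS];	/* one accumulator per "CPU" */

/* Writer side: each thread bumps only its own slot.  The atomics stand in
 * for the exclusivity the kernel helpers get from get_cpu(). */
static void *worker(void *arg)
{
	int idx = (int)(intptr_t)arg % NSLOTS;

	for (int i = 0; i < 1000000; i++)
		__atomic_add_fetch(&slot[idx], 1, __ATOMIC_RELAXED);
	return NULL;
}

/* Reader side: the group total is the sum over all slots, which is what
 * thread_group_cputime() does over the per-CPU totals. */
static uint64_t read_total(void)
{
	uint64_t sum = 0;

	for (int i = 0; i < NSLOTS; i++)
		sum += __atomic_load_n(&slot[i], __ATOMIC_RELAXED);
	return sum;
}

int main(void)
{
	pthread_t t[4];

	for (int i = 0; i < 4; i++)
		pthread_create(&t[i], NULL, worker, (void *)(intptr_t)i);
	for (int i = 0; i < 4; i++)
		pthread_join(t[i], NULL);
	printf("total = %llu\n", (unsigned long long)read_total());
	return 0;
}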
diff --git a/kernel/signal.c b/kernel/signal.c
index e661b01d340f..6eea5826d618 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -1338,6 +1338,7 @@ int do_notify_parent(struct task_struct *tsk, int sig)
struct siginfo info;
unsigned long flags;
struct sighand_struct *psig;
+ struct task_cputime cputime;
int ret = sig;
BUG_ON(sig == -1);
@@ -1368,10 +1369,9 @@ int do_notify_parent(struct task_struct *tsk, int sig)
info.si_uid = tsk->uid;
- info.si_utime = cputime_to_clock_t(cputime_add(tsk->utime,
- tsk->signal->utime));
- info.si_stime = cputime_to_clock_t(cputime_add(tsk->stime,
- tsk->signal->stime));
+ thread_group_cputime(tsk, &cputime);
+ info.si_utime = cputime_to_jiffies(cputime.utime);
+ info.si_stime = cputime_to_jiffies(cputime.stime);
info.si_status = tsk->exit_code & 0x7f;
if (tsk->exit_code & 0x80)
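
do_notify_parent() now fills si_utime/si_stime from thread_group_cputime() totals. A sketch of the parent-side view of those fields via a SIGCHLD handler (standard POSIX calls, illustrative only):

#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <sys/wait.h>
#include <unistd.h>

static volatile long child_utime = -1, child_stime = -1;

static void on_chld(int sig, siginfo_t *si, void *ctx)
{
	(void)sig; (void)ctx;
	child_utime = si->si_utime;	/* user CPU time of the exiting group */
	child_stime = si->si_stime;	/* system CPU time of the exiting group */
}

int main(void)
{
	struct sigaction sa;

	memset(&sa, 0, sizeof(sa));
	sa.sa_sigaction = on_chld;
	sa.sa_flags = SA_SIGINFO;
	sigaction(SIGCHLD, &sa, NULL);

	if (fork() == 0) {		/* child: burn a little CPU, then exit */
		volatile unsigned long i;
		for (i = 0; i < 100000000UL; i++)
			;
		_exit(0);
	}
	pause();			/* interrupted by SIGCHLD */
	wait(NULL);			/* reap the zombie */
	printf("SIGCHLD: si_utime=%ld si_stime=%ld\n", child_utime, child_stime);
	return 0;
}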
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 83ba21a13bd4..7110daeb9a90 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -267,16 +267,12 @@ asmlinkage void do_softirq(void)
*/
void irq_enter(void)
{
-#ifdef CONFIG_NO_HZ
int cpu = smp_processor_id();
+
if (idle_cpu(cpu) && !in_interrupt())
- tick_nohz_stop_idle(cpu);
-#endif
+ tick_check_idle(cpu);
+
__irq_enter();
-#ifdef CONFIG_NO_HZ
- if (idle_cpu(cpu))
- tick_nohz_update_jiffies();
-#endif
}
#ifdef __ARCH_IRQ_EXIT_IRQS_DISABLED
diff --git a/kernel/sys.c b/kernel/sys.c
index 0bc8fa3c2288..53879cdae483 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -853,38 +853,28 @@ asmlinkage long sys_setfsgid(gid_t gid)
return old_fsgid;
}
+void do_sys_times(struct tms *tms)
+{
+ struct task_cputime cputime;
+ cputime_t cutime, cstime;
+
+ spin_lock_irq(&current->sighand->siglock);
+ thread_group_cputime(current, &cputime);
+ cutime = current->signal->cutime;
+ cstime = current->signal->cstime;
+ spin_unlock_irq(&current->sighand->siglock);
+ tms->tms_utime = cputime_to_clock_t(cputime.utime);
+ tms->tms_stime = cputime_to_clock_t(cputime.stime);
+ tms->tms_cutime = cputime_to_clock_t(cutime);
+ tms->tms_cstime = cputime_to_clock_t(cstime);
+}
+
asmlinkage long sys_times(struct tms __user * tbuf)
{
- /*
- * In the SMP world we might just be unlucky and have one of
- * the times increment as we use it. Since the value is an
- * atomically safe type this is just fine. Conceptually its
- * as if the syscall took an instant longer to occur.
- */
if (tbuf) {
struct tms tmp;
- struct task_struct *tsk = current;
- struct task_struct *t;
- cputime_t utime, stime, cutime, cstime;
-
- spin_lock_irq(&tsk->sighand->siglock);
- utime = tsk->signal->utime;
- stime = tsk->signal->stime;
- t = tsk;
- do {
- utime = cputime_add(utime, t->utime);
- stime = cputime_add(stime, t->stime);
- t = next_thread(t);
- } while (t != tsk);
-
- cutime = tsk->signal->cutime;
- cstime = tsk->signal->cstime;
- spin_unlock_irq(&tsk->sighand->siglock);
-
- tmp.tms_utime = cputime_to_clock_t(utime);
- tmp.tms_stime = cputime_to_clock_t(stime);
- tmp.tms_cutime = cputime_to_clock_t(cutime);
- tmp.tms_cstime = cputime_to_clock_t(cstime);
+
+ do_sys_times(&tmp);
if (copy_to_user(tbuf, &tmp, sizeof(struct tms)))
return -EFAULT;
}
@@ -1449,7 +1439,6 @@ asmlinkage long sys_old_getrlimit(unsigned int resource, struct rlimit __user *r
asmlinkage long sys_setrlimit(unsigned int resource, struct rlimit __user *rlim)
{
struct rlimit new_rlim, *old_rlim;
- unsigned long it_prof_secs;
int retval;
if (resource >= RLIM_NLIMITS)
@@ -1503,18 +1492,7 @@ asmlinkage long sys_setrlimit(unsigned int resource, struct rlimit __user *rlim)
if (new_rlim.rlim_cur == RLIM_INFINITY)
goto out;
- it_prof_secs = cputime_to_secs(current->signal->it_prof_expires);
- if (it_prof_secs == 0 || new_rlim.rlim_cur <= it_prof_secs) {
- unsigned long rlim_cur = new_rlim.rlim_cur;
- cputime_t cputime;
-
- cputime = secs_to_cputime(rlim_cur);
- read_lock(&tasklist_lock);
- spin_lock_irq(&current->sighand->siglock);
- set_process_cpu_timer(current, CPUCLOCK_PROF, &cputime, NULL);
- spin_unlock_irq(&current->sighand->siglock);
- read_unlock(&tasklist_lock);
- }
+ update_rlimit_cpu(new_rlim.rlim_cur);
out:
return 0;
}
@@ -1552,11 +1530,8 @@ out:
*
*/
-static void accumulate_thread_rusage(struct task_struct *t, struct rusage *r,
- cputime_t *utimep, cputime_t *stimep)
+static void accumulate_thread_rusage(struct task_struct *t, struct rusage *r)
{
- *utimep = cputime_add(*utimep, t->utime);
- *stimep = cputime_add(*stimep, t->stime);
r->ru_nvcsw += t->nvcsw;
r->ru_nivcsw += t->nivcsw;
r->ru_minflt += t->min_flt;
@@ -1570,12 +1545,13 @@ static void k_getrusage(struct task_struct *p, int who, struct rusage *r)
struct task_struct *t;
unsigned long flags;
cputime_t utime, stime;
+ struct task_cputime cputime;
memset((char *) r, 0, sizeof *r);
utime = stime = cputime_zero;
if (who == RUSAGE_THREAD) {
- accumulate_thread_rusage(p, r, &utime, &stime);
+ accumulate_thread_rusage(p, r);
goto out;
}
@@ -1598,8 +1574,9 @@ static void k_getrusage(struct task_struct *p, int who, struct rusage *r)
break;
case RUSAGE_SELF:
- utime = cputime_add(utime, p->signal->utime);
- stime = cputime_add(stime, p->signal->stime);
+ thread_group_cputime(p, &cputime);
+ utime = cputime_add(utime, cputime.utime);
+ stime = cputime_add(stime, cputime.stime);
r->ru_nvcsw += p->signal->nvcsw;
r->ru_nivcsw += p->signal->nivcsw;
r->ru_minflt += p->signal->min_flt;
@@ -1608,7 +1585,7 @@ static void k_getrusage(struct task_struct *p, int who, struct rusage *r)
r->ru_oublock += p->signal->oublock;
t = p;
do {
- accumulate_thread_rusage(t, r, &utime, &stime);
+ accumulate_thread_rusage(t, r);
t = next_thread(t);
} while (t != p);
break;
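
The kernel/sys.c hunks above all follow the same pattern: the per-thread loops that summed utime/stime under siglock are replaced by a single thread_group_cputime() call. A minimal, hypothetical consumer of that helper is sketched below; report_group_cpu() and its printk message are invented, only thread_group_cputime(), struct task_cputime and cputime_to_clock_t() come from this series.

#include <linux/kernel.h>
#include <linux/sched.h>

/* Hypothetical example: sum the CPU time of a whole thread group the
 * way the reworked sys_times()/k_getrusage() paths above do it. */
static void report_group_cpu(struct task_struct *p)
{
        struct task_cputime cputime;

        thread_group_cputime(p, &cputime);
        printk(KERN_INFO "%s: utime=%lu stime=%lu\n", p->comm,
               (unsigned long)cputime_to_clock_t(cputime.utime),
               (unsigned long)cputime_to_clock_t(cputime.stime));
}
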
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
index 093d4acf993b..9ed2eec97526 100644
--- a/kernel/time/clocksource.c
+++ b/kernel/time/clocksource.c
@@ -325,6 +325,9 @@ int clocksource_register(struct clocksource *c)
unsigned long flags;
int ret;
+ /* save mult_orig on registration */
+ c->mult_orig = c->mult;
+
spin_lock_irqsave(&clocksource_lock, flags);
ret = clocksource_enqueue(c);
if (!ret)
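
For clocksource drivers the new ->mult_orig field is transparent: a driver keeps filling in only ->mult, and clocksource_register() snapshots it into ->mult_orig so that later NTP adjustments of ->mult do not skew the raw clock. A hypothetical registration sketch under that assumption; my_cs, my_cs_read(), read_my_hw_counter() and MY_CS_FREQ_HZ are made up for illustration.

#include <linux/clocksource.h>
#include <linux/init.h>

#define MY_CS_FREQ_HZ   1000000         /* made-up counter frequency */

extern u32 read_my_hw_counter(void);    /* made-up hardware accessor */

static cycle_t my_cs_read(void)
{
        return (cycle_t)read_my_hw_counter();
}

static struct clocksource my_cs = {
        .name   = "my_cs",
        .rating = 200,
        .read   = my_cs_read,
        .mask   = CLOCKSOURCE_MASK(32),
        .shift  = 20,
};

static int __init my_cs_init(void)
{
        my_cs.mult = clocksource_hz2mult(MY_CS_FREQ_HZ, my_cs.shift);
        /* ->mult_orig is filled in from ->mult by the hunk above */
        return clocksource_register(&my_cs);
}
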
diff --git a/kernel/time/jiffies.c b/kernel/time/jiffies.c
index 4c256fdb8875..1ca99557e929 100644
--- a/kernel/time/jiffies.c
+++ b/kernel/time/jiffies.c
@@ -61,6 +61,7 @@ struct clocksource clocksource_jiffies = {
.read = jiffies_read,
.mask = 0xffffffff, /*32bits*/
.mult = NSEC_PER_JIFFY << JIFFIES_SHIFT, /* details above */
+ .mult_orig = NSEC_PER_JIFFY << JIFFIES_SHIFT,
.shift = JIFFIES_SHIFT,
};
diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c
index 1ad46f3df6e7..1a20715bfd6e 100644
--- a/kernel/time/ntp.c
+++ b/kernel/time/ntp.c
@@ -10,13 +10,13 @@
#include <linux/mm.h>
#include <linux/time.h>
-#include <linux/timer.h>
#include <linux/timex.h>
#include <linux/jiffies.h>
#include <linux/hrtimer.h>
#include <linux/capability.h>
#include <linux/math64.h>
#include <linux/clocksource.h>
+#include <linux/workqueue.h>
#include <asm/timex.h>
/*
@@ -218,11 +218,11 @@ void second_overflow(void)
/* Disable the cmos update - used by virtualization and embedded */
int no_sync_cmos_clock __read_mostly;
-static void sync_cmos_clock(unsigned long dummy);
+static void sync_cmos_clock(struct work_struct *work);
-static DEFINE_TIMER(sync_cmos_timer, sync_cmos_clock, 0, 0);
+static DECLARE_DELAYED_WORK(sync_cmos_work, sync_cmos_clock);
-static void sync_cmos_clock(unsigned long dummy)
+static void sync_cmos_clock(struct work_struct *work)
{
struct timespec now, next;
int fail = 1;
@@ -258,13 +258,13 @@ static void sync_cmos_clock(unsigned long dummy)
next.tv_sec++;
next.tv_nsec -= NSEC_PER_SEC;
}
- mod_timer(&sync_cmos_timer, jiffies + timespec_to_jiffies(&next));
+ schedule_delayed_work(&sync_cmos_work, timespec_to_jiffies(&next));
}
static void notify_cmos_timer(void)
{
if (!no_sync_cmos_clock)
- mod_timer(&sync_cmos_timer, jiffies + 1);
+ schedule_delayed_work(&sync_cmos_work, 0);
}
#else
@@ -277,38 +277,50 @@ static inline void notify_cmos_timer(void) { }
int do_adjtimex(struct timex *txc)
{
struct timespec ts;
- long save_adjust, sec;
int result;
- /* In order to modify anything, you gotta be super-user! */
- if (txc->modes && !capable(CAP_SYS_TIME))
- return -EPERM;
-
- /* Now we validate the data before disabling interrupts */
-
- if ((txc->modes & ADJ_OFFSET_SINGLESHOT) == ADJ_OFFSET_SINGLESHOT) {
+ /* Validate the data before disabling interrupts */
+ if (txc->modes & ADJ_ADJTIME) {
/* singleshot must not be used with any other mode bits */
- if (txc->modes & ~ADJ_OFFSET_SS_READ)
+ if (!(txc->modes & ADJ_OFFSET_SINGLESHOT))
return -EINVAL;
+ if (!(txc->modes & ADJ_OFFSET_READONLY) &&
+ !capable(CAP_SYS_TIME))
+ return -EPERM;
+ } else {
+ /* In order to modify anything, you gotta be super-user! */
+ if (txc->modes && !capable(CAP_SYS_TIME))
+ return -EPERM;
+
+ /* if the quartz is off by more than 10%, something is VERY wrong! */
+ if (txc->modes & ADJ_TICK &&
+ (txc->tick < 900000/USER_HZ ||
+ txc->tick > 1100000/USER_HZ))
+ return -EINVAL;
+
+ if (txc->modes & ADJ_STATUS && time_state != TIME_OK)
+ hrtimer_cancel(&leap_timer);
}
- /* if the quartz is off by more than 10% something is VERY wrong ! */
- if (txc->modes & ADJ_TICK)
- if (txc->tick < 900000/USER_HZ ||
- txc->tick > 1100000/USER_HZ)
- return -EINVAL;
-
- if (time_state != TIME_OK && txc->modes & ADJ_STATUS)
- hrtimer_cancel(&leap_timer);
getnstimeofday(&ts);
write_seqlock_irq(&xtime_lock);
- /* Save for later - semantics of adjtime is to return old value */
- save_adjust = time_adjust;
-
/* If there are input parameters, then process them */
+ if (txc->modes & ADJ_ADJTIME) {
+ long save_adjust = time_adjust;
+
+ if (!(txc->modes & ADJ_OFFSET_READONLY)) {
+ /* adjtime() is independent from ntp_adjtime() */
+ time_adjust = txc->offset;
+ ntp_update_frequency();
+ }
+ txc->offset = save_adjust;
+ goto adj_done;
+ }
if (txc->modes) {
+ long sec;
+
if (txc->modes & ADJ_STATUS) {
if ((time_status & STA_PLL) &&
!(txc->status & STA_PLL)) {
@@ -375,13 +387,8 @@ int do_adjtimex(struct timex *txc)
if (txc->modes & ADJ_TAI && txc->constant > 0)
time_tai = txc->constant;
- if (txc->modes & ADJ_OFFSET) {
- if (txc->modes == ADJ_OFFSET_SINGLESHOT)
- /* adjtime() is independent from ntp_adjtime() */
- time_adjust = txc->offset;
- else
- ntp_update_offset(txc->offset);
- }
+ if (txc->modes & ADJ_OFFSET)
+ ntp_update_offset(txc->offset);
if (txc->modes & ADJ_TICK)
tick_usec = txc->tick;
@@ -389,22 +396,18 @@ int do_adjtimex(struct timex *txc)
ntp_update_frequency();
}
+ txc->offset = shift_right(time_offset * NTP_INTERVAL_FREQ,
+ NTP_SCALE_SHIFT);
+ if (!(time_status & STA_NANO))
+ txc->offset /= NSEC_PER_USEC;
+
+adj_done:
result = time_state; /* mostly `TIME_OK' */
if (time_status & (STA_UNSYNC|STA_CLOCKERR))
result = TIME_ERROR;
- if ((txc->modes == ADJ_OFFSET_SINGLESHOT) ||
- (txc->modes == ADJ_OFFSET_SS_READ))
- txc->offset = save_adjust;
- else {
- txc->offset = shift_right(time_offset * NTP_INTERVAL_FREQ,
- NTP_SCALE_SHIFT);
- if (!(time_status & STA_NANO))
- txc->offset /= NSEC_PER_USEC;
- }
- txc->freq = shift_right((s32)(time_freq >> PPM_SCALE_INV_SHIFT) *
- (s64)PPM_SCALE_INV,
- NTP_SCALE_SHIFT);
+ txc->freq = shift_right((time_freq >> PPM_SCALE_INV_SHIFT) *
+ (s64)PPM_SCALE_INV, NTP_SCALE_SHIFT);
txc->maxerror = time_maxerror;
txc->esterror = time_esterror;
txc->status = time_status;
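
The reworked do_adjtimex() splits the old adjtime() emulation (the ADJ_ADJTIME branch) from the full ntp_adjtime() path, and the read-only single-shot query no longer requires CAP_SYS_TIME. A small userspace sketch of that read-only query follows; the fallback define matches the kernel ABI value, since older libc headers may not expose ADJ_OFFSET_SS_READ.

#include <stdio.h>
#include <sys/timex.h>

#ifndef ADJ_OFFSET_SS_READ
#define ADJ_OFFSET_SS_READ 0xa001      /* kernel ABI value */
#endif

int main(void)
{
        struct timex tx = { .modes = ADJ_OFFSET_SS_READ };

        /* Read the remaining adjtime() correction without changing it */
        if (adjtimex(&tx) == -1) {
                perror("adjtimex");
                return 1;
        }
        printf("remaining adjtime() slew: %ld\n", (long)tx.offset);
        return 0;
}
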
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
index cb01cd8f919b..f98a1b7b16e9 100644
--- a/kernel/time/tick-broadcast.c
+++ b/kernel/time/tick-broadcast.c
@@ -384,6 +384,19 @@ int tick_resume_broadcast_oneshot(struct clock_event_device *bc)
}
/*
+ * Called from irq_enter() when idle was interrupted, to re-enable the
+ * per-cpu device.
+ */
+void tick_check_oneshot_broadcast(int cpu)
+{
+ if (cpu_isset(cpu, tick_broadcast_oneshot_mask)) {
+ struct tick_device *td = &per_cpu(tick_cpu_device, cpu);
+
+ clockevents_set_mode(td->evtdev, CLOCK_EVT_MODE_ONESHOT);
+ }
+}
+
+/*
* Handle oneshot mode broadcasting
*/
static void tick_handle_oneshot_broadcast(struct clock_event_device *dev)
diff --git a/kernel/time/tick-internal.h b/kernel/time/tick-internal.h
index 469248782c23..b1c05bf75ee0 100644
--- a/kernel/time/tick-internal.h
+++ b/kernel/time/tick-internal.h
@@ -36,6 +36,7 @@ extern void tick_broadcast_switch_to_oneshot(void);
extern void tick_shutdown_broadcast_oneshot(unsigned int *cpup);
extern int tick_resume_broadcast_oneshot(struct clock_event_device *bc);
extern int tick_broadcast_oneshot_active(void);
+extern void tick_check_oneshot_broadcast(int cpu);
# else /* BROADCAST */
static inline void tick_broadcast_setup_oneshot(struct clock_event_device *bc)
{
@@ -45,6 +46,7 @@ static inline void tick_broadcast_oneshot_control(unsigned long reason) { }
static inline void tick_broadcast_switch_to_oneshot(void) { }
static inline void tick_shutdown_broadcast_oneshot(unsigned int *cpup) { }
static inline int tick_broadcast_oneshot_active(void) { return 0; }
+static inline void tick_check_oneshot_broadcast(int cpu) { }
# endif /* !BROADCAST */
#else /* !ONESHOT */
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index b711ffcb106c..0581c11fe6c6 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -155,7 +155,7 @@ void tick_nohz_update_jiffies(void)
touch_softlockup_watchdog();
}
-void tick_nohz_stop_idle(int cpu)
+static void tick_nohz_stop_idle(int cpu)
{
struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
@@ -377,6 +377,32 @@ ktime_t tick_nohz_get_sleep_length(void)
return ts->sleep_length;
}
+static void tick_nohz_restart(struct tick_sched *ts, ktime_t now)
+{
+ hrtimer_cancel(&ts->sched_timer);
+ ts->sched_timer.expires = ts->idle_tick;
+
+ while (1) {
+ /* Forward the time to expire in the future */
+ hrtimer_forward(&ts->sched_timer, now, tick_period);
+
+ if (ts->nohz_mode == NOHZ_MODE_HIGHRES) {
+ hrtimer_start(&ts->sched_timer,
+ ts->sched_timer.expires,
+ HRTIMER_MODE_ABS);
+ /* Check, if the timer was already in the past */
+ if (hrtimer_active(&ts->sched_timer))
+ break;
+ } else {
+ if (!tick_program_event(ts->sched_timer.expires, 0))
+ break;
+ }
+ /* Update jiffies and reread time */
+ tick_do_update_jiffies64(now);
+ now = ktime_get();
+ }
+}
+
/**
* tick_nohz_restart_sched_tick - restart the idle tick from the idle task
*
@@ -430,28 +456,7 @@ void tick_nohz_restart_sched_tick(void)
*/
ts->tick_stopped = 0;
ts->idle_exittime = now;
- hrtimer_cancel(&ts->sched_timer);
- ts->sched_timer.expires = ts->idle_tick;
-
- while (1) {
- /* Forward the time to expire in the future */
- hrtimer_forward(&ts->sched_timer, now, tick_period);
-
- if (ts->nohz_mode == NOHZ_MODE_HIGHRES) {
- hrtimer_start(&ts->sched_timer,
- ts->sched_timer.expires,
- HRTIMER_MODE_ABS);
- /* Check, if the timer was already in the past */
- if (hrtimer_active(&ts->sched_timer))
- break;
- } else {
- if (!tick_program_event(ts->sched_timer.expires, 0))
- break;
- }
- /* Update jiffies and reread time */
- tick_do_update_jiffies64(now);
- now = ktime_get();
- }
+ tick_nohz_restart(ts, now);
local_irq_enable();
}
@@ -503,10 +508,6 @@ static void tick_nohz_handler(struct clock_event_device *dev)
update_process_times(user_mode(regs));
profile_tick(CPU_PROFILING);
- /* Do not restart, when we are in the idle loop */
- if (ts->tick_stopped)
- return;
-
while (tick_nohz_reprogram(ts, now)) {
now = ktime_get();
tick_do_update_jiffies64(now);
@@ -552,6 +553,27 @@ static void tick_nohz_switch_to_nohz(void)
smp_processor_id());
}
+/*
+ * When NOHZ is enabled and the tick is stopped, we need to kick the
+ * tick timer from irq_enter() so that the jiffies update is kept
+ * alive during long-running softirqs. That's ugly as hell, but
+ * correctness comes first, even though the offending softirq really
+ * ought to be fixed in its own right.
+ *
+ * Note: this is different from tick_nohz_restart(). We just kick the
+ * timer and do not touch the other magic bits which need to be handled
+ * when idle is left.
+ */
+static void tick_nohz_kick_tick(int cpu)
+{
+ struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
+
+ if (!ts->tick_stopped)
+ return;
+
+ tick_nohz_restart(ts, ktime_get());
+}
+
#else
static inline void tick_nohz_switch_to_nohz(void) { }
@@ -559,6 +581,19 @@ static inline void tick_nohz_switch_to_nohz(void) { }
#endif /* NO_HZ */
/*
+ * Called from irq_enter to notify about the possible interruption of idle()
+ */
+void tick_check_idle(int cpu)
+{
+ tick_check_oneshot_broadcast(cpu);
+#ifdef CONFIG_NO_HZ
+ tick_nohz_stop_idle(cpu);
+ tick_nohz_update_jiffies();
+ tick_nohz_kick_tick(cpu);
+#endif
+}
+
+/*
* High resolution timer specific code
*/
#ifdef CONFIG_HIGH_RES_TIMERS
@@ -611,10 +646,6 @@ static enum hrtimer_restart tick_sched_timer(struct hrtimer *timer)
profile_tick(CPU_PROFILING);
}
- /* Do not restart, when we are in the idle loop */
- if (ts->tick_stopped)
- return HRTIMER_NORESTART;
-
hrtimer_forward(timer, now, tick_period);
return HRTIMER_RESTART;
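
tick_check_idle() is the single hook the interrupt entry code is expected to call; the matching kernel/softirq.c change is part of this merge (see the diffstat) but not shown here. Below is a simplified sketch of that caller side only, with RCU and accounting details omitted; it is not the literal softirq.c hunk.

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/hardirq.h>
#include <linux/tick.h>

/* Simplified sketch: kick the nohz machinery when an interrupt hits
 * an idle CPU, before entering the hardirq context proper. */
void irq_enter(void)
{
        int cpu = smp_processor_id();

        if (idle_cpu(cpu) && !in_interrupt())
                tick_check_idle(cpu);

        __irq_enter();
}
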
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index e91c29f961c9..e7acfb482a68 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -58,27 +58,26 @@ struct clocksource *clock;
#ifdef CONFIG_GENERIC_TIME
/**
- * __get_nsec_offset - Returns nanoseconds since last call to periodic_hook
+ * clocksource_forward_now - update clock to the current time
*
- * private function, must hold xtime_lock lock when being
- * called. Returns the number of nanoseconds since the
- * last call to update_wall_time() (adjusted by NTP scaling)
+ * Forward the current clock to account for the time elapsed since the
+ * last call to update_wall_time(). This is useful before significant
+ * clock changes, as it avoids having to deal with that offset explicitly.
*/
-static inline s64 __get_nsec_offset(void)
+static void clocksource_forward_now(void)
{
cycle_t cycle_now, cycle_delta;
- s64 ns_offset;
+ s64 nsec;
- /* read clocksource: */
cycle_now = clocksource_read(clock);
-
- /* calculate the delta since the last update_wall_time: */
cycle_delta = (cycle_now - clock->cycle_last) & clock->mask;
+ clock->cycle_last = cycle_now;
- /* convert to nanoseconds: */
- ns_offset = cyc2ns(clock, cycle_delta);
+ nsec = cyc2ns(clock, cycle_delta);
+ timespec_add_ns(&xtime, nsec);
- return ns_offset;
+ nsec = ((s64)cycle_delta * clock->mult_orig) >> clock->shift;
+ clock->raw_time.tv_nsec += nsec;
}
/**
@@ -89,6 +88,7 @@ static inline s64 __get_nsec_offset(void)
*/
void getnstimeofday(struct timespec *ts)
{
+ cycle_t cycle_now, cycle_delta;
unsigned long seq;
s64 nsecs;
@@ -96,7 +96,15 @@ void getnstimeofday(struct timespec *ts)
seq = read_seqbegin(&xtime_lock);
*ts = xtime;
- nsecs = __get_nsec_offset();
+
+ /* read clocksource: */
+ cycle_now = clocksource_read(clock);
+
+ /* calculate the delta since the last update_wall_time: */
+ cycle_delta = (cycle_now - clock->cycle_last) & clock->mask;
+
+ /* convert to nanoseconds: */
+ nsecs = cyc2ns(clock, cycle_delta);
} while (read_seqretry(&xtime_lock, seq));
@@ -129,22 +137,22 @@ EXPORT_SYMBOL(do_gettimeofday);
*/
int do_settimeofday(struct timespec *tv)
{
+ struct timespec ts_delta;
unsigned long flags;
- time_t wtm_sec, sec = tv->tv_sec;
- long wtm_nsec, nsec = tv->tv_nsec;
if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
return -EINVAL;
write_seqlock_irqsave(&xtime_lock, flags);
- nsec -= __get_nsec_offset();
+ clocksource_forward_now();
+
+ ts_delta.tv_sec = tv->tv_sec - xtime.tv_sec;
+ ts_delta.tv_nsec = tv->tv_nsec - xtime.tv_nsec;
+ wall_to_monotonic = timespec_sub(wall_to_monotonic, ts_delta);
- wtm_sec = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec);
- wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec);
+ xtime = *tv;
- set_normalized_timespec(&xtime, sec, nsec);
- set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);
update_xtime_cache(0);
clock->error = 0;
@@ -170,22 +178,19 @@ EXPORT_SYMBOL(do_settimeofday);
static void change_clocksource(void)
{
struct clocksource *new;
- cycle_t now;
- u64 nsec;
new = clocksource_get_next();
if (clock == new)
return;
- new->cycle_last = 0;
- now = clocksource_read(new);
- nsec = __get_nsec_offset();
- timespec_add_ns(&xtime, nsec);
+ clocksource_forward_now();
- clock = new;
- clock->cycle_last = now;
+ new->raw_time = clock->raw_time;
+ clock = new;
+ clock->cycle_last = 0;
+ clock->cycle_last = clocksource_read(new);
clock->error = 0;
clock->xtime_nsec = 0;
clocksource_calculate_interval(clock, NTP_INTERVAL_LENGTH);
@@ -200,11 +205,44 @@ static void change_clocksource(void)
*/
}
#else
+static inline void clocksource_forward_now(void) { }
static inline void change_clocksource(void) { }
-static inline s64 __get_nsec_offset(void) { return 0; }
#endif
/**
+ * getrawmonotonic - Returns the raw monotonic time in a timespec
+ * @ts: pointer to the timespec to be set
+ *
+ * Returns the raw monotonic time (completely unmodified by NTP)
+ */
+void getrawmonotonic(struct timespec *ts)
+{
+ unsigned long seq;
+ s64 nsecs;
+ cycle_t cycle_now, cycle_delta;
+
+ do {
+ seq = read_seqbegin(&xtime_lock);
+
+ /* read clocksource: */
+ cycle_now = clocksource_read(clock);
+
+ /* calculate the delta since the last update_wall_time: */
+ cycle_delta = (cycle_now - clock->cycle_last) & clock->mask;
+
+ /* convert to nanoseconds: */
+ nsecs = ((s64)cycle_delta * clock->mult_orig) >> clock->shift;
+
+ *ts = clock->raw_time;
+
+ } while (read_seqretry(&xtime_lock, seq));
+
+ timespec_add_ns(ts, nsecs);
+}
+EXPORT_SYMBOL(getrawmonotonic);
+
+
+/**
* timekeeping_valid_for_hres - Check if timekeeping is suitable for hres
*/
int timekeeping_valid_for_hres(void)
@@ -265,8 +303,6 @@ void __init timekeeping_init(void)
static int timekeeping_suspended;
/* time in seconds when suspend began */
static unsigned long timekeeping_suspend_time;
-/* xtime offset when we went into suspend */
-static s64 timekeeping_suspend_nsecs;
/**
* timekeeping_resume - Resumes the generic timekeeping subsystem.
@@ -292,8 +328,6 @@ static int timekeeping_resume(struct sys_device *dev)
wall_to_monotonic.tv_sec -= sleep_length;
total_sleep_time += sleep_length;
}
- /* Make sure that we have the correct xtime reference */
- timespec_add_ns(&xtime, timekeeping_suspend_nsecs);
update_xtime_cache(0);
/* re-base the last cycle value */
clock->cycle_last = 0;
@@ -319,8 +353,7 @@ static int timekeeping_suspend(struct sys_device *dev, pm_message_t state)
timekeeping_suspend_time = read_persistent_clock();
write_seqlock_irqsave(&xtime_lock, flags);
- /* Get the current xtime offset */
- timekeeping_suspend_nsecs = __get_nsec_offset();
+ clocksource_forward_now();
timekeeping_suspended = 1;
write_sequnlock_irqrestore(&xtime_lock, flags);
@@ -454,23 +487,29 @@ void update_wall_time(void)
#else
offset = clock->cycle_interval;
#endif
- clock->xtime_nsec += (s64)xtime.tv_nsec << clock->shift;
+ clock->xtime_nsec = (s64)xtime.tv_nsec << clock->shift;
/* normally this loop will run just once, however in the
* case of lost or late ticks, it will accumulate correctly.
*/
while (offset >= clock->cycle_interval) {
/* accumulate one interval */
- clock->xtime_nsec += clock->xtime_interval;
- clock->cycle_last += clock->cycle_interval;
offset -= clock->cycle_interval;
+ clock->cycle_last += clock->cycle_interval;
+ clock->xtime_nsec += clock->xtime_interval;
if (clock->xtime_nsec >= (u64)NSEC_PER_SEC << clock->shift) {
clock->xtime_nsec -= (u64)NSEC_PER_SEC << clock->shift;
xtime.tv_sec++;
second_overflow();
}
+ clock->raw_time.tv_nsec += clock->raw_interval;
+ if (clock->raw_time.tv_nsec >= NSEC_PER_SEC) {
+ clock->raw_time.tv_nsec -= NSEC_PER_SEC;
+ clock->raw_time.tv_sec++;
+ }
+
/* accumulate error between NTP and clock interval */
clock->error += tick_length;
clock->error -= clock->xtime_interval << (NTP_SCALE_SHIFT - clock->shift);
@@ -479,9 +518,12 @@ void update_wall_time(void)
/* correct the clock when NTP error is too big */
clocksource_adjust(offset);
- /* store full nanoseconds into xtime */
- xtime.tv_nsec = (s64)clock->xtime_nsec >> clock->shift;
+ /* Store full nanoseconds into xtime after rounding them up, and
+ * add the remainder to the clock error.
+ */
+ xtime.tv_nsec = ((s64)clock->xtime_nsec >> clock->shift) + 1;
clock->xtime_nsec -= (s64)xtime.tv_nsec << clock->shift;
+ clock->error += clock->xtime_nsec << (NTP_SCALE_SHIFT - clock->shift);
update_xtime_cache(cyc2ns(clock, offset));
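
getrawmonotonic() gives in-kernel users a clock that advances with the unadjusted ->mult_orig/->raw_time bookkeeping added above, i.e. untouched by NTP frequency corrections. A minimal, hypothetical consumer; the function name and message are made up.

#include <linux/kernel.h>
#include <linux/time.h>

/* Hypothetical example: sample the NTP-free clock added above. */
static void show_raw_clock(void)
{
        struct timespec raw;

        getrawmonotonic(&raw);
        printk(KERN_INFO "raw monotonic: %ld.%09ld\n",
               (long)raw.tv_sec, raw.tv_nsec);
}
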
diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c
index a40e20fd0001..f6426911e35a 100644
--- a/kernel/time/timer_list.c
+++ b/kernel/time/timer_list.c
@@ -47,13 +47,14 @@ static void print_name_offset(struct seq_file *m, void *sym)
}
static void
-print_timer(struct seq_file *m, struct hrtimer *timer, int idx, u64 now)
+print_timer(struct seq_file *m, struct hrtimer *taddr, struct hrtimer *timer,
+ int idx, u64 now)
{
#ifdef CONFIG_TIMER_STATS
char tmp[TASK_COMM_LEN + 1];
#endif
SEQ_printf(m, " #%d: ", idx);
- print_name_offset(m, timer);
+ print_name_offset(m, taddr);
SEQ_printf(m, ", ");
print_name_offset(m, timer->function);
SEQ_printf(m, ", S:%02lx", timer->state);
@@ -99,7 +100,7 @@ next_one:
tmp = *timer;
spin_unlock_irqrestore(&base->cpu_base->lock, flags);
- print_timer(m, &tmp, i, now);
+ print_timer(m, timer, &tmp, i, now);
next++;
goto next_one;
}
@@ -109,6 +110,7 @@ next_one:
static void
print_base(struct seq_file *m, struct hrtimer_clock_base *base, u64 now)
{
+ SEQ_printf(m, " .base: %p\n", base);
SEQ_printf(m, " .index: %d\n",
base->index);
SEQ_printf(m, " .resolution: %Lu nsecs\n",
@@ -183,12 +185,16 @@ static void print_cpu(struct seq_file *m, int cpu, u64 now)
#ifdef CONFIG_GENERIC_CLOCKEVENTS
static void
-print_tickdevice(struct seq_file *m, struct tick_device *td)
+print_tickdevice(struct seq_file *m, struct tick_device *td, int cpu)
{
struct clock_event_device *dev = td->evtdev;
SEQ_printf(m, "\n");
SEQ_printf(m, "Tick Device: mode: %d\n", td->mode);
+ if (cpu < 0)
+ SEQ_printf(m, "Broadcast device\n");
+ else
+ SEQ_printf(m, "Per CPU device: %d\n", cpu);
SEQ_printf(m, "Clock Event Device: ");
if (!dev) {
@@ -222,7 +228,7 @@ static void timer_list_show_tickdevices(struct seq_file *m)
int cpu;
#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
- print_tickdevice(m, tick_get_broadcast_device());
+ print_tickdevice(m, tick_get_broadcast_device(), -1);
SEQ_printf(m, "tick_broadcast_mask: %08lx\n",
tick_get_broadcast_mask()->bits[0]);
#ifdef CONFIG_TICK_ONESHOT
@@ -232,7 +238,7 @@ static void timer_list_show_tickdevices(struct seq_file *m)
SEQ_printf(m, "\n");
#endif
for_each_online_cpu(cpu)
- print_tickdevice(m, tick_get_device(cpu));
+ print_tickdevice(m, tick_get_device(cpu), cpu);
SEQ_printf(m, "\n");
}
#else
@@ -244,7 +250,7 @@ static int timer_list_show(struct seq_file *m, void *v)
u64 now = ktime_to_ns(ktime_get());
int cpu;
- SEQ_printf(m, "Timer List Version: v0.3\n");
+ SEQ_printf(m, "Timer List Version: v0.4\n");
SEQ_printf(m, "HRTIMER_MAX_CLOCK_BASES: %d\n", HRTIMER_MAX_CLOCK_BASES);
SEQ_printf(m, "now at %Ld nsecs\n", (unsigned long long)now);
diff --git a/kernel/timer.c b/kernel/timer.c
index 510fe69351ca..56becf373c58 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -1436,9 +1436,11 @@ static void __cpuinit migrate_timers(int cpu)
BUG_ON(cpu_online(cpu));
old_base = per_cpu(tvec_bases, cpu);
new_base = get_cpu_var(tvec_bases);
-
- local_irq_disable();
- spin_lock(&new_base->lock);
+ /*
+ * The caller is globally serialized and nobody else
+ * takes two locks at once, so deadlock is not possible.
+ */
+ spin_lock_irq(&new_base->lock);
spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);
BUG_ON(old_base->running_timer);
@@ -1453,8 +1455,7 @@ static void __cpuinit migrate_timers(int cpu)
}
spin_unlock(&old_base->lock);
- spin_unlock(&new_base->lock);
- local_irq_enable();
+ spin_unlock_irq(&new_base->lock);
put_cpu_var(tvec_bases);
}
#endif /* CONFIG_HOTPLUG_CPU */
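
The migrate_timers() change above collapses local_irq_disable() plus spin_lock() into spin_lock_irq() and relies on lockdep's SINGLE_DEPTH_NESTING annotation for the second, same-class lock. The same pattern in isolation, with a made-up struct my_base standing in for the timer base:

#include <linux/spinlock.h>

struct my_base {
        spinlock_t lock;
        /* ... per-CPU state being migrated ... */
};

/* The caller must guarantee global serialization (as the CPU hotplug
 * path does for migrate_timers()), so taking both same-class locks
 * with a nesting annotation cannot deadlock. */
static void migrate_all(struct my_base *new_base, struct my_base *old_base)
{
        spin_lock_irq(&new_base->lock);
        spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);

        /* ... move pending entries from old_base to new_base ... */

        spin_unlock(&old_base->lock);
        spin_unlock_irq(&new_base->lock);
}
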