author	Frederic Weisbecker <frederic@kernel.org>	2021-07-26 15:55:09 +0300
committer	Thomas Gleixner <tglx@linutronix.de>	2021-08-10 18:09:59 +0300
commit	175cc3ab28e3509ddee8de4f164b563d99daa570 (patch)
tree	fc28cebc76fb649e06eee2f55f15a2c9a9ce36f9 /kernel/time
parent	a5dec9f82ab2ae486119f0b0820ea16db3e522c3 (diff)
download	linux-175cc3ab28e3509ddee8de4f164b563d99daa570.tar.xz
posix-cpu-timers: Force next_expiration recalc after timer deletion
A timer deletion only dequeues the timer but it doesn't shutdown
the related costly process wide cputimer counter and the tick dependency.

The following code snippet keeps this overhead around for one week
after the timer deletion:

	void trigger_process_counter(void)
	{
		timer_t id;
		struct itimerspec val = { };

		val.it_value.tv_sec = 604800;
		timer_create(CLOCK_PROCESS_CPUTIME_ID, NULL, &id);
		timer_settime(id, 0, &val, NULL);
		timer_delete(id);
	}

Make sure the next target's tick recalculates the nearest expiration
and clears the process wide counter and tick dependency if necessary.

Signed-off-by: Frederic Weisbecker <frederic@kernel.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lore.kernel.org/r/20210726125513.271824-3-frederic@kernel.org
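For reference, the snippet above is not compilable on its own. A minimal self-contained build of the reproducer could look like the following; the main() wrapper, includes and error handling are illustrative additions, not part of the changelog (on older glibc, link with -lrt):

	#include <stdio.h>
	#include <time.h>

	/* Arm a one-week CLOCK_PROCESS_CPUTIME_ID timer, then delete it
	 * immediately. Without the fix, the process wide cputime
	 * accounting and the tick dependency stay active afterwards. */
	int main(void)
	{
		timer_t id;
		struct itimerspec val = { };

		val.it_value.tv_sec = 604800;	/* one week */

		if (timer_create(CLOCK_PROCESS_CPUTIME_ID, NULL, &id)) {
			perror("timer_create");
			return 1;
		}
		if (timer_settime(id, 0, &val, NULL)) {
			perror("timer_settime");
			return 1;
		}
		if (timer_delete(id)) {
			perror("timer_delete");
			return 1;
		}
		return 0;
	}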
Diffstat (limited to 'kernel/time')
-rw-r--r--	kernel/time/posix-cpu-timers.c | 33 ++++++++++++++++++++++++++++++++-
1 file changed, 32 insertions(+), 1 deletion(-)
diff --git a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c
index 4693d3c71e7e..61c78b62fe6a 100644
--- a/kernel/time/posix-cpu-timers.c
+++ b/kernel/time/posix-cpu-timers.c
@@ -408,6 +408,37 @@ static int posix_cpu_timer_create(struct k_itimer *new_timer)
}
/*
+ * Dequeue the timer and reset the base if it was its earliest expiration.
+ * It makes sure the next tick recalculates the base next expiration so we
+ * don't keep the costly process wide cputime counter around for a random
+ * amount of time, along with the tick dependency.
+ *
+ * If another timer gets queued between this and the next tick, its
+ * expiration will update the base next event if necessary on the next
+ * tick.
+ */
+static void disarm_timer(struct k_itimer *timer, struct task_struct *p)
+{
+	struct cpu_timer *ctmr = &timer->it.cpu;
+	struct posix_cputimer_base *base;
+	int clkidx;
+
+	if (!cpu_timer_dequeue(ctmr))
+		return;
+
+	clkidx = CPUCLOCK_WHICH(timer->it_clock);
+
+	if (CPUCLOCK_PERTHREAD(timer->it_clock))
+		base = p->posix_cputimers.bases + clkidx;
+	else
+		base = p->signal->posix_cputimers.bases + clkidx;
+
+	if (cpu_timer_getexpires(ctmr) == base->nextevt)
+		base->nextevt = 0;
+}
+
+
+/*
 * Clean up a CPU-clock timer that is about to be destroyed.
 * This is called from timer deletion with the timer already locked.
 * If we return TIMER_RETRY, it's necessary to release the timer's lock
@@ -441,7 +472,7 @@ static int posix_cpu_timer_del(struct k_itimer *timer)
		if (timer->it.cpu.firing)
			ret = TIMER_RETRY;
		else
-			cpu_timer_dequeue(ctmr);
+			disarm_timer(timer, p);

		unlock_task_sighand(p, &flags);
	}
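As a rough illustration of the idea behind disarm_timer(): zeroing the cached next expiration makes the very next tick take the slow path, rescan the remaining timers and, if none are left, shut the expensive machinery down. The sketch below is a hypothetical userspace model, not kernel code; the names base, disarm, tick and NEXTEVT_INACTIVE are made up for the example:

	#include <stdint.h>
	#include <stdio.h>

	#define NEXTEVT_INACTIVE UINT64_MAX	/* no timer armed on this base */

	struct base {
		uint64_t nextevt;	/* cached earliest expiration */
		uint64_t timers[8];	/* armed expirations, 0 = free slot */
	};

	/* Delete one timer; if it owned the cached earliest expiration,
	 * force the next tick to recalculate instead of doing it here. */
	static void disarm(struct base *b, int slot)
	{
		uint64_t exp = b->timers[slot];

		b->timers[slot] = 0;
		if (exp == b->nextevt)
			b->nextevt = 0;
	}

	/* Per-tick check: cheap comparison first, full rescan only when
	 * the cache claims something has expired (or was just zeroed). */
	static void tick(struct base *b, uint64_t now)
	{
		if (now < b->nextevt)
			return;

		b->nextevt = NEXTEVT_INACTIVE;
		for (int i = 0; i < 8; i++)
			if (b->timers[i] && b->timers[i] < b->nextevt)
				b->nextevt = b->timers[i];

		if (b->nextevt == NEXTEVT_INACTIVE)
			printf("base idle: accounting can be stopped\n");
	}

	int main(void)
	{
		struct base b = { .nextevt = 604800, .timers = { 604800 } };

		disarm(&b, 0);	/* like timer_delete() right after arming */
		tick(&b, 1);	/* next tick notices and shuts things down */
		return 0;
	}

This mirrors what the comment above disarm_timer() describes: deletion itself only resets the cache, and the next tick recalculates base->nextevt, at which point the process wide cputime counter and the tick dependency can be dropped if no timer remains.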