From 2b69942f9021bf75bd1b001f53bd2578361fadf3 Mon Sep 17 00:00:00 2001
From: Thomas Gleixner
Date: Wed, 21 Aug 2019 21:09:04 +0200
Subject: posix-cpu-timers: Create a container struct

Per task/process data of posix CPU timers is all over the place which
makes the code hard to follow and requires ifdeffery.

Create a container to hold all this information in one place, so data is
consolidated and the ifdeffery can be confined to the posix timer header
file and removed from places like fork.

As a first step, move the cpu_timers list head array into the new struct
and clean up the initializers and simplify fork. The remaining #ifdef in
fork will be removed later.

Signed-off-by: Thomas Gleixner
Reviewed-by: Frederic Weisbecker
Link: https://lkml.kernel.org/r/20190821192920.819418976@linutronix.de
---
 kernel/fork.c | 11 ++++-------
 1 file changed, 4 insertions(+), 7 deletions(-)

(limited to 'kernel/fork.c')

diff --git a/kernel/fork.c b/kernel/fork.c
index d8ae0f1b4148..b6a135e4275b 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1523,6 +1523,7 @@ void __cleanup_sighand(struct sighand_struct *sighand)
  */
 static void posix_cpu_timers_init_group(struct signal_struct *sig)
 {
+        struct posix_cputimers *pct = &sig->posix_cputimers;
         unsigned long cpu_limit;
 
         cpu_limit = READ_ONCE(sig->rlim[RLIMIT_CPU].rlim_cur);
@@ -1531,10 +1532,7 @@ static void posix_cpu_timers_init_group(struct signal_struct *sig)
                 sig->cputimer.running = true;
         }
 
-        /* The timer lists. */
-        INIT_LIST_HEAD(&sig->cpu_timers[0]);
-        INIT_LIST_HEAD(&sig->cpu_timers[1]);
-        INIT_LIST_HEAD(&sig->cpu_timers[2]);
+        posix_cputimers_init(pct);
 }
 #else
 static inline void posix_cpu_timers_init_group(struct signal_struct *sig) { }
@@ -1649,9 +1647,8 @@ static void posix_cpu_timers_init(struct task_struct *tsk)
         tsk->cputime_expires.prof_exp = 0;
         tsk->cputime_expires.virt_exp = 0;
         tsk->cputime_expires.sched_exp = 0;
-        INIT_LIST_HEAD(&tsk->cpu_timers[0]);
-        INIT_LIST_HEAD(&tsk->cpu_timers[1]);
-        INIT_LIST_HEAD(&tsk->cpu_timers[2]);
+
+        posix_cputimers_init(&tsk->posix_cputimers);
 }
 #else
 static inline void posix_cpu_timers_init(struct task_struct *tsk) { }
--
cgit v1.2.3
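The pattern this first patch establishes is easier to see in isolation. Below is a
stand-alone sketch (user-space C; list_head, the *_model structs and the printf probe
are simplified stand-ins for the kernel definitions, not kernel code): one container
type, embedded by both the per-task and the per-process side, initialized through a
single helper:

        #include <stdio.h>

        #define CPUCLOCK_MAX 3

        /* Minimal stand-in for the kernel's struct list_head. */
        struct list_head {
                struct list_head *next, *prev;
        };

        static void INIT_LIST_HEAD(struct list_head *list)
        {
                list->next = list;
                list->prev = list;
        }

        /* The container the patch introduces (list heads only, at this stage). */
        struct posix_cputimers {
                struct list_head cpu_timers[CPUCLOCK_MAX];
        };

        static void posix_cputimers_init(struct posix_cputimers *pct)
        {
                INIT_LIST_HEAD(&pct->cpu_timers[0]);
                INIT_LIST_HEAD(&pct->cpu_timers[1]);
                INIT_LIST_HEAD(&pct->cpu_timers[2]);
        }

        /* Both per-task and per-process state embed the same container ... */
        struct task_model   { struct posix_cputimers posix_cputimers; };
        struct signal_model { struct posix_cputimers posix_cputimers; };

        int main(void)
        {
                struct task_model tsk;
                struct signal_model sig;

                /* ... so one initializer replaces the open-coded variants. */
                posix_cputimers_init(&tsk.posix_cputimers);
                posix_cputimers_init(&sig.posix_cputimers);

                printf("per-task list 0 self-linked: %d\n",
                       tsk.posix_cputimers.cpu_timers[0].next ==
                       &tsk.posix_cputimers.cpu_timers[0]);
                return 0;
        }

Keeping the initializer next to the struct in the posix timers header is what later
lets the CONFIG_POSIX_TIMERS ifdeffery collapse into that header; the follow-up
patches below do exactly that for the expiry cache and the state flags.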
From 3a245c0f110e2bfcf7f2cd2248a29005c78999e3 Mon Sep 17 00:00:00 2001
From: Thomas Gleixner
Date: Wed, 21 Aug 2019 21:09:06 +0200
Subject: posix-cpu-timers: Move expiry cache into struct posix_cputimers

The expiry cache belongs into the posix_cputimers container where the
other cpu timers information is.

Signed-off-by: Thomas Gleixner
Reviewed-by: Frederic Weisbecker
Link: https://lkml.kernel.org/r/20190821192921.014444012@linutronix.de
---
 include/linux/posix-timers.h   | 22 +++++++++++++++++++++
 include/linux/sched.h          |  8 --------
 include/linux/sched/signal.h   |  3 ---
 kernel/fork.c                  | 25 +++----------------------
 kernel/sched/rt.c              |  6 ++++--
 kernel/time/posix-cpu-timers.c | 45 +++++++++++++++++++++++++-----------------
 6 files changed, 56 insertions(+), 53 deletions(-)

(limited to 'kernel/fork.c')

diff --git a/include/linux/posix-timers.h b/include/linux/posix-timers.h
index cdef89750b2c..a3731ba15bce 100644
--- a/include/linux/posix-timers.h
+++ b/include/linux/posix-timers.h
@@ -62,24 +62,43 @@ static inline int clockid_to_fd(const clockid_t clk)
         return ~(clk >> 3);
 }
 
+/*
+ * Alternate field names for struct task_cputime when used on cache
+ * expirations. Will go away soon.
+ */
+#define virt_exp        utime
+#define prof_exp        stime
+#define sched_exp       sum_exec_runtime
+
 #ifdef CONFIG_POSIX_TIMERS
 /**
  * posix_cputimers - Container for posix CPU timer related data
+ * @cputime_expires:    Earliest-expiration cache
  * @cpu_timers:         List heads to queue posix CPU timers
  *
  * Used in task_struct and signal_struct
  */
 struct posix_cputimers {
+        struct task_cputime     cputime_expires;
         struct list_head        cpu_timers[CPUCLOCK_MAX];
 };
 
 static inline void posix_cputimers_init(struct posix_cputimers *pct)
 {
+        memset(&pct->cputime_expires, 0, sizeof(pct->cputime_expires));
         INIT_LIST_HEAD(&pct->cpu_timers[0]);
         INIT_LIST_HEAD(&pct->cpu_timers[1]);
         INIT_LIST_HEAD(&pct->cpu_timers[2]);
 }
 
+void posix_cputimers_group_init(struct posix_cputimers *pct, u64 cpu_limit);
+
+static inline void posix_cputimers_rt_watchdog(struct posix_cputimers *pct,
+                                               u64 runtime)
+{
+        pct->cputime_expires.sched_exp = runtime;
+}
+
 /* Init task static initializer */
 #define INIT_CPU_TIMERLISTS(c) {                                \
         LIST_HEAD_INIT(c.cpu_timers[0]),                        \
@@ -94,6 +113,9 @@ static inline void posix_cputimers_init(struct posix_cputimers *pct)
 #else
 struct posix_cputimers { };
 #define INIT_CPU_TIMERS(s)
+static inline void posix_cputimers_init(struct posix_cputimers *pct) { }
+static inline void posix_cputimers_group_init(struct posix_cputimers *pct,
+                                              u64 cpu_limit) { }
 #endif
 
 #define REQUEUE_PENDING 1
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 37c39df9b186..8cc8e323093f 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -246,11 +246,6 @@ struct prev_cputime {
 #endif
 };
 
-/* Alternate field names when used on cache expirations: */
-#define virt_exp        utime
-#define prof_exp        stime
-#define sched_exp       sum_exec_runtime
-
 enum vtime_state {
         /* Task is sleeping or running in a CPU with VTIME inactive: */
         VTIME_INACTIVE = 0,
@@ -862,9 +857,6 @@ struct task_struct {
         unsigned long                   min_flt;
         unsigned long                   maj_flt;
 
-#ifdef CONFIG_POSIX_TIMERS
-        struct task_cputime             cputime_expires;
-#endif
         /* Empty if CONFIG_POSIX_CPUTIMERS=n */
         struct posix_cputimers          posix_cputimers;
 
diff --git a/include/linux/sched/signal.h b/include/linux/sched/signal.h
index 88fbb3f1c375..729bd892ee45 100644
--- a/include/linux/sched/signal.h
+++ b/include/linux/sched/signal.h
@@ -149,9 +149,6 @@ struct signal_struct {
          */
         struct thread_group_cputimer    cputimer;
 
-        /* Earliest-expiration cache. */
-        struct task_cputime             cputime_expires;
-
 #endif
         /* Empty if CONFIG_POSIX_TIMERS=n */
         struct posix_cputimers          posix_cputimers;
diff --git a/kernel/fork.c b/kernel/fork.c
index b6a135e4275b..52bfe7c20ff6 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1527,12 +1527,9 @@ static void posix_cpu_timers_init_group(struct signal_struct *sig)
         unsigned long cpu_limit;
 
         cpu_limit = READ_ONCE(sig->rlim[RLIMIT_CPU].rlim_cur);
-        if (cpu_limit != RLIM_INFINITY) {
-                sig->cputime_expires.prof_exp = cpu_limit * NSEC_PER_SEC;
+        posix_cputimers_group_init(pct, cpu_limit);
+        if (cpu_limit != RLIM_INFINITY)
                 sig->cputimer.running = true;
-        }
-
-        posix_cputimers_init(pct);
 }
 #else
 static inline void posix_cpu_timers_init_group(struct signal_struct *sig) { }
@@ -1638,22 +1635,6 @@ static void rt_mutex_init_task(struct task_struct *p)
 #endif
 }
 
-#ifdef CONFIG_POSIX_TIMERS
-/*
- * Initialize POSIX timer handling for a single task.
- */
-static void posix_cpu_timers_init(struct task_struct *tsk)
-{
-        tsk->cputime_expires.prof_exp = 0;
-        tsk->cputime_expires.virt_exp = 0;
-        tsk->cputime_expires.sched_exp = 0;
-
-        posix_cputimers_init(&tsk->posix_cputimers);
-}
-#else
-static inline void posix_cpu_timers_init(struct task_struct *tsk) { }
-#endif
-
 static inline void init_task_pid_links(struct task_struct *task)
 {
         enum pid_type type;
@@ -1932,7 +1913,7 @@ static __latent_entropy struct task_struct *copy_process(
         task_io_accounting_init(&p->ioac);
         acct_clear_integrals(p);
 
-        posix_cpu_timers_init(p);
+        posix_cputimers_init(&p->posix_cputimers);
 
         p->io_context = NULL;
         audit_set_context(p, NULL);
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index da3e85e61013..d6678f773c96 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -2305,8 +2305,10 @@ static void watchdog(struct rq *rq, struct task_struct *p)
                 }
 
                 next = DIV_ROUND_UP(min(soft, hard), USEC_PER_SEC/HZ);
-                if (p->rt.timeout > next)
-                        p->cputime_expires.sched_exp = p->se.sum_exec_runtime;
+                if (p->rt.timeout > next) {
+                        posix_cputimers_rt_watchdog(&p->posix_cputimers,
+                                                    p->se.sum_exec_runtime);
+                }
         }
 }
 #else
diff --git a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c
index 849e2045fb6e..3e29d1692437 100644
--- a/kernel/time/posix-cpu-timers.c
+++ b/kernel/time/posix-cpu-timers.c
@@ -20,11 +20,18 @@
 
 static void posix_cpu_timer_rearm(struct k_itimer *timer);
 
+void posix_cputimers_group_init(struct posix_cputimers *pct, u64 cpu_limit)
+{
+        posix_cputimers_init(pct);
+        if (cpu_limit != RLIM_INFINITY)
+                pct->cputime_expires.prof_exp = cpu_limit * NSEC_PER_SEC;
+}
+
 /*
  * Called after updating RLIMIT_CPU to run cpu timer and update
- * tsk->signal->cputime_expires expiration cache if necessary. Needs
- * siglock protection since other code may update expiration cache as
- * well.
+ * tsk->signal->posix_cputimers.cputime_expires expiration cache if
+ * necessary. Needs siglock protection since other code may update
+ * expiration cache as well.
  */
 void update_rlimit_cpu(struct task_struct *task, unsigned long rlim_new)
 {
@@ -447,10 +454,10 @@ static void arm_timer(struct k_itimer *timer)
 
         if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
                 head = p->posix_cputimers.cpu_timers;
-                cputime_expires = &p->cputime_expires;
+                cputime_expires = &p->posix_cputimers.cputime_expires;
         } else {
                 head = p->signal->posix_cputimers.cpu_timers;
-                cputime_expires = &p->signal->cputime_expires;
+                cputime_expires = &p->signal->posix_cputimers.cputime_expires;
         }
         head += CPUCLOCK_WHICH(timer->it_clock);
 
@@ -774,7 +781,7 @@ static void check_thread_timers(struct task_struct *tsk,
                                 struct list_head *firing)
 {
         struct list_head *timers = tsk->posix_cputimers.cpu_timers;
-        struct task_cputime *tsk_expires = &tsk->cputime_expires;
+        struct task_cputime *tsk_expires = &tsk->posix_cputimers.cputime_expires;
         u64 expires, stime, utime;
         unsigned long soft;
 
@@ -785,7 +792,7 @@ static void check_thread_timers(struct task_struct *tsk,
          * If cputime_expires is zero, then there are no active
          * per thread CPU timers.
          */
-        if (task_cputime_zero(&tsk->cputime_expires))
+        if (task_cputime_zero(tsk_expires))
                 return;
 
         task_cputime(tsk, &utime, &stime);
@@ -954,10 +961,10 @@ static void check_process_timers(struct task_struct *tsk,
                 prof_expires = x;
         }
 
-        sig->cputime_expires.prof_exp = prof_expires;
-        sig->cputime_expires.virt_exp = virt_expires;
-        sig->cputime_expires.sched_exp = sched_expires;
-        if (task_cputime_zero(&sig->cputime_expires))
+        sig->posix_cputimers.cputime_expires.prof_exp = prof_expires;
+        sig->posix_cputimers.cputime_expires.virt_exp = virt_expires;
+        sig->posix_cputimers.cputime_expires.sched_exp = sched_expires;
+        if (task_cputime_zero(&sig->posix_cputimers.cputime_expires))
                 stop_process_timers(sig);
 
         sig->cputimer.checking_timer = false;
@@ -1058,12 +1065,13 @@ static inline int fastpath_timer_check(struct task_struct *tsk)
 {
         struct signal_struct *sig;
 
-        if (!task_cputime_zero(&tsk->cputime_expires)) {
+        if (!task_cputime_zero(&tsk->posix_cputimers.cputime_expires)) {
                 struct task_cputime task_sample;
 
                 task_cputime(tsk, &task_sample.utime, &task_sample.stime);
                 task_sample.sum_exec_runtime = tsk->se.sum_exec_runtime;
-                if (task_cputime_expired(&task_sample, &tsk->cputime_expires))
+                if (task_cputime_expired(&task_sample,
+                                         &tsk->posix_cputimers.cputime_expires))
                         return 1;
         }
 
@@ -1088,7 +1096,8 @@ static inline int fastpath_timer_check(struct task_struct *tsk)
 
                 sample_cputime_atomic(&group_sample, &sig->cputimer.cputime_atomic);
 
-                if (task_cputime_expired(&group_sample, &sig->cputime_expires))
+                if (task_cputime_expired(&group_sample,
+                                         &sig->posix_cputimers.cputime_expires))
                         return 1;
         }
 
@@ -1204,12 +1213,12 @@ void set_process_cpu_timer(struct task_struct *tsk, unsigned int clock_idx,
          */
         switch (clock_idx) {
         case CPUCLOCK_PROF:
-                if (expires_gt(tsk->signal->cputime_expires.prof_exp, *newval))
-                        tsk->signal->cputime_expires.prof_exp = *newval;
+                if (expires_gt(tsk->signal->posix_cputimers.cputime_expires.prof_exp, *newval))
+                        tsk->signal->posix_cputimers.cputime_expires.prof_exp = *newval;
                 break;
         case CPUCLOCK_VIRT:
-                if (expires_gt(tsk->signal->cputime_expires.virt_exp, *newval))
-                        tsk->signal->cputime_expires.virt_exp = *newval;
+                if (expires_gt(tsk->signal->posix_cputimers.cputime_expires.virt_exp, *newval))
+                        tsk->signal->posix_cputimers.cputime_expires.virt_exp = *newval;
                 break;
         }
--
cgit v1.2.3
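The macro aliases introduced above are what keep this patch small: struct task_cputime
can serve as the expiry cache without a second struct type, because
virt_exp/prof_exp/sched_exp expand to its utime/stime/sum_exec_runtime members. A
compilable stand-alone sketch of that trick (user-space C; the types, constants and
the group_init() helper name are stand-ins for the kernel's, not the kernel code):

        #include <assert.h>
        #include <stdint.h>
        #include <stdio.h>
        #include <string.h>

        typedef uint64_t u64;

        #define NSEC_PER_SEC    1000000000ULL
        #define RLIM_INFINITY   (~0UL)

        /* Stand-in for the kernel's struct task_cputime. */
        struct task_cputime {
                u64 stime;
                u64 utime;
                u64 sum_exec_runtime;
        };

        /* The temporary aliases: expiry-cache names map onto clock names. */
        #define virt_exp        utime
        #define prof_exp        stime
        #define sched_exp       sum_exec_runtime

        /* Container reduced to the one member this patch adds. */
        struct posix_cputimers {
                struct task_cputime cputime_expires;
        };

        /*
         * Mirrors posix_cputimers_group_init(): zero the whole cache, then
         * seed the profiling expiry from RLIMIT_CPU (seconds -> nanoseconds).
         */
        static void group_init(struct posix_cputimers *pct, unsigned long cpu_limit)
        {
                memset(&pct->cputime_expires, 0, sizeof(pct->cputime_expires));
                if (cpu_limit != RLIM_INFINITY)
                        pct->cputime_expires.prof_exp = (u64)cpu_limit * NSEC_PER_SEC;
        }

        int main(void)
        {
                struct posix_cputimers pct;

                group_init(&pct, 1);    /* RLIMIT_CPU of 1 second */
                assert(pct.cputime_expires.prof_exp == NSEC_PER_SEC);
                assert(pct.cputime_expires.virt_exp == 0);
                printf("prof expiry seeded to %llu ns\n",
                       (unsigned long long)pct.cputime_expires.prof_exp);
                return 0;
        }

Because the initializer zeroes the whole cache first, "all fields zero" keeps meaning
"nothing armed", which is why the fork path could drop its three explicit
prof_exp/virt_exp/sched_exp = 0 assignments in favor of one
posix_cputimers_group_init() call.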
From 244d49e30653658d4e7e9b2b8427777cbbc5affe Mon Sep 17 00:00:00 2001
From: Thomas Gleixner
Date: Wed, 21 Aug 2019 21:09:24 +0200
Subject: posix-cpu-timers: Move state tracking to struct posix_cputimers

Put it where it belongs and clean up the ifdeffery in fork completely.

Signed-off-by: Thomas Gleixner
Link: https://lkml.kernel.org/r/20190821192922.743229404@linutronix.de
---
 include/linux/posix-timers.h   |  8 +++++
 include/linux/sched/cputime.h  |  9 ++++--
 include/linux/sched/signal.h   |  6 ----
 init/init_task.c               |  2 --
 kernel/fork.c                  |  6 ----
 kernel/time/posix-cpu-timers.c | 73 +++++++++++++++++++++++------------------
 6 files changed, 54 insertions(+), 50 deletions(-)

(limited to 'kernel/fork.c')

diff --git a/include/linux/posix-timers.h b/include/linux/posix-timers.h
index 3ea920e8fe7f..a9e3f69d2db4 100644
--- a/include/linux/posix-timers.h
+++ b/include/linux/posix-timers.h
@@ -77,15 +77,23 @@ struct posix_cputimer_base {
 /**
  * posix_cputimers - Container for posix CPU timer related data
  * @bases:              Base container for posix CPU clocks
+ * @timers_active:      Timers are queued.
+ * @expiry_active:      Timer expiry is active. Used for
+ *                      process wide timers to avoid multiple
+ *                      tasks trying to handle expiry concurrently
  *
  * Used in task_struct and signal_struct
  */
 struct posix_cputimers {
         struct posix_cputimer_base      bases[CPUCLOCK_MAX];
+        unsigned int                    timers_active;
+        unsigned int                    expiry_active;
 };
 
 static inline void posix_cputimers_init(struct posix_cputimers *pct)
 {
+        pct->timers_active = 0;
+        pct->expiry_active = 0;
         pct->bases[0].nextevt = U64_MAX;
         pct->bases[1].nextevt = U64_MAX;
         pct->bases[2].nextevt = U64_MAX;
diff --git a/include/linux/sched/cputime.h b/include/linux/sched/cputime.h
index eefa5dff16b4..6c9f19a33865 100644
--- a/include/linux/sched/cputime.h
+++ b/include/linux/sched/cputime.h
@@ -70,7 +70,7 @@ void thread_group_sample_cputime(struct task_struct *tsk, u64 *samples);
  */
 
 /**
- * get_running_cputimer - return &tsk->signal->cputimer if cputimer is running
+ * get_running_cputimer - return &tsk->signal->cputimer if cputimers are active
  *
  * @tsk:        Pointer to target task.
  */
@@ -80,8 +80,11 @@ struct thread_group_cputimer *get_running_cputimer(struct task_struct *tsk)
 {
         struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;
 
-        /* Check if cputimer isn't running. This is accessed without locking. */
-        if (!READ_ONCE(cputimer->running))
+        /*
+         * Check whether posix CPU timers are active. If not the thread
+         * group accounting is not active either. Lockless check.
+         */
+        if (!READ_ONCE(tsk->signal->posix_cputimers.timers_active))
                 return NULL;
 
         /*
diff --git a/include/linux/sched/signal.h b/include/linux/sched/signal.h
index 729bd892ee45..88050259c466 100644
--- a/include/linux/sched/signal.h
+++ b/include/linux/sched/signal.h
@@ -57,18 +57,12 @@ struct task_cputime_atomic {
 /**
  * struct thread_group_cputimer - thread group interval timer counts
  * @cputime_atomic:     atomic thread group interval timers.
- * @running:            true when there are timers running and
- *                      @cputime_atomic receives updates.
- * @checking_timer:     true when a thread in the group is in the
- *                      process of checking for thread group timers.
  *
  * This structure contains the version of task_cputime, above, that is
  * used for thread group CPU timer calculations.
  */
 struct thread_group_cputimer {
         struct task_cputime_atomic cputime_atomic;
-        bool running;
-        bool checking_timer;
 };
 
diff --git a/init/init_task.c b/init/init_task.c
index 7ab773b9b3cd..d49692a0ec51 100644
--- a/init/init_task.c
+++ b/init/init_task.c
@@ -30,8 +30,6 @@ static struct signal_struct init_signals = {
         .posix_timers = LIST_HEAD_INIT(init_signals.posix_timers),
         .cputimer       = {
                 .cputime_atomic = INIT_CPUTIME_ATOMIC,
-                .running        = false,
-                .checking_timer = false,
         },
 #endif
         INIT_CPU_TIMERS(init_signals)
diff --git a/kernel/fork.c b/kernel/fork.c
index 52bfe7c20ff6..f1228d9f0b11 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1517,7 +1517,6 @@ void __cleanup_sighand(struct sighand_struct *sighand)
         }
 }
 
-#ifdef CONFIG_POSIX_TIMERS
 /*
  * Initialize POSIX timer handling for a thread group.
  */
@@ -1528,12 +1527,7 @@ static void posix_cpu_timers_init_group(struct signal_struct *sig)
 
         cpu_limit = READ_ONCE(sig->rlim[RLIMIT_CPU].rlim_cur);
         posix_cputimers_group_init(pct, cpu_limit);
-        if (cpu_limit != RLIM_INFINITY)
-                sig->cputimer.running = true;
 }
-#else
-static inline void posix_cpu_timers_init_group(struct signal_struct *sig) { }
-#endif
 
 static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
 {
diff --git a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c
index ef39a7a4a95c..52f4c99c1d60 100644
--- a/kernel/time/posix-cpu-timers.c
+++ b/kernel/time/posix-cpu-timers.c
@@ -23,8 +23,10 @@ static void posix_cpu_timer_rearm(struct k_itimer *timer);
 void posix_cputimers_group_init(struct posix_cputimers *pct, u64 cpu_limit)
 {
         posix_cputimers_init(pct);
-        if (cpu_limit != RLIM_INFINITY)
+        if (cpu_limit != RLIM_INFINITY) {
                 pct->bases[CPUCLOCK_PROF].nextevt = cpu_limit * NSEC_PER_SEC;
+                pct->timers_active = true;
+        }
 }
 
 /*
@@ -248,8 +250,9 @@ static void update_gt_cputime(struct task_cputime_atomic *cputime_atomic,
 void thread_group_sample_cputime(struct task_struct *tsk, u64 *samples)
 {
         struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;
+        struct posix_cputimers *pct = &tsk->signal->posix_cputimers;
 
-        WARN_ON_ONCE(!cputimer->running);
+        WARN_ON_ONCE(!pct->timers_active);
 
         proc_sample_cputime_atomic(&cputimer->cputime_atomic, samples);
 }
@@ -269,9 +272,10 @@ void thread_group_sample_cputime(struct task_struct *tsk, u64 *samples)
 static void thread_group_start_cputime(struct task_struct *tsk, u64 *samples)
 {
         struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;
+        struct posix_cputimers *pct = &tsk->signal->posix_cputimers;
 
         /* Check if cputimer isn't running. This is accessed without locking. */
-        if (!READ_ONCE(cputimer->running)) {
+        if (!READ_ONCE(pct->timers_active)) {
                 struct task_cputime sum;
 
                 /*
@@ -283,13 +287,13 @@ static void thread_group_start_cputime(struct task_struct *tsk, u64 *samples)
                 update_gt_cputime(&cputimer->cputime_atomic, &sum);
 
                 /*
-                 * We're setting cputimer->running without a lock. Ensure
-                 * this only gets written to in one operation. We set
-                 * running after update_gt_cputime() as a small optimization,
-                 * but barriers are not required because update_gt_cputime()
+                 * We're setting timers_active without a lock. Ensure this
+                 * only gets written to in one operation. We set it after
+                 * update_gt_cputime() as a small optimization, but
+                 * barriers are not required because update_gt_cputime()
                  * can handle concurrent updates.
                  */
-                WRITE_ONCE(cputimer->running, true);
+                WRITE_ONCE(pct->timers_active, true);
         }
         proc_sample_cputime_atomic(&cputimer->cputime_atomic, samples);
 }
@@ -313,9 +317,10 @@ static u64 cpu_clock_sample_group(const clockid_t clkid, struct task_struct *p,
                                   bool start)
 {
         struct thread_group_cputimer *cputimer = &p->signal->cputimer;
+        struct posix_cputimers *pct = &p->signal->posix_cputimers;
         u64 samples[CPUCLOCK_MAX];
 
-        if (!READ_ONCE(cputimer->running)) {
+        if (!READ_ONCE(pct->timers_active)) {
                 if (start)
                         thread_group_start_cputime(p, samples);
                 else
@@ -834,10 +839,10 @@ static void check_thread_timers(struct task_struct *tsk,
 
 static inline void stop_process_timers(struct signal_struct *sig)
 {
-        struct thread_group_cputimer *cputimer = &sig->cputimer;
+        struct posix_cputimers *pct = &sig->posix_cputimers;
 
-        /* Turn off cputimer->running. This is done without locking. */
-        WRITE_ONCE(cputimer->running, false);
+        /* Turn off the active flag. This is done without locking. */
+        WRITE_ONCE(pct->timers_active, false);
         tick_dep_clear_signal(sig, TICK_DEP_BIT_POSIX_TIMER);
 }
 
@@ -877,17 +882,17 @@ static void check_process_timers(struct task_struct *tsk,
         unsigned long soft;
 
         /*
-         * If cputimer is not running, then there are no active
-         * process wide timers (POSIX 1.b, itimers, RLIMIT_CPU).
+         * If there are no active process wide timers (POSIX 1.b, itimers,
+         * RLIMIT_CPU) nothing to check.
          */
-        if (!READ_ONCE(sig->cputimer.running))
+        if (!READ_ONCE(pct->timers_active))
                 return;
 
         /*
          * Signify that a thread is checking for process timers.
          * Write access to this field is protected by the sighand lock.
          */
-        sig->cputimer.checking_timer = true;
+        pct->expiry_active = true;
 
         /*
          * Collect the current process totals. Group accounting is active
@@ -933,7 +938,7 @@ static void check_process_timers(struct task_struct *tsk,
         if (expiry_cache_is_inactive(pct))
                 stop_process_timers(sig);
 
-        sig->cputimer.checking_timer = false;
+        pct->expiry_active = false;
 }
 
 /*
@@ -1027,39 +1032,41 @@ task_cputimers_expired(const u64 *sample, struct posix_cputimers *pct)
  */
 static inline bool fastpath_timer_check(struct task_struct *tsk)
 {
+        struct posix_cputimers *pct = &tsk->posix_cputimers;
         struct signal_struct *sig;
 
-        if (!expiry_cache_is_inactive(&tsk->posix_cputimers)) {
+        if (!expiry_cache_is_inactive(pct)) {
                 u64 samples[CPUCLOCK_MAX];
 
                 task_sample_cputime(tsk, samples);
-                if (task_cputimers_expired(samples, &tsk->posix_cputimers))
+                if (task_cputimers_expired(samples, pct))
                         return true;
         }
 
         sig = tsk->signal;
+        pct = &sig->posix_cputimers;
+
         /*
-         * Check if thread group timers expired when the cputimer is
-         * running and no other thread in the group is already checking
-         * for thread group cputimers. These fields are read without the
-         * sighand lock. However, this is fine because this is meant to
-         * be a fastpath heuristic to determine whether we should try to
-         * acquire the sighand lock to check/handle timers.
+         * Check if thread group timers expired when timers are active and
+         * no other thread in the group is already handling expiry for
+         * thread group cputimers. These fields are read without the
+         * sighand lock. However, this is fine because this is meant to be
+         * a fastpath heuristic to determine whether we should try to
+         * acquire the sighand lock to handle timer expiry.
          *
-         * In the worst case scenario, if 'running' or 'checking_timer' gets
-         * set but the current thread doesn't see the change yet, we'll wait
-         * until the next thread in the group gets a scheduler interrupt to
-         * handle the timer. This isn't an issue in practice because these
-         * types of delays with signals actually getting sent are expected.
+         * In the worst case scenario, if concurrently timers_active is set
+         * or expiry_active is cleared, but the current thread doesn't see
+         * the change yet, the timer checks are delayed until the next
+         * thread in the group gets a scheduler interrupt to handle the
+         * timer. This isn't an issue in practice because these types of
+         * delays with signals actually getting sent are expected.
          */
-        if (READ_ONCE(sig->cputimer.running) &&
-            !READ_ONCE(sig->cputimer.checking_timer)) {
+        if (READ_ONCE(pct->timers_active) && !READ_ONCE(pct->expiry_active)) {
                 u64 samples[CPUCLOCK_MAX];
 
                 proc_sample_cputime_atomic(&sig->cputimer.cputime_atomic,
                                            samples);
 
-                if (task_cputimers_expired(samples, &sig->posix_cputimers))
+                if (task_cputimers_expired(samples, pct))
                         return true;
         }
--
cgit v1.2.3
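The end state of the series is a pair of lockless status flags sitting next to the
timer data they describe. The sketch below (user-space C; READ_ONCE/WRITE_ONCE are
reduced to plain volatile accesses, and fastpath_wants_slowpath()/
check_process_timers_model() are illustrative names, not kernel functions) condenses
the handshake between the fastpath check and the expiry slowpath:

        #include <stdbool.h>
        #include <stdio.h>

        /* Stand-in for the two state fields added to struct posix_cputimers. */
        struct pct_state {
                unsigned int timers_active;     /* timers are queued */
                unsigned int expiry_active;     /* some thread is expiring them */
        };

        /* Reduced READ_ONCE/WRITE_ONCE: volatile access only, none of the
           kernel's stronger guarantees. */
        #define READ_ONCE(x)     (*(volatile __typeof__(x) *)&(x))
        #define WRITE_ONCE(x, v) (*(volatile __typeof__(x) *)&(x) = (v))

        /*
         * Fastpath: racy by design. Only suggests taking the sighand lock
         * when timers are queued and nobody else is already expiring them.
         */
        static bool fastpath_wants_slowpath(struct pct_state *pct)
        {
                return READ_ONCE(pct->timers_active) &&
                       !READ_ONCE(pct->expiry_active);
        }

        /* Slowpath skeleton: flag writes happen with the sighand lock held. */
        static void check_process_timers_model(struct pct_state *pct,
                                               bool all_expired)
        {
                if (!READ_ONCE(pct->timers_active))
                        return;

                pct->expiry_active = true;      /* serialized by sighand lock */
                /* ... walk and fire expired timers here ... */
                if (all_expired)
                        WRITE_ONCE(pct->timers_active, false);
                pct->expiry_active = false;
        }

        int main(void)
        {
                struct pct_state pct = { .timers_active = 1, .expiry_active = 0 };

                printf("take slowpath: %d\n", fastpath_wants_slowpath(&pct));
                check_process_timers_model(&pct, true);
                printf("take slowpath after expiry: %d\n",
                       fastpath_wants_slowpath(&pct));
                return 0;
        }

As the comment in fastpath_timer_check() explains, a stale read of either flag only
delays expiry handling until the next scheduler tick; the flag writes that matter for
correctness happen under sighand lock in the slowpath.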