From 1df4bd83cdfdbd0720ddb2c6488b7e9a432ba468 Mon Sep 17 00:00:00 2001
From: Oleg Nesterov
Date: Mon, 23 Oct 2023 17:34:05 +0200
Subject: do_io_accounting: use sig->stats_lock

Rather than lock_task_sighand(), sig->stats_lock was specifically
designed for this type of use. This way the "if (whole)" branch runs
lockless in the likely case.

Link: https://lkml.kernel.org/r/20231023153405.GA4639@redhat.com
Signed-off-by: Oleg Nesterov
Cc: "Eric W. Biederman"
Signed-off-by: Andrew Morton
---
 fs/proc/base.c | 20 ++++++++++++++------
 1 file changed, 14 insertions(+), 6 deletions(-)

diff --git a/fs/proc/base.c b/fs/proc/base.c
index 7779efda9fe2..ca4f0ce64df6 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -2973,7 +2973,6 @@ static const struct file_operations proc_coredump_filter_operations = {
 static int do_io_accounting(struct task_struct *task, struct seq_file *m, int whole)
 {
 	struct task_io_accounting acct;
-	unsigned long flags;
 	int result;
 
 	result = down_read_killable(&task->signal->exec_update_lock);
@@ -2985,15 +2984,24 @@ static int do_io_accounting(struct task_struct *task, struct seq_file *m, int wh
 		goto out_unlock;
 	}
 
-	if (whole && lock_task_sighand(task, &flags)) {
+	if (whole) {
 		struct signal_struct *sig = task->signal;
 		struct task_struct *t;
+		unsigned int seq = 1;
+		unsigned long flags;
+
+		rcu_read_lock();
+		do {
+			seq++; /* 2 on the 1st/lockless path, otherwise odd */
+			flags = read_seqbegin_or_lock_irqsave(&sig->stats_lock, &seq);
 
-		acct = sig->ioac;
-		__for_each_thread(sig, t)
-			task_io_accounting_add(&acct, &t->ioac);
+			acct = sig->ioac;
+			__for_each_thread(sig, t)
+				task_io_accounting_add(&acct, &t->ioac);
 
-		unlock_task_sighand(task, &flags);
+		} while (need_seqretry(&sig->stats_lock, seq));
+		done_seqretry_irqrestore(&sig->stats_lock, seq, flags);
+		rcu_read_unlock();
 	} else {
 		acct = task->ioac;
 	}
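
For reference, the seqlock retry pattern the second hunk switches to looks
like this when pulled out of do_io_accounting(). This is a minimal sketch:
struct foo_stats, foo_stats_init(), foo_read_stats() and foo_add_sample()
are hypothetical names invented for illustration; only the seqlock_t calls
(read_seqbegin_or_lock_irqsave(), need_seqretry(), done_seqretry_irqrestore(),
write_seqlock_irqsave()) are real kernel interfaces. The reader's first pass
bumps seq from 1 to 2 (even), so it reads locklessly; if a writer raced with
it, need_seqretry() triggers a second pass with an odd seq, which takes the
lock outright. The rcu_read_lock() in the patch itself is only needed because
__for_each_thread() walks an RCU-protected thread list, so it is omitted here.

	#include <linux/seqlock.h>
	#include <linux/types.h>

	/* Hypothetical stats structure protected by a seqlock. */
	struct foo_stats {
		seqlock_t	stats_lock;
		u64		bytes_read;
		u64		bytes_written;
	};

	static void foo_stats_init(struct foo_stats *fs)
	{
		seqlock_init(&fs->stats_lock);
		fs->bytes_read = 0;
		fs->bytes_written = 0;
	}

	/* Reader: lockless on the first pass, takes the lock on retry. */
	static void foo_read_stats(struct foo_stats *fs, u64 *rd, u64 *wr)
	{
		unsigned int seq = 1;
		unsigned long flags;

		do {
			seq++; /* 2 (even) => lockless, odd => locked */
			flags = read_seqbegin_or_lock_irqsave(&fs->stats_lock, &seq);

			*rd = fs->bytes_read;
			*wr = fs->bytes_written;

		} while (need_seqretry(&fs->stats_lock, seq));
		done_seqretry_irqrestore(&fs->stats_lock, seq, flags);
	}

	/* Writer: bumps the sequence count so lockless readers retry. */
	static void foo_add_sample(struct foo_stats *fs, u64 rd, u64 wr)
	{
		unsigned long flags;

		write_seqlock_irqsave(&fs->stats_lock, flags);
		fs->bytes_read += rd;
		fs->bytes_written += wr;
		write_sequnlock_irqrestore(&fs->stats_lock, flags);
	}

Compared with lock_task_sighand(), the effect in do_io_accounting() is that
concurrent readers of the per-process totals neither serialize against each
other nor block writers in the common, uncontended case; only a detected
race forces the locked slow path.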