From 24bc5ea5c01a7695a1308ac24435810855ec71c9 Mon Sep 17 00:00:00 2001
From: Ingo Molnar
Date: Tue, 2 Dec 2025 10:05:10 +0100
Subject: [PATCH 1/2] seqlock, procfs: Match scoped_seqlock_read() critical
 section vs. RCU ordering in do_task_stat() to do_io_accounting()

There are two patterns of taking the RCU read-lock and the
sig->stats_lock read-seqlock in do_task_stat() and do_io_accounting(),
with a different ordering:

  # do_io_accounting():

	guard(rcu)();
	scoped_seqlock_read (&sig->stats_lock, ss_lock_irqsave) {

  # do_task_stat():

	scoped_seqlock_read (&sig->stats_lock, ss_lock_irqsave) {
	...
		rcu_read_lock();

The ordering is RCU-read + seqlock-read in the first case, and
seqlock-read + RCU-read in the second case.

While technically these read locks can be taken in either order, it's
good practice to take the more intrusive lock on the inside (which is
the IRQs-off section in this case), and doing so consistently reduces
head-scratching during review. So use the do_io_accounting() pattern
in do_task_stat() as well.

This also reduces irqs-off latencies in do_task_stat() a tiny bit.

Signed-off-by: Ingo Molnar
Acked-by: Oleg Nesterov
Cc: Peter Zijlstra
Cc: Linus Torvalds
Cc: Thomas Gleixner
Cc: Christian Brauner
Cc: Al Viro
Link: https://patch.msgid.link/aS6rwnaPbHFCdHp1@gmail.com
---
 fs/proc/array.c | 34 +++++++++++++++++-----------------
 1 file changed, 17 insertions(+), 17 deletions(-)

diff --git a/fs/proc/array.c b/fs/proc/array.c
index cbd4bc4a58e4..42932f88141a 100644
--- a/fs/proc/array.c
+++ b/fs/proc/array.c
@@ -537,27 +537,27 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
 	if (permitted && (!whole || num_threads < 2))
 		wchan = !task_is_running(task);
 
-	scoped_seqlock_read (&sig->stats_lock, ss_lock_irqsave) {
-		cmin_flt = sig->cmin_flt;
-		cmaj_flt = sig->cmaj_flt;
-		cutime = sig->cutime;
-		cstime = sig->cstime;
-		cgtime = sig->cgtime;
+	scoped_guard(rcu) {
+		scoped_seqlock_read (&sig->stats_lock, ss_lock_irqsave) {
+			cmin_flt = sig->cmin_flt;
+			cmaj_flt = sig->cmaj_flt;
+			cutime = sig->cutime;
+			cstime = sig->cstime;
+			cgtime = sig->cgtime;
 
-		if (whole) {
-			struct task_struct *t;
+			if (whole) {
+				struct task_struct *t;
 
-			min_flt = sig->min_flt;
-			maj_flt = sig->maj_flt;
-			gtime = sig->gtime;
+				min_flt = sig->min_flt;
+				maj_flt = sig->maj_flt;
+				gtime = sig->gtime;
 
-			rcu_read_lock();
-			__for_each_thread(sig, t) {
-				min_flt += t->min_flt;
-				maj_flt += t->maj_flt;
-				gtime += task_gtime(t);
+				__for_each_thread(sig, t) {
+					min_flt += t->min_flt;
+					maj_flt += t->maj_flt;
+					gtime += task_gtime(t);
+				}
 			}
-			rcu_read_unlock();
 		}
 	}
 
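For reference, a minimal standalone sketch of the resulting nesting,
assuming only scoped_guard(rcu) from <linux/cleanup.h> and
scoped_seqlock_read() from <linux/seqlock.h> as used in the hunk above;
the my_stats structure, its fields and my_read_stats() are hypothetical,
for illustration only:

	#include <linux/cleanup.h>
	#include <linux/seqlock.h>

	/* Hypothetical stats structure, for illustration only: */
	struct my_stats {
		seqlock_t	stats_lock;
		u64		min_flt;
	};

	static u64 my_read_stats(struct my_stats *s)
	{
		u64 min_flt = 0;

		/* RCU read-side section on the outside ... */
		scoped_guard(rcu) {
			/*
			 * ... the more intrusive seqlock read section
			 * (IRQs-off on the locked fallback path) nested
			 * on the inside:
			 */
			scoped_seqlock_read (&s->stats_lock, ss_lock_irqsave) {
				min_flt = s->min_flt;
			}
		}
		return min_flt;
	}

Keeping the seqlock section innermost confines any IRQs-off window to
the actual field reads, which is what shrinks the irqs-off latency
mentioned in the changelog above.
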
From 90dfeef1cd38dff19f8b3a752d13bfd79f0f7694 Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Thu, 4 Dec 2025 11:43:32 +0100
Subject: [PATCH 2/2] seqlock: Cure some more scoped_seqlock() optimization
 fails

Arnd reported that an x86 randconfig build using gcc-15 tripped over
__scoped_seqlock_bug(). It turns out GCC chose not to inline the
scoped_seqlock helper functions, and as such was not able to optimize
the __scoped_seqlock_bug() call away. Mark the helpers __always_inline.

[ mingo: Clang fails the build too in some circumstances. ]

Reported-by: Arnd Bergmann
Tested-by: Arnd Bergmann
Signed-off-by: Peter Zijlstra (Intel)
Signed-off-by: Ingo Molnar
Cc: Oleg Nesterov
Link: https://patch.msgid.link/20251204104332.GG2528459@noisy.programming.kicks-ass.net
---
 include/linux/seqlock.h | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h
index a8a8661839b6..221123660e71 100644
--- a/include/linux/seqlock.h
+++ b/include/linux/seqlock.h
@@ -1224,7 +1224,7 @@ struct ss_tmp {
 	spinlock_t *lock_irqsave;
 };
 
-static inline void __scoped_seqlock_cleanup(struct ss_tmp *sst)
+static __always_inline void __scoped_seqlock_cleanup(struct ss_tmp *sst)
 {
 	if (sst->lock)
 		spin_unlock(sst->lock);
@@ -1252,7 +1252,7 @@ static inline void __scoped_seqlock_bug(void) { }
 extern void __scoped_seqlock_bug(void);
 #endif
 
-static inline void
+static __always_inline void
 __scoped_seqlock_next(struct ss_tmp *sst, seqlock_t *lock, enum ss_state target)
 {
 	switch (sst->state) {
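Why inlining is required here at all: in one of the two configurations
visible in the second hunk, __scoped_seqlock_bug() is only declared and
never defined, so the build links only if the optimizer proves every
call to it unreachable and eliminates it. A minimal sketch of that
link-time assertion pattern, with hypothetical names (my_bad_usage,
my_set_state) unrelated to the actual seqlock code:

	#include <linux/compiler.h>

	/* Deliberately never defined: any surviving call fails the link. */
	extern void my_bad_usage(void);

	static __always_inline void my_set_state(int state)
	{
		switch (state) {
		case 0:
		case 1:
			break;	/* valid states */
		default:
			/*
			 * Only reachable from invalid call sites. If this
			 * helper is not inlined, 'state' is no longer a
			 * compile-time constant, the call cannot be proven
			 * dead, and the undefined reference breaks the build.
			 */
			my_bad_usage();
		}
	}

With __always_inline, a call site such as my_set_state(1) is folded at
compile time and the my_bad_usage() reference disappears; plain 'static
inline' leaves that to the compiler's inlining heuristics, which is
exactly what gcc-15 declined to apply here.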