From 7ad639840acf2800b5f387c495795f995a67a329 Mon Sep 17 00:00:00 2001
From: Mark Rutland
Date: Mon, 29 Nov 2021 13:06:43 +0000
Subject: thread_info: Add helpers to snapshot thread flags

In <linux/thread_info.h> there are helpers to manipulate individual
thread flags, but where code wants to check several flags at once, it
must open code reading current_thread_info()->flags and operating on a
snapshot.

As some flags can be set remotely it's necessary to use READ_ONCE() to
get a consistent snapshot even when IRQs are disabled, but some code
forgets to do this. Generally this is unlikely to cause a problem in
practice, but it is somewhat unsound, and KCSAN will legitimately warn
that there is a data race.

To make it easier to do the right thing, and to highlight that
concurrent modification is possible, add new helpers to snapshot the
flags, which should be used in preference to plain reads.

Subsequent patches will move existing code to use the new helpers.

Signed-off-by: Mark Rutland
Signed-off-by: Thomas Gleixner
Reviewed-by: Thomas Gleixner
Acked-by: Marco Elver
Acked-by: Paul E. McKenney
Cc: Boqun Feng
Cc: Dmitry Vyukov
Cc: Peter Zijlstra
Cc: Will Deacon
Link: https://lore.kernel.org/r/20211129130653.2037928-2-mark.rutland@arm.com
---
 include/linux/thread_info.h | 14 ++++++++++++++
 1 file changed, 14 insertions(+)

diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h
index ad0c4e041030..73a6f34b3847 100644
--- a/include/linux/thread_info.h
+++ b/include/linux/thread_info.h
@@ -118,6 +118,15 @@ static inline int test_ti_thread_flag(struct thread_info *ti, int flag)
 	return test_bit(flag, (unsigned long *)&ti->flags);
 }

+/*
+ * This may be used in noinstr code, and needs to be __always_inline to prevent
+ * inadvertent instrumentation.
+ */
+static __always_inline unsigned long read_ti_thread_flags(struct thread_info *ti)
+{
+	return READ_ONCE(ti->flags);
+}
+
 #define set_thread_flag(flag) \
 	set_ti_thread_flag(current_thread_info(), flag)
 #define clear_thread_flag(flag) \
@@ -130,6 +139,11 @@ static inline int test_ti_thread_flag(struct thread_info *ti, int flag)
 	test_and_clear_ti_thread_flag(current_thread_info(), flag)
 #define test_thread_flag(flag) \
 	test_ti_thread_flag(current_thread_info(), flag)
+#define read_thread_flags() \
+	read_ti_thread_flags(current_thread_info())
+
+#define read_task_thread_flags(t) \
+	read_ti_thread_flags(task_thread_info(t))

 #ifdef CONFIG_GENERIC_ENTRY
 #define set_syscall_work(fl) \
--
cgit v1.2.3

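The pattern these helpers encode can be seen in a rough userspace
analogue. This is a minimal sketch, not kernel code: the TIF_* values,
function names, and the use of C11 atomics to stand in for READ_ONCE()
are all illustrative assumptions. The point is that checking several
flags against one snapshot keeps the decision internally consistent
even while a remote thread sets bits concurrently; re-reading the word
for each individual test could observe a different value every time.

    #include <stdatomic.h>
    #include <stdio.h>

    /* Illustrative flag bits, not the kernel's real TIF_* values. */
    #define TIF_SIGPENDING   (1UL << 0)
    #define TIF_NEED_RESCHED (1UL << 1)

    static _Atomic unsigned long ti_flags;

    /* Analogue of read_ti_thread_flags(): one consistent snapshot,
     * even though another thread may set bits at any time. */
    static unsigned long read_flags(void)
    {
            return atomic_load_explicit(&ti_flags, memory_order_relaxed);
    }

    int main(void)
    {
            atomic_fetch_or(&ti_flags, TIF_SIGPENDING); /* "remote" setter */

            /* Test several flags against the same snapshot, rather
             * than re-reading ti_flags for every test. */
            unsigned long flags = read_flags();
            if (flags & (TIF_SIGPENDING | TIF_NEED_RESCHED))
                    printf("work pending: %#lx\n", flags);
            return 0;
    }
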
From dca99fb643a2e9bc2e67a0f626b09d4f177f0f09 Mon Sep 17 00:00:00 2001
From: Mark Rutland
Date: Mon, 29 Nov 2021 13:06:53 +0000
Subject: x86: Snapshot thread flags

Some thread flags can be set remotely, and so even when IRQs are
disabled, the flags can change under our feet. Generally this is
unlikely to cause a problem in practice, but it is somewhat unsound,
and KCSAN will legitimately warn that there is a data race.

To avoid such issues, a snapshot of the flags has to be taken prior to
using them. Some places already use READ_ONCE() for that, others do
not. Convert them all to the new flag accessor helpers.

Signed-off-by: Mark Rutland
Signed-off-by: Thomas Gleixner
Reviewed-by: Thomas Gleixner
Acked-by: Paul E. McKenney
Link: https://lore.kernel.org/r/20211129130653.2037928-12-mark.rutland@arm.com
---
 arch/x86/kernel/process.c | 8 ++++----
 arch/x86/kernel/process.h | 4 ++--
 arch/x86/mm/tlb.c         | 2 +-
 3 files changed, 7 insertions(+), 7 deletions(-)

diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 04143a653a8a..5d481038fe0b 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -365,7 +365,7 @@ void arch_setup_new_exec(void)
 		clear_thread_flag(TIF_SSBD);
 		task_clear_spec_ssb_disable(current);
 		task_clear_spec_ssb_noexec(current);
-		speculation_ctrl_update(task_thread_info(current)->flags);
+		speculation_ctrl_update(read_thread_flags());
 	}
 }

@@ -617,7 +617,7 @@ static unsigned long speculation_ctrl_update_tif(struct task_struct *tsk)
 			clear_tsk_thread_flag(tsk, TIF_SPEC_IB);
 	}
 	/* Return the updated threadinfo flags*/
-	return task_thread_info(tsk)->flags;
+	return read_task_thread_flags(tsk);
 }

 void speculation_ctrl_update(unsigned long tif)
@@ -653,8 +653,8 @@ void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p)
 {
 	unsigned long tifp, tifn;

-	tifn = READ_ONCE(task_thread_info(next_p)->flags);
-	tifp = READ_ONCE(task_thread_info(prev_p)->flags);
+	tifn = read_task_thread_flags(next_p);
+	tifp = read_task_thread_flags(prev_p);

 	switch_to_bitmap(tifp);
diff --git a/arch/x86/kernel/process.h b/arch/x86/kernel/process.h
index 1d0797b2338a..76b547b83232 100644
--- a/arch/x86/kernel/process.h
+++ b/arch/x86/kernel/process.h
@@ -13,8 +13,8 @@ void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p);
 static inline void switch_to_extra(struct task_struct *prev,
 				   struct task_struct *next)
 {
-	unsigned long next_tif = task_thread_info(next)->flags;
-	unsigned long prev_tif = task_thread_info(prev)->flags;
+	unsigned long next_tif = read_task_thread_flags(next);
+	unsigned long prev_tif = read_task_thread_flags(prev);

 	if (IS_ENABLED(CONFIG_SMP)) {
 		/*
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index 59ba2968af1b..92bb03b9ceb5 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -361,7 +361,7 @@ static void l1d_flush_evaluate(unsigned long prev_mm, unsigned long next_mm,

 static unsigned long mm_mangle_tif_spec_bits(struct task_struct *next)
 {
-	unsigned long next_tif = task_thread_info(next)->flags;
+	unsigned long next_tif = read_task_thread_flags(next);
 	unsigned long spec_bits = (next_tif >> TIF_SPEC_IB) & LAST_USER_MM_SPEC_MASK;

 	/*
--
cgit v1.2.3

From 6ce895128b3bff738fe8d9dd74747a03e319e466 Mon Sep 17 00:00:00 2001
From: Mark Rutland
Date: Mon, 29 Nov 2021 13:06:44 +0000
Subject: entry: Snapshot thread flags

Some thread flags can be set remotely, and so even when IRQs are
disabled, the flags can change under our feet. Generally this is
unlikely to cause a problem in practice, but it is somewhat unsound,
and KCSAN will legitimately warn that there is a data race.

To avoid such issues, a snapshot of the flags has to be taken prior to
using them. Some places already use READ_ONCE() for that, others do
not. Convert them all to the new flag accessor helpers.

Signed-off-by: Mark Rutland
Signed-off-by: Thomas Gleixner
Acked-by: Paul E. McKenney
Link: https://lore.kernel.org/r/20211129130653.2037928-3-mark.rutland@arm.com
---
 include/linux/entry-kvm.h | 2 +-
 kernel/entry/common.c     | 4 ++--
 kernel/entry/kvm.c        | 4 ++--
 3 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/include/linux/entry-kvm.h b/include/linux/entry-kvm.h
index 0d7865a0731c..07c878d6e323 100644
--- a/include/linux/entry-kvm.h
+++ b/include/linux/entry-kvm.h
@@ -75,7 +75,7 @@ static inline void xfer_to_guest_mode_prepare(void)
  */
 static inline bool __xfer_to_guest_mode_work_pending(void)
 {
-	unsigned long ti_work = READ_ONCE(current_thread_info()->flags);
+	unsigned long ti_work = read_thread_flags();

 	return !!(ti_work & XFER_TO_GUEST_MODE_WORK);
 }
diff --git a/kernel/entry/common.c b/kernel/entry/common.c
index d5a61d565ad5..bad713684c2e 100644
--- a/kernel/entry/common.c
+++ b/kernel/entry/common.c
@@ -187,7 +187,7 @@ static unsigned long exit_to_user_mode_loop(struct pt_regs *regs,
 		/* Check if any of the above work has queued a deferred wakeup */
 		tick_nohz_user_enter_prepare();

-		ti_work = READ_ONCE(current_thread_info()->flags);
+		ti_work = read_thread_flags();
 	}

 	/* Return the latest work state for arch_exit_to_user_mode() */
@@ -196,7 +196,7 @@ static unsigned long exit_to_user_mode_loop(struct pt_regs *regs,

 static void exit_to_user_mode_prepare(struct pt_regs *regs)
 {
-	unsigned long ti_work = READ_ONCE(current_thread_info()->flags);
+	unsigned long ti_work = read_thread_flags();

 	lockdep_assert_irqs_disabled();
diff --git a/kernel/entry/kvm.c b/kernel/entry/kvm.c
index 49972ee99aff..96d476e06c77 100644
--- a/kernel/entry/kvm.c
+++ b/kernel/entry/kvm.c
@@ -26,7 +26,7 @@ static int xfer_to_guest_mode_work(struct kvm_vcpu *vcpu, unsigned long ti_work)
 		if (ret)
 			return ret;

-		ti_work = READ_ONCE(current_thread_info()->flags);
+		ti_work = read_thread_flags();
 	} while (ti_work & XFER_TO_GUEST_MODE_WORK || need_resched());
 	return 0;
 }
@@ -43,7 +43,7 @@ int xfer_to_guest_mode_handle_work(struct kvm_vcpu *vcpu)
 	 * disabled in the inner loop before going into guest mode. No need
 	 * to disable interrupts here.
 	 */
-	ti_work = READ_ONCE(current_thread_info()->flags);
+	ti_work = read_thread_flags();
 	if (!(ti_work & XFER_TO_GUEST_MODE_WORK))
 		return 0;
--
cgit v1.2.3

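The entry conversions above all share one loop shape: take a snapshot,
handle the work it shows, then re-snapshot before deciding whether to
exit. A minimal userspace sketch of that shape follows, assuming an
invented work mask and C11 atomics standing in for the kernel's flag
accessors; it is illustrative only, not the kernel's loop.

    #include <stdatomic.h>

    #define EXIT_WORK_MASK 0x3UL /* illustrative, not the kernel's mask */

    static _Atomic unsigned long ti_flags;

    static void handle_work(unsigned long work)
    {
            /* Handle and clear the bits seen in the snapshot. */
            atomic_fetch_and(&ti_flags, ~work);
    }

    static void exit_to_user_loop(void)
    {
            unsigned long ti_work = atomic_load(&ti_flags);

            while (ti_work & EXIT_WORK_MASK) {
                    handle_work(ti_work & EXIT_WORK_MASK);
                    /* Re-snapshot: new work may have been queued
                     * remotely while the last batch was handled. */
                    ti_work = atomic_load(&ti_flags);
            }
    }

    int main(void)
    {
            atomic_fetch_or(&ti_flags, 0x1UL);
            exit_to_user_loop();
            return 0;
    }
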
From 0569b245132c40015281610353935a50e282eb94 Mon Sep 17 00:00:00 2001
From: Mark Rutland
Date: Mon, 29 Nov 2021 13:06:45 +0000
Subject: sched: Snapshot thread flags

Some thread flags can be set remotely, and so even when IRQs are
disabled, the flags can change under our feet. Generally this is
unlikely to cause a problem in practice, but it is somewhat unsound,
and KCSAN will legitimately warn that there is a data race.

To avoid such issues, a snapshot of the flags has to be taken prior to
using them. Some places already use READ_ONCE() for that, others do
not. Convert them all to the new flag accessor helpers.

The READ_ONCE(ti->flags) .. cmpxchg(ti->flags) loop in
set_nr_if_polling() is left as-is for clarity.

Signed-off-by: Mark Rutland
Signed-off-by: Thomas Gleixner
Acked-by: Paul E. McKenney
Cc: Juri Lelli
Cc: Vincent Guittot
Link: https://lore.kernel.org/r/20211129130653.2037928-4-mark.rutland@arm.com
---
 kernel/sched/core.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 76f9deeaa942..704262798fb7 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -8520,7 +8520,7 @@ void sched_show_task(struct task_struct *p)
 	rcu_read_unlock();
 	pr_cont(" stack:%5lu pid:%5d ppid:%6d flags:0x%08lx\n",
 		free, task_pid_nr(p), ppid,
-		(unsigned long)task_thread_info(p)->flags);
+		read_task_thread_flags(p));

 	print_worker_info(KERN_INFO, p);
 	print_stop_info(KERN_INFO, p);
--
cgit v1.2.3


From 7fb2b24bb5c5876a3205cb5b9a580f81e8c9f744 Mon Sep 17 00:00:00 2001
From: Mark Rutland
Date: Mon, 29 Nov 2021 13:06:46 +0000
Subject: alpha: Snapshot thread flags

Some thread flags can be set remotely, and so even when IRQs are
disabled, the flags can change under our feet. Generally this is
unlikely to cause a problem in practice, but it is somewhat unsound,
and KCSAN will legitimately warn that there is a data race.

To avoid such issues, a snapshot of the flags has to be taken prior to
using them. Some places already use READ_ONCE() for that, others do
not. Convert them all to the new flag accessor helpers.

Signed-off-by: Mark Rutland
Signed-off-by: Thomas Gleixner
Acked-by: Paul E. McKenney
Cc: Ivan Kokshaysky
Cc: Matt Turner
Cc: Richard Henderson
Link: https://lore.kernel.org/r/20211129130653.2037928-5-mark.rutland@arm.com
---
 arch/alpha/kernel/signal.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/arch/alpha/kernel/signal.c b/arch/alpha/kernel/signal.c
index bc077babafab..d8ed71d5bed3 100644
--- a/arch/alpha/kernel/signal.c
+++ b/arch/alpha/kernel/signal.c
@@ -535,6 +535,6 @@ do_work_pending(struct pt_regs *regs, unsigned long thread_flags,
 			}
 		}
 		local_irq_disable();
-		thread_flags = current_thread_info()->flags;
+		thread_flags = read_thread_flags();
 	} while (thread_flags & _TIF_WORK_MASK);
 }
--
cgit v1.2.3

From 050e22bfc4f450d342dbb7eaea470858ad567bce Mon Sep 17 00:00:00 2001
From: Mark Rutland
Date: Mon, 29 Nov 2021 13:06:47 +0000
Subject: ARM: Snapshot thread flags

Some thread flags can be set remotely, and so even when IRQs are
disabled, the flags can change under our feet. Generally this is
unlikely to cause a problem in practice, but it is somewhat unsound,
and KCSAN will legitimately warn that there is a data race.

To avoid such issues, a snapshot of the flags has to be taken prior to
using them. Some places already use READ_ONCE() for that, others do
not. Convert them all to the new flag accessor helpers.

Signed-off-by: Mark Rutland
Signed-off-by: Thomas Gleixner
Acked-by: Paul E. McKenney
Cc: Russell King
Link: https://lore.kernel.org/r/20211129130653.2037928-6-mark.rutland@arm.com
---
 arch/arm/kernel/signal.c | 2 +-
 arch/arm/mm/alignment.c  | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
index a41e27ace391..c532a6041066 100644
--- a/arch/arm/kernel/signal.c
+++ b/arch/arm/kernel/signal.c
@@ -631,7 +631,7 @@ do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall)
 			}
 		}
 		local_irq_disable();
-		thread_flags = current_thread_info()->flags;
+		thread_flags = read_thread_flags();
 	} while (thread_flags & _TIF_WORK_MASK);
 	return 0;
 }
diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c
index ea81e89e7740..adbb3817d0be 100644
--- a/arch/arm/mm/alignment.c
+++ b/arch/arm/mm/alignment.c
@@ -990,7 +990,7 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
 	 * there is no work pending for this thread.
 	 */
 	raw_local_irq_disable();
-	if (!(current_thread_info()->flags & _TIF_WORK_MASK))
+	if (!(read_thread_flags() & _TIF_WORK_MASK))
 		set_cr(cr_no_alignment);
 }
--
cgit v1.2.3

From 342b3808786518ced347f40b59bae68664e20007 Mon Sep 17 00:00:00 2001
From: Mark Rutland
Date: Mon, 29 Nov 2021 13:06:48 +0000
Subject: arm64: Snapshot thread flags

Some thread flags can be set remotely, and so even when IRQs are
disabled, the flags can change under our feet. Generally this is
unlikely to cause a problem in practice, but it is somewhat unsound,
and KCSAN will legitimately warn that there is a data race.

To avoid such issues, a snapshot of the flags has to be taken prior to
using them. Some places already use READ_ONCE() for that, others do
not. Convert them all to the new flag accessor helpers.

Signed-off-by: Mark Rutland
Signed-off-by: Thomas Gleixner
Acked-by: Will Deacon
Acked-by: Paul E. McKenney
Cc: Catalin Marinas
Link: https://lore.kernel.org/r/20211129130653.2037928-7-mark.rutland@arm.com
---
 arch/arm64/kernel/entry-common.c | 2 +-
 arch/arm64/kernel/ptrace.c       | 4 ++--
 arch/arm64/kernel/signal.c       | 2 +-
 arch/arm64/kernel/syscall.c      | 4 ++--
 4 files changed, 6 insertions(+), 6 deletions(-)

diff --git a/arch/arm64/kernel/entry-common.c b/arch/arm64/kernel/entry-common.c
index f7408edf8571..ef7fcefb96bd 100644
--- a/arch/arm64/kernel/entry-common.c
+++ b/arch/arm64/kernel/entry-common.c
@@ -129,7 +129,7 @@ static __always_inline void prepare_exit_to_user_mode(struct pt_regs *regs)

 	local_daif_mask();

-	flags = READ_ONCE(current_thread_info()->flags);
+	flags = read_thread_flags();
 	if (unlikely(flags & _TIF_WORK_MASK))
 		do_notify_resume(regs, flags);
 }
diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c
index 88a9034fb9b5..33cac3d75150 100644
--- a/arch/arm64/kernel/ptrace.c
+++ b/arch/arm64/kernel/ptrace.c
@@ -1839,7 +1839,7 @@ static void tracehook_report_syscall(struct pt_regs *regs,

 int syscall_trace_enter(struct pt_regs *regs)
 {
-	unsigned long flags = READ_ONCE(current_thread_info()->flags);
+	unsigned long flags = read_thread_flags();

 	if (flags & (_TIF_SYSCALL_EMU | _TIF_SYSCALL_TRACE)) {
 		tracehook_report_syscall(regs, PTRACE_SYSCALL_ENTER);
@@ -1862,7 +1862,7 @@ int syscall_trace_enter(struct pt_regs *regs)

 void syscall_trace_exit(struct pt_regs *regs)
 {
-	unsigned long flags = READ_ONCE(current_thread_info()->flags);
+	unsigned long flags = read_thread_flags();

 	audit_syscall_exit(regs);
diff --git a/arch/arm64/kernel/signal.c b/arch/arm64/kernel/signal.c
index 8f6372b44b65..d8aaf4b6f432 100644
--- a/arch/arm64/kernel/signal.c
+++ b/arch/arm64/kernel/signal.c
@@ -948,7 +948,7 @@ void do_notify_resume(struct pt_regs *regs, unsigned long thread_flags)
 		}

 		local_daif_mask();
-		thread_flags = READ_ONCE(current_thread_info()->flags);
+		thread_flags = read_thread_flags();
 	} while (thread_flags & _TIF_WORK_MASK);
 }
diff --git a/arch/arm64/kernel/syscall.c b/arch/arm64/kernel/syscall.c
index 50a0f1a38e84..c938603b3ba0 100644
--- a/arch/arm64/kernel/syscall.c
+++ b/arch/arm64/kernel/syscall.c
@@ -81,7 +81,7 @@ void syscall_trace_exit(struct pt_regs *regs);
 static void el0_svc_common(struct pt_regs *regs, int scno, int sc_nr,
 			   const syscall_fn_t syscall_table[])
 {
-	unsigned long flags = current_thread_info()->flags;
+	unsigned long flags = read_thread_flags();

 	regs->orig_x0 = regs->regs[0];
 	regs->syscallno = scno;
@@ -148,7 +148,7 @@ static void el0_svc_common(struct pt_regs *regs, int scno, int sc_nr,
 	 */
 	if (!has_syscall_work(flags) && !IS_ENABLED(CONFIG_DEBUG_RSEQ)) {
 		local_daif_mask();
-		flags = current_thread_info()->flags;
+		flags = read_thread_flags();
 		if (!has_syscall_work(flags) && !(flags & _TIF_SINGLESTEP))
 			return;
 		local_daif_restore(DAIF_PROCCTX);
--
cgit v1.2.3


From e538c5849143a7f0aa97006cd45ce4c0c26d0744 Mon Sep 17 00:00:00 2001
From: Mark Rutland
Date: Mon, 29 Nov 2021 13:06:49 +0000
Subject: microblaze: Snapshot thread flags

Some thread flags can be set remotely, and so even when IRQs are
disabled, the flags can change under our feet. Generally this is
unlikely to cause a problem in practice, but it is somewhat unsound,
and KCSAN will legitimately warn that there is a data race.

To avoid such issues, a snapshot of the flags has to be taken prior to
using them. Some places already use READ_ONCE() for that, others do
not. Convert them all to the new flag accessor helpers.

Signed-off-by: Mark Rutland
Signed-off-by: Thomas Gleixner
Tested-by: Michal Simek
Acked-by: Paul E. McKenney
Link: https://lore.kernel.org/r/20211129130653.2037928-8-mark.rutland@arm.com
---
 arch/microblaze/kernel/signal.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/arch/microblaze/kernel/signal.c b/arch/microblaze/kernel/signal.c
index fc61eb0eb8dd..23e8a9336a29 100644
--- a/arch/microblaze/kernel/signal.c
+++ b/arch/microblaze/kernel/signal.c
@@ -283,7 +283,7 @@ static void do_signal(struct pt_regs *regs, int in_syscall)
 #ifdef DEBUG_SIG
 	pr_info("do signal: %p %d\n", regs, in_syscall);
 	pr_info("do signal2: %lx %lx %ld [%lx]\n", regs->pc, regs->r1,
-		regs->r12, current_thread_info()->flags);
+		regs->r12, read_thread_flags());
 #endif

 	if (get_signal(&ksig)) {
--
cgit v1.2.3


From 4ea7ce0a79b9450b71b9a88f9f5adbfe2bc3f2e5 Mon Sep 17 00:00:00 2001
From: Mark Rutland
Date: Mon, 29 Nov 2021 13:06:50 +0000
Subject: openrisc: Snapshot thread flags

Some thread flags can be set remotely, and so even when IRQs are
disabled, the flags can change under our feet. Generally this is
unlikely to cause a problem in practice, but it is somewhat unsound,
and KCSAN will legitimately warn that there is a data race.

To avoid such issues, a snapshot of the flags has to be taken prior to
using them. Some places already use READ_ONCE() for that, others do
not. Convert them all to the new flag accessor helpers.

Signed-off-by: Mark Rutland
Signed-off-by: Thomas Gleixner
Acked-by: Stafford Horne
Acked-by: Paul E. McKenney
Cc: Jonas Bonn
Cc: Stefan Kristiansson
Link: https://lore.kernel.org/r/20211129130653.2037928-9-mark.rutland@arm.com
---
 arch/openrisc/kernel/signal.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/arch/openrisc/kernel/signal.c b/arch/openrisc/kernel/signal.c
index 99516c9191c7..92c5b70740f5 100644
--- a/arch/openrisc/kernel/signal.c
+++ b/arch/openrisc/kernel/signal.c
@@ -313,7 +313,7 @@ do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall)
 			}
 		}
 		local_irq_disable();
-		thread_flags = current_thread_info()->flags;
+		thread_flags = read_thread_flags();
 	} while (thread_flags & _TIF_WORK_MASK);
 	return 0;
 }
--
cgit v1.2.3


From 08b0af5b2affbe7419853e8dd1330e4b3fe27125 Mon Sep 17 00:00:00 2001
From: Mark Rutland
Date: Mon, 29 Nov 2021 13:06:51 +0000
Subject: powerpc: Avoid discarding flags in system_call_exception()

Some thread flags can be set remotely, and so even when IRQs are
disabled, the flags can change under our feet. Thus, when setting flags
we must use an atomic operation rather than a plain read-modify-write
sequence, as a plain read-modify-write may discard flags which are
concurrently set by a remote thread, e.g.

	// task A                     // task B

	tmp = A->thread_info.flags;
	                              set_tsk_thread_flag(A, NEWFLAG_B);
	tmp |= NEWFLAG_A;
	A->thread_info.flags = tmp;

arch/powerpc/kernel/interrupt.c's system_call_exception() sets
_TIF_RESTOREALL in the thread info flags with a read-modify-write,
which may result in other flags being discarded.

Elsewhere in the file it uses clear_bits() to atomically remove flag
bits, so use set_bits() here for consistency with those.

There may be reasons (e.g. instrumentation) that prevent the use of
set_thread_flag() and clear_thread_flag() here, which would otherwise
be preferable.

Fixes: ae7aaecc3f2f78b7 ("powerpc/64s: system call rfscv workaround for TM bugs")
Signed-off-by: Mark Rutland
Signed-off-by: Thomas Gleixner
Cc: Eirik Fuller
Cc: Michael Ellerman
Cc: Nicholas Piggin
Link: https://lore.kernel.org/r/20211129130653.2037928-10-mark.rutland@arm.com
---
 arch/powerpc/kernel/interrupt.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/arch/powerpc/kernel/interrupt.c b/arch/powerpc/kernel/interrupt.c
index 835b626cd476..df048e331cbf 100644
--- a/arch/powerpc/kernel/interrupt.c
+++ b/arch/powerpc/kernel/interrupt.c
@@ -148,7 +148,7 @@ notrace long system_call_exception(long r3, long r4, long r5,
 	 */
 	if (IS_ENABLED(CONFIG_PPC_TRANSACTIONAL_MEM) &&
 	    unlikely(MSR_TM_TRANSACTIONAL(regs->msr)))
-		current_thread_info()->flags |= _TIF_RESTOREALL;
+		set_bits(_TIF_RESTOREALL, &current_thread_info()->flags);

 	/*
 	 * If the system call was made with a transaction active, doom it and
--
cgit v1.2.3

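The lost-update window described above is easy to reproduce in a
userspace analogue. In the sketch below (flag values invented, C11
atomics standing in for the kernel's bitops, build with -pthread), the
read-modify-write sequence can overwrite a bit that the other thread
sets between the load and the store, whereas a single atomic OR, like
powerpc's set_bits(), cannot lose it.

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdio.h>

    static _Atomic unsigned long flags;

    static void *remote_setter(void *arg)
    {
            (void)arg;
            atomic_fetch_or(&flags, 1UL << 1); /* like set_tsk_thread_flag() */
            return NULL;
    }

    int main(void)
    {
            pthread_t t;
            pthread_create(&t, NULL, remote_setter, NULL);

            /* BROKEN analogue of the old code: load, modify, store.
             * If the remote OR lands between the load and the store,
             * its bit is overwritten and lost. */
            unsigned long tmp = atomic_load(&flags);
            tmp |= 1UL << 0;                /* _TIF_RESTOREALL analogue */
            atomic_store(&flags, tmp);

            /* SAFE: one atomic OR, like powerpc's set_bits(). */
            atomic_fetch_or(&flags, 1UL << 0);

            pthread_join(t, NULL);
            printf("flags = %#lx\n", atomic_load(&flags));
            return 0;
    }
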
From 985faa78687de6e583cfd8b8094d87dcb80c33a6 Mon Sep 17 00:00:00 2001
From: Mark Rutland
Date: Mon, 29 Nov 2021 13:06:52 +0000
Subject: powerpc: Snapshot thread flags

Some thread flags can be set remotely, and so even when IRQs are
disabled, the flags can change under our feet. Generally this is
unlikely to cause a problem in practice, but it is somewhat unsound,
and KCSAN will legitimately warn that there is a data race.

To avoid such issues, a snapshot of the flags has to be taken prior to
using them. Some places already use READ_ONCE() for that, others do
not. Convert them all to the new flag accessor helpers.

Signed-off-by: Mark Rutland
Signed-off-by: Thomas Gleixner
Acked-by: Paul E. McKenney
Cc: Benjamin Herrenschmidt
Cc: Michael Ellerman
Cc: Paul Mackerras
Link: https://lore.kernel.org/r/20211129130653.2037928-11-mark.rutland@arm.com
---
 arch/powerpc/kernel/interrupt.c     | 13 ++++++-------
 arch/powerpc/kernel/ptrace/ptrace.c |  3 +--
 2 files changed, 7 insertions(+), 9 deletions(-)

diff --git a/arch/powerpc/kernel/interrupt.c b/arch/powerpc/kernel/interrupt.c
index df048e331cbf..563ebfcd16e2 100644
--- a/arch/powerpc/kernel/interrupt.c
+++ b/arch/powerpc/kernel/interrupt.c
@@ -181,7 +181,7 @@ notrace long system_call_exception(long r3, long r4, long r5,

 	local_irq_enable();

-	if (unlikely(current_thread_info()->flags & _TIF_SYSCALL_DOTRACE)) {
+	if (unlikely(read_thread_flags() & _TIF_SYSCALL_DOTRACE)) {
 		if (unlikely(trap_is_unsupported_scv(regs))) {
 			/* Unsupported scv vector */
 			_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
@@ -343,7 +343,7 @@ interrupt_exit_user_prepare_main(unsigned long ret, struct pt_regs *regs)
 	unsigned long ti_flags;

 again:
-	ti_flags = READ_ONCE(current_thread_info()->flags);
+	ti_flags = read_thread_flags();
 	while (unlikely(ti_flags & (_TIF_USER_WORK_MASK & ~_TIF_RESTORE_TM))) {
 		local_irq_enable();
 		if (ti_flags & _TIF_NEED_RESCHED) {
@@ -359,7 +359,7 @@ again:
 			do_notify_resume(regs, ti_flags);
 		}
 		local_irq_disable();
-		ti_flags = READ_ONCE(current_thread_info()->flags);
+		ti_flags = read_thread_flags();
 	}

 	if (IS_ENABLED(CONFIG_PPC_BOOK3S_64) && IS_ENABLED(CONFIG_PPC_FPU)) {
@@ -437,7 +437,7 @@ notrace unsigned long syscall_exit_prepare(unsigned long r3,
 	/* Check whether the syscall is issued inside a restartable sequence */
 	rseq_syscall(regs);

-	ti_flags = current_thread_info()->flags;
+	ti_flags = read_thread_flags();

 	if (unlikely(r3 >= (unsigned long)-MAX_ERRNO) && is_not_scv) {
 		if (likely(!(ti_flags & (_TIF_NOERROR | _TIF_RESTOREALL)))) {
@@ -532,8 +532,7 @@ notrace unsigned long interrupt_exit_kernel_prepare(struct pt_regs *regs)
 	unsigned long flags;
 	unsigned long ret = 0;
 	unsigned long kuap;
-	bool stack_store = current_thread_info()->flags &
-			   _TIF_EMULATE_STACK_STORE;
+	bool stack_store = read_thread_flags() & _TIF_EMULATE_STACK_STORE;

 	if (regs_is_unrecoverable(regs))
 		unrecoverable_exception(regs);
@@ -554,7 +553,7 @@ notrace unsigned long interrupt_exit_kernel_prepare(struct pt_regs *regs)
 again:
 	if (IS_ENABLED(CONFIG_PREEMPT)) {
 		/* Return to preemptible kernel context */
-		if (unlikely(current_thread_info()->flags & _TIF_NEED_RESCHED)) {
+		if (unlikely(read_thread_flags() & _TIF_NEED_RESCHED)) {
 			if (preempt_count() == 0)
 				preempt_schedule_irq();
 		}
diff --git a/arch/powerpc/kernel/ptrace/ptrace.c b/arch/powerpc/kernel/ptrace/ptrace.c
index 7c7093c17c45..c43f77e2ac31 100644
--- a/arch/powerpc/kernel/ptrace/ptrace.c
+++ b/arch/powerpc/kernel/ptrace/ptrace.c
@@ -260,8 +260,7 @@ long do_syscall_trace_enter(struct pt_regs *regs)
 {
 	u32 flags;

-	flags = READ_ONCE(current_thread_info()->flags) &
-		(_TIF_SYSCALL_EMU | _TIF_SYSCALL_TRACE);
+	flags = read_thread_flags() & (_TIF_SYSCALL_EMU | _TIF_SYSCALL_TRACE);

 	if (flags) {
 		int rc = tracehook_report_syscall_entry(regs);
--
cgit v1.2.3