author Thomas Gleixner <tglx@linutronix.de> 2020-05-04 15:25:30 +0200
committer Thomas Gleixner <tglx@linutronix.de> 2020-05-04 15:25:30 +0200
commit a8644aebec1152b992d5d54a05a538d338262944 (patch)
tree 5c0084fbcaf77545497d0ca92bc033693b185653
parent 9e96e646e6f890308110cb7c81f200f5211cf082 (diff)
parent 3b02a051d25d9600e9d403ad3043aed7de00160e (diff)
Merge branch 'locking/kcsan' into x86/entry
Pull in the pending KCSAN changes, as the changes to the atomics required for the INT3 fixup interfere with the KCSAN modifications of the atomics.
-rw-r--r--Documentation/dev-tools/index.rst1
-rw-r--r--Documentation/dev-tools/kcsan.rst266
-rw-r--r--MAINTAINERS11
-rw-r--r--Makefile3
-rw-r--r--arch/x86/Kconfig1
-rw-r--r--arch/x86/boot/Makefile2
-rw-r--r--arch/x86/boot/compressed/Makefile2
-rw-r--r--arch/x86/entry/vdso/Makefile6
-rw-r--r--arch/x86/include/asm/bitops.h6
-rw-r--r--arch/x86/kernel/Makefile4
-rw-r--r--arch/x86/kernel/cpu/Makefile3
-rw-r--r--arch/x86/kernel/e820.c10
-rw-r--r--arch/x86/lib/Makefile9
-rw-r--r--arch/x86/mm/Makefile4
-rw-r--r--arch/x86/purgatory/.gitignore1
-rw-r--r--arch/x86/purgatory/Makefile21
-rw-r--r--arch/x86/realmode/Makefile3
-rw-r--r--arch/x86/realmode/rm/Makefile3
-rw-r--r--drivers/firmware/efi/libstub/Makefile2
-rw-r--r--include/asm-generic/atomic-instrumented.h711
-rw-r--r--include/asm-generic/atomic-long.h331
-rw-r--r--include/asm-generic/bitops/instrumented-atomic.h14
-rw-r--r--include/asm-generic/bitops/instrumented-lock.h10
-rw-r--r--include/asm-generic/bitops/instrumented-non-atomic.h16
-rw-r--r--include/linux/atomic-fallback.h340
-rw-r--r--include/linux/compiler-clang.h11
-rw-r--r--include/linux/compiler-gcc.h6
-rw-r--r--include/linux/compiler.h60
-rw-r--r--include/linux/instrumented.h109
-rw-r--r--include/linux/kcsan-checks.h251
-rw-r--r--include/linux/kcsan.h72
-rw-r--r--include/linux/sched.h4
-rw-r--r--include/linux/seqlock.h51
-rw-r--r--include/linux/uaccess.h14
-rw-r--r--init/init_task.c9
-rw-r--r--init/main.c2
-rw-r--r--kernel/Makefile6
-rw-r--r--kernel/kcsan/Makefile14
-rw-r--r--kernel/kcsan/atomic.h27
-rw-r--r--kernel/kcsan/core.c730
-rw-r--r--kernel/kcsan/debugfs.c326
-rw-r--r--kernel/kcsan/encoding.h95
-rw-r--r--kernel/kcsan/kcsan.h138
-rw-r--r--kernel/kcsan/report.c515
-rw-r--r--kernel/kcsan/test.c131
-rw-r--r--kernel/locking/Makefile3
-rw-r--r--kernel/sched/Makefile6
-rw-r--r--kernel/trace/Makefile3
-rw-r--r--lib/Kconfig.debug2
-rw-r--r--lib/Kconfig.kcsan156
-rw-r--r--lib/Makefile4
-rw-r--r--lib/iov_iter.c7
-rw-r--r--lib/usercopy.c7
-rw-r--r--mm/Makefile8
-rw-r--r--scripts/Makefile.kcsan6
-rw-r--r--scripts/Makefile.lib10
-rwxr-xr-xscripts/atomic/fallbacks/acquire2
-rwxr-xr-xscripts/atomic/fallbacks/add_negative2
-rwxr-xr-xscripts/atomic/fallbacks/add_unless2
-rwxr-xr-xscripts/atomic/fallbacks/andnot2
-rwxr-xr-xscripts/atomic/fallbacks/dec2
-rwxr-xr-xscripts/atomic/fallbacks/dec_and_test2
-rwxr-xr-xscripts/atomic/fallbacks/dec_if_positive2
-rwxr-xr-xscripts/atomic/fallbacks/dec_unless_positive2
-rwxr-xr-xscripts/atomic/fallbacks/fence2
-rwxr-xr-xscripts/atomic/fallbacks/fetch_add_unless2
-rwxr-xr-xscripts/atomic/fallbacks/inc2
-rwxr-xr-xscripts/atomic/fallbacks/inc_and_test2
-rwxr-xr-xscripts/atomic/fallbacks/inc_not_zero2
-rwxr-xr-xscripts/atomic/fallbacks/inc_unless_negative2
-rwxr-xr-xscripts/atomic/fallbacks/read_acquire2
-rwxr-xr-xscripts/atomic/fallbacks/release2
-rwxr-xr-xscripts/atomic/fallbacks/set_release2
-rwxr-xr-xscripts/atomic/fallbacks/sub_and_test2
-rwxr-xr-xscripts/atomic/fallbacks/try_cmpxchg2
-rwxr-xr-xscripts/atomic/gen-atomic-fallback.sh2
-rwxr-xr-xscripts/atomic/gen-atomic-instrumented.sh9
-rwxr-xr-xscripts/atomic/gen-atomic-long.sh3
-rw-r--r--tools/objtool/check.c18
79 files changed, 3866 insertions, 767 deletions
diff --git a/Documentation/dev-tools/index.rst b/Documentation/dev-tools/index.rst
index 09dee10d2592..f7809c7b1ba9 100644
--- a/Documentation/dev-tools/index.rst
+++ b/Documentation/dev-tools/index.rst
@@ -21,6 +21,7 @@ whole; patches welcome!
kasan
ubsan
kmemleak
+ kcsan
gdb-kernel-debugging
kgdb
kselftest
diff --git a/Documentation/dev-tools/kcsan.rst b/Documentation/dev-tools/kcsan.rst
new file mode 100644
index 000000000000..65a0be513b7d
--- /dev/null
+++ b/Documentation/dev-tools/kcsan.rst
@@ -0,0 +1,266 @@
+The Kernel Concurrency Sanitizer (KCSAN)
+========================================
+
+Overview
+--------
+
+*Kernel Concurrency Sanitizer (KCSAN)* is a dynamic data race detector for
+kernel space. KCSAN is a sampling watchpoint-based data race detector. Key
+priorities in KCSAN's design are lack of false positives, scalability, and
+simplicity. More details can be found in `Implementation Details`_.
+
+KCSAN relies on compile-time instrumentation of memory accesses. It is
+supported by both GCC and Clang: GCC requires version 7.3.0 or later, and
+Clang requires version 7.0.0 or later.
+
+Usage
+-----
+
+To enable KCSAN, configure the kernel with::
+
+ CONFIG_KCSAN = y
+
+KCSAN provides several other configuration options to customize behaviour (see
+their respective help text for more info).
+
+Error reports
+~~~~~~~~~~~~~
+
+A typical data race report looks like this::
+
+ ==================================================================
+ BUG: KCSAN: data-race in generic_permission / kernfs_refresh_inode
+
+ write to 0xffff8fee4c40700c of 4 bytes by task 175 on cpu 4:
+ kernfs_refresh_inode+0x70/0x170
+ kernfs_iop_permission+0x4f/0x90
+ inode_permission+0x190/0x200
+ link_path_walk.part.0+0x503/0x8e0
+ path_lookupat.isra.0+0x69/0x4d0
+ filename_lookup+0x136/0x280
+ user_path_at_empty+0x47/0x60
+ vfs_statx+0x9b/0x130
+ __do_sys_newlstat+0x50/0xb0
+ __x64_sys_newlstat+0x37/0x50
+ do_syscall_64+0x85/0x260
+ entry_SYSCALL_64_after_hwframe+0x44/0xa9
+
+ read to 0xffff8fee4c40700c of 4 bytes by task 166 on cpu 6:
+ generic_permission+0x5b/0x2a0
+ kernfs_iop_permission+0x66/0x90
+ inode_permission+0x190/0x200
+ link_path_walk.part.0+0x503/0x8e0
+ path_lookupat.isra.0+0x69/0x4d0
+ filename_lookup+0x136/0x280
+ user_path_at_empty+0x47/0x60
+ do_faccessat+0x11a/0x390
+ __x64_sys_access+0x3c/0x50
+ do_syscall_64+0x85/0x260
+ entry_SYSCALL_64_after_hwframe+0x44/0xa9
+
+ Reported by Kernel Concurrency Sanitizer on:
+ CPU: 6 PID: 166 Comm: systemd-journal Not tainted 5.3.0-rc7+ #1
+ Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.12.0-1 04/01/2014
+ ==================================================================
+
+The header of the report provides a short summary of the functions involved in
+the race. It is followed by the access types and stack traces of the two
+threads involved in the data race.
+
+The other less common type of data race report looks like this::
+
+ ==================================================================
+ BUG: KCSAN: data-race in e1000_clean_rx_irq+0x551/0xb10
+
+ race at unknown origin, with read to 0xffff933db8a2ae6c of 1 bytes by interrupt on cpu 0:
+ e1000_clean_rx_irq+0x551/0xb10
+ e1000_clean+0x533/0xda0
+ net_rx_action+0x329/0x900
+ __do_softirq+0xdb/0x2db
+ irq_exit+0x9b/0xa0
+ do_IRQ+0x9c/0xf0
+ ret_from_intr+0x0/0x18
+ default_idle+0x3f/0x220
+ arch_cpu_idle+0x21/0x30
+ do_idle+0x1df/0x230
+ cpu_startup_entry+0x14/0x20
+ rest_init+0xc5/0xcb
+ arch_call_rest_init+0x13/0x2b
+ start_kernel+0x6db/0x700
+
+ Reported by Kernel Concurrency Sanitizer on:
+ CPU: 0 PID: 0 Comm: swapper/0 Not tainted 5.3.0-rc7+ #2
+ Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.12.0-1 04/01/2014
+ ==================================================================
+
+This report is generated when it was not possible to determine the other
+racing thread, but a race was inferred because the data value of the watched
+memory location changed while being watched. Such reports can occur due to
+missing instrumentation of the other access, or e.g. due to DMA accesses from
+a device.
+
+Selective analysis
+~~~~~~~~~~~~~~~~~~
+
+It may be desirable to disable data race detection for specific accesses,
+functions, compilation units, or entire subsystems. For static blacklisting,
+the following options are available:
+
+* KCSAN understands the ``data_race(expr)`` annotation, which tells KCSAN that
+ any data races due to accesses in ``expr`` should be ignored, and that the
+ resulting behaviour when encountering a data race is deemed safe (see the
+ example after this list).
+
+* Disabling data race detection for entire functions can be accomplished by
+ using the function attribute ``__no_kcsan`` (or ``__no_kcsan_or_inline`` for
+ ``__always_inline`` functions). To dynamically control for which functions
+ data races are reported, see the `debugfs`_ blacklist/whitelist feature.
+
+* To disable data race detection for a particular compilation unit, add to the
+ ``Makefile``::
+
+ KCSAN_SANITIZE_file.o := n
+
+* To disable data race detection for all compilation units listed in a
+ ``Makefile``, add to the respective ``Makefile``::
+
+ KCSAN_SANITIZE := n
+
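+For example, a minimal sketch of the ``data_race()`` and ``__no_kcsan``
+annotations mentioned above (the ``struct stats`` type and its uses are
+hypothetical, for illustration only)::
+
+ #include <linux/compiler.h>     /* data_race(), __no_kcsan */
+ #include <linux/printk.h>
+
+ struct stats {
+         unsigned long hits;
+ };
+
+ void count_hit(struct stats *s)
+ {
+         /*
+          * Data races on s->hits are intentionally ignored by KCSAN; an
+          * occasionally missed increment is deemed acceptable here.
+          */
+         data_race(s->hits++);
+ }
+
+ /* No access in this function is instrumented or reported by KCSAN. */
+ static __no_kcsan void print_stats(struct stats *s)
+ {
+         pr_info("hits=%lu\n", s->hits);
+ }
+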
+debugfs
+~~~~~~~
+
+* The file ``/sys/kernel/debug/kcsan`` can be read to get stats.
+
+* KCSAN can be turned on or off by writing ``on`` or ``off`` to
+ ``/sys/kernel/debug/kcsan``.
+
+* Writing ``!some_func_name`` to ``/sys/kernel/debug/kcsan`` adds
+ ``some_func_name`` to the report filter list, which (by default) blacklists
+ reporting data races where either one of the top stack frames is a function
+ in the list.
+
+* Writing either ``blacklist`` or ``whitelist`` to ``/sys/kernel/debug/kcsan``
+ changes the report filtering behaviour. For example, the blacklist feature
+ can be used to silence frequently occurring data races; the whitelist feature
+ can help with reproduction and testing of fixes.
+
+Data Races
+----------
+
+Informally, two operations *conflict* if they access the same memory location,
+and at least one of them is a write operation. In an execution, two memory
+operations from different threads form a **data race** if they *conflict*, at
+least one of them is a *plain access* (non-atomic), and they are *unordered* in
+the "happens-before" order according to the `LKMM
+<../../tools/memory-model/Documentation/explanation.txt>`_.
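+
+As a contrived illustration (not code from the kernel tree), the following two
+functions form a data race if they can run concurrently, because both access
+``shared`` with plain operations and one of the accesses is a write::
+
+ int shared;    /* no locking and no marking of accesses */
+
+ void writer(void)
+ {
+         shared = 42;            /* plain write */
+ }
+
+ int reader(void)
+ {
+         return shared;          /* plain read, conflicts with the write */
+ }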
+
+Relationship with the Linux Kernel Memory Model (LKMM)
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The LKMM defines the propagation and ordering rules of various memory
+operations, which gives developers the ability to reason about concurrent code.
+Ultimately this allows one to determine the possible executions of concurrent
+code, and whether that code is free from data races.
+
+KCSAN is aware of *atomic* accesses (``READ_ONCE``, ``WRITE_ONCE``,
+``atomic_*``, etc.), but is oblivious of any ordering guarantees. In other
+words, KCSAN assumes that as long as a plain access is not observed to race
+with another conflicting access, memory operations are correctly ordered.
+
+This means that KCSAN will not report *potential* data races due to missing
+memory ordering. If, however, missing memory ordering (that is observable with
+a particular compiler and architecture) leads to an observable data race (e.g.
+entering a critical section erroneously), KCSAN would report the resulting
+data race.
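+
+For example (a sketch building on the definitions above, not kernel code),
+marking both conflicting accesses turns the earlier plain-access example into
+a pair of accesses that KCSAN treats as atomic and does not report::
+
+ int shared;
+
+ void writer(void)
+ {
+         WRITE_ONCE(shared, 42);         /* marked (atomic) write */
+ }
+
+ int reader(void)
+ {
+         return READ_ONCE(shared);       /* marked (atomic) read */
+ }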
+
+Race conditions vs. data races
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Race conditions are logic bugs, where unexpected interleaving of racing
+concurrent operations results in an erroneous state.
+
+Data races on the other hand are defined at the *memory model/language level*.
+Many data races are also harmful race conditions, which a tool like KCSAN
+reports! However, not all data races are race conditions and vice-versa.
+KCSAN's intent is to report data races according to the LKMM. A data race
+detector can only work at the memory model/language level.
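+
+To illustrate the distinction (a contrived sketch; ``free_the_object()`` is a
+hypothetical helper), the function below contains no data race, since both
+accesses are marked, yet it is still a race condition: another thread may
+modify ``refcount`` between the check and the update::
+
+ int refcount;   /* only ever accessed with marked operations */
+
+ void put_object(void)
+ {
+         if (READ_ONCE(refcount) == 1) {
+                 /* Not atomic with the check above: a race condition. */
+                 WRITE_ONCE(refcount, 0);
+                 free_the_object();
+         }
+ }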
+
+Deeper analysis, to find high-level race conditions, requires conveying the
+intended kernel logic to a tool. This requires (1) the developer writing a
+specification or model of their code, and then (2) the tool verifying that the
+implementation matches. This has been done for small bits of code using model
+checkers and other formal methods, but it does not scale to the level of what
+can be covered with a dynamic-analysis-based data race detector such as KCSAN.
+
+For reasons outlined in this `article <https://lwn.net/Articles/793253/>`_,
+data races can be much more subtle, but can cause no less harm than high-level
+race conditions.
+
+Implementation Details
+----------------------
+
+The general approach is inspired by `DataCollider
+<http://usenix.org/legacy/events/osdi10/tech/full_papers/Erickson.pdf>`_.
+Unlike DataCollider, KCSAN does not use hardware watchpoints, but instead
+relies on compiler instrumentation. Watchpoints are implemented using an
+efficient encoding that stores access type, size, and address in a long; the
+benefits of using "soft watchpoints" are portability and greater flexibility in
+limiting which accesses trigger a watchpoint.
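+
+A toy version of such an encoding (assuming a 64-bit ``long``; field widths
+and names are invented for this illustration, the actual layout is in
+``kernel/kcsan/encoding.h``) could look like this::
+
+ #include <linux/types.h>
+
+ /* 1 bit: is the access a write?  3 bits: access size.  Rest: address. */
+ #define WP_WRITE        (1UL << 63)
+ #define WP_SIZE_SHIFT   60
+ #define WP_ADDR_MASK    ((1UL << WP_SIZE_SHIFT) - 1)
+
+ static unsigned long encode_watchpoint(unsigned long addr, size_t size,
+                                        bool is_write)
+ {
+         /* The size is truncated to the toy 3-bit field. */
+         return (is_write ? WP_WRITE : 0) |
+                (((unsigned long)size & 0x7) << WP_SIZE_SHIFT) |
+                (addr & WP_ADDR_MASK);
+ }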
+
+More specifically, KCSAN requires instrumenting plain (unmarked, non-atomic)
+memory operations; for each instrumented plain access:
+
+1. Check if a matching watchpoint exists; if yes, and at least one access is a
+ write, then we encountered a racing access.
+
+2. Periodically, if no matching watchpoint exists, set up a watchpoint and
+ stall for a small delay.
+
+3. Also check the data value before the delay, and re-check the data value
+ after the delay; if the values mismatch, we infer a race of unknown origin.
+
+To detect data races between plain and atomic memory operations, KCSAN also
+annotates atomic accesses, but only to check if a watchpoint exists
+(``kcsan_check_atomic_*``); i.e. KCSAN never sets up a watchpoint on atomic
+accesses.
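+
+A heavily simplified sketch of the per-access logic described above (all
+helper functions here are illustrative stand-ins, not the real interfaces in
+``kernel/kcsan/core.c``)::
+
+ void check_access(const volatile void *ptr, size_t size, bool is_write)
+ {
+         /* Matches only if at least one of the two accesses is a write. */
+         long *wp = find_watchpoint(ptr, size, is_write);
+         u64 old;
+
+         if (wp) {
+                 /* 1. A conflicting access hit an existing watchpoint. */
+                 report_race(wp, ptr, size, is_write);
+                 return;
+         }
+
+         /* 2. Only a small sample of accesses sets up a watchpoint. */
+         if (!should_watch(ptr, size))
+                 return;
+
+         wp = setup_watchpoint(ptr, size, is_write);
+
+         /* 3. Snapshot the value, stall, and re-check for a silent change. */
+         old = read_value(ptr, size);
+         delay();
+         if (read_value(ptr, size) != old)
+                 report_race_unknown_origin(ptr, size, is_write);
+
+         remove_watchpoint(wp);
+ }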
+
+Key Properties
+~~~~~~~~~~~~~~
+
+1. **Memory Overhead:** The current implementation uses a small array of longs
+ to encode watchpoint information, which is negligible.
+
+2. **Performance Overhead:** KCSAN's runtime aims to be minimal, using an
+ efficient watchpoint encoding that does not require acquiring any shared
+ locks in the fast-path. For kernel boot on a system with 8 CPUs:
+
+ - 5.0x slow-down with the default KCSAN config;
+ - 2.8x slow-down from runtime fast-path overhead only (set very large
+ ``KCSAN_SKIP_WATCH`` and unset ``KCSAN_SKIP_WATCH_RANDOMIZE``).
+
+3. **Annotation Overheads:** Minimal annotations are required outside the KCSAN
+ runtime. As a result, maintenance overheads are minimal as the kernel
+ evolves.
+
+4. **Detects Racy Writes from Devices:** Due to checking data values upon
+ setting up watchpoints, racy writes from devices can also be detected.
+
+5. **Memory Ordering:** KCSAN is *not* explicitly aware of the LKMM's ordering
+ rules; this may result in missed data races (false negatives).
+
+6. **Analysis Accuracy:** For observed executions, due to using a sampling
+ strategy, the analysis is *unsound* (false negatives possible), but aims to
+ be complete (no false positives).
+
+Alternatives Considered
+-----------------------
+
+An alternative data race detection approach for the kernel can be found in
+`Kernel Thread Sanitizer (KTSAN) <https://github.com/google/ktsan/wiki>`_.
+KTSAN is a happens-before data race detector, which explicitly establishes the
+happens-before order between memory operations, which can then be used to
+determine data races as defined in `Data Races`_. To build a correct
+happens-before relation, KTSAN must be aware of all ordering rules of the LKMM
+and synchronization primitives. Unfortunately, any omission leads to false
+positives, which is especially problematic in the context of the kernel, which
+includes numerous custom synchronization mechanisms. Furthermore, KTSAN's
+implementation requires metadata for each memory location (shadow memory);
+currently, for each page, KTSAN requires 4 pages of shadow memory.
diff --git a/MAINTAINERS b/MAINTAINERS
index 26f281d9f32a..633564a659dd 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -9185,6 +9185,17 @@ F: Documentation/kbuild/kconfig*
F: scripts/Kconfig.include
F: scripts/kconfig/
+KCSAN
+M: Marco Elver <elver@google.com>
+R: Dmitry Vyukov <dvyukov@google.com>
+L: kasan-dev@googlegroups.com
+S: Maintained
+F: Documentation/dev-tools/kcsan.rst
+F: include/linux/kcsan*.h
+F: kernel/kcsan/
+F: lib/Kconfig.kcsan
+F: scripts/Makefile.kcsan
+
KDUMP
M: Dave Young <dyoung@redhat.com>
M: Baoquan He <bhe@redhat.com>
diff --git a/Makefile b/Makefile
index 679f302a8b8b..d0b712029cd1 100644
--- a/Makefile
+++ b/Makefile
@@ -499,7 +499,7 @@ export KBUILD_HOSTCXXFLAGS KBUILD_HOSTLDFLAGS KBUILD_HOSTLDLIBS LDFLAGS_MODULE
export KBUILD_CPPFLAGS NOSTDINC_FLAGS LINUXINCLUDE OBJCOPYFLAGS KBUILD_LDFLAGS
export KBUILD_CFLAGS CFLAGS_KERNEL CFLAGS_MODULE
-export CFLAGS_KASAN CFLAGS_KASAN_NOSANITIZE CFLAGS_UBSAN
+export CFLAGS_KASAN CFLAGS_KASAN_NOSANITIZE CFLAGS_UBSAN CFLAGS_KCSAN
export KBUILD_AFLAGS AFLAGS_KERNEL AFLAGS_MODULE
export KBUILD_AFLAGS_MODULE KBUILD_CFLAGS_MODULE KBUILD_LDFLAGS_MODULE
export KBUILD_AFLAGS_KERNEL KBUILD_CFLAGS_KERNEL
@@ -920,6 +920,7 @@ endif
include scripts/Makefile.kasan
include scripts/Makefile.extrawarn
include scripts/Makefile.ubsan
+include scripts/Makefile.kcsan
# Add user supplied CPPFLAGS, AFLAGS and CFLAGS as the last assignments
KBUILD_CPPFLAGS += $(KCPPFLAGS)
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 1197b5596d5a..43a0b7d880b6 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -229,6 +229,7 @@ config X86
select THREAD_INFO_IN_TASK
select USER_STACKTRACE_SUPPORT
select VIRT_TO_BUS
+ select HAVE_ARCH_KCSAN if X86_64
select X86_FEATURE_NAMES if PROC_FS
select PROC_PID_ARCH_STATUS if PROC_FS
imply IMA_SECURE_AND_OR_TRUSTED_BOOT if EFI
diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
index e17be90ab312..d7aa1c3a6b25 100644
--- a/arch/x86/boot/Makefile
+++ b/arch/x86/boot/Makefile
@@ -9,7 +9,9 @@
# Changed by many, many contributors over the years.
#
+# Sanitizer runtimes are unavailable and cannot be linked for early boot code.
KASAN_SANITIZE := n
+KCSAN_SANITIZE := n
OBJECT_FILES_NON_STANDARD := y
# Kernel does not boot with kcov instrumentation here.
diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
index 5f7c262bcc99..7619742f91c9 100644
--- a/arch/x86/boot/compressed/Makefile
+++ b/arch/x86/boot/compressed/Makefile
@@ -17,7 +17,9 @@
# (see scripts/Makefile.lib size_append)
# compressed vmlinux.bin.all + u32 size of vmlinux.bin.all
+# Sanitizer runtimes are unavailable and cannot be linked for early boot code.
KASAN_SANITIZE := n
+KCSAN_SANITIZE := n
OBJECT_FILES_NON_STANDARD := y
# Prevents link failures: __sanitizer_cov_trace_pc() is not linked in.
diff --git a/arch/x86/entry/vdso/Makefile b/arch/x86/entry/vdso/Makefile
index 433a1259f61d..ecf6128c9551 100644
--- a/arch/x86/entry/vdso/Makefile
+++ b/arch/x86/entry/vdso/Makefile
@@ -10,8 +10,11 @@ ARCH_REL_TYPE_ABS += R_386_GLOB_DAT|R_386_JMP_SLOT|R_386_RELATIVE
include $(srctree)/lib/vdso/Makefile
KBUILD_CFLAGS += $(DISABLE_LTO)
+
+# Sanitizer runtimes are unavailable and cannot be linked here.
KASAN_SANITIZE := n
UBSAN_SANITIZE := n
+KCSAN_SANITIZE := n
OBJECT_FILES_NON_STANDARD := y
# Prevents link failures: __sanitizer_cov_trace_pc() is not linked in.
@@ -27,6 +30,9 @@ vobjs-y := vdso-note.o vclock_gettime.o vgetcpu.o
# files to link into kernel
obj-y += vma.o
+KASAN_SANITIZE_vma.o := y
+UBSAN_SANITIZE_vma.o := y
+KCSAN_SANITIZE_vma.o := y
OBJECT_FILES_NON_STANDARD_vma.o := n
# vDSO images to build
diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h
index 53f246e9df5a..b392571c1f1d 100644
--- a/arch/x86/include/asm/bitops.h
+++ b/arch/x86/include/asm/bitops.h
@@ -201,8 +201,12 @@ arch_test_and_change_bit(long nr, volatile unsigned long *addr)
return GEN_BINARY_RMWcc(LOCK_PREFIX __ASM_SIZE(btc), *addr, c, "Ir", nr);
}
-static __always_inline bool constant_test_bit(long nr, const volatile unsigned long *addr)
+static __no_kcsan_or_inline bool constant_test_bit(long nr, const volatile unsigned long *addr)
{
+ /*
+ * Because this is a plain access, we need to disable KCSAN here to
+ * avoid double instrumentation via instrumented bitops.
+ */
return ((1UL << (nr & (BITS_PER_LONG-1))) &
(addr[nr >> _BITOPS_LONG_SHIFT])) != 0;
}
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index ba89cabe5fcf..d6d61c4455fa 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -28,6 +28,10 @@ KASAN_SANITIZE_dumpstack_$(BITS).o := n
KASAN_SANITIZE_stacktrace.o := n
KASAN_SANITIZE_paravirt.o := n
+# With some compiler versions the generated code results in boot hangs, caused
+# by several compilation units. To be safe, disable all instrumentation.
+KCSAN_SANITIZE := n
+
OBJECT_FILES_NON_STANDARD_test_nx.o := y
OBJECT_FILES_NON_STANDARD_paravirt_patch.o := y
diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
index 7dc4ad68eb41..dba6a83bc349 100644
--- a/arch/x86/kernel/cpu/Makefile
+++ b/arch/x86/kernel/cpu/Makefile
@@ -13,6 +13,9 @@ endif
KCOV_INSTRUMENT_common.o := n
KCOV_INSTRUMENT_perf_event.o := n
+# As above, instrumenting secondary CPU boot code causes boot hangs.
+KCSAN_SANITIZE_common.o := n
+
# Make sure load_percpu_segment has no stackprotector
nostackp := $(call cc-option, -fno-stack-protector)
CFLAGS_common.o := $(nostackp)
diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
index c5399e80c59c..c92029651b85 100644
--- a/arch/x86/kernel/e820.c
+++ b/arch/x86/kernel/e820.c
@@ -999,7 +999,15 @@ void __init e820__reserve_setup_data(void)
while (pa_data) {
data = early_memremap(pa_data, sizeof(*data));
e820__range_update(pa_data, sizeof(*data)+data->len, E820_TYPE_RAM, E820_TYPE_RESERVED_KERN);
- e820__range_update_kexec(pa_data, sizeof(*data)+data->len, E820_TYPE_RAM, E820_TYPE_RESERVED_KERN);
+
+ /*
+ * SETUP_EFI is supplied by kexec and does not need to be
+ * reserved.
+ */
+ if (data->type != SETUP_EFI)
+ e820__range_update_kexec(pa_data,
+ sizeof(*data) + data->len,
+ E820_TYPE_RAM, E820_TYPE_RESERVED_KERN);
if (data->type == SETUP_INDIRECT &&
((struct setup_indirect *)data->data)->type != SETUP_INDIRECT) {
diff --git a/arch/x86/lib/Makefile b/arch/x86/lib/Makefile
index 5246db42de45..6110bce7237b 100644
--- a/arch/x86/lib/Makefile
+++ b/arch/x86/lib/Makefile
@@ -6,10 +6,19 @@
# Produces uninteresting flaky coverage.
KCOV_INSTRUMENT_delay.o := n
+# KCSAN uses udelay for introducing watchpoint delay; avoid recursion.
+KCSAN_SANITIZE_delay.o := n
+ifdef CONFIG_KCSAN
+# In case KCSAN+lockdep+ftrace are enabled, disable ftrace for delay.o to avoid
+# lockdep -> [other libs] -> KCSAN -> udelay -> ftrace -> lockdep recursion.
+CFLAGS_REMOVE_delay.o = $(CC_FLAGS_FTRACE)
+endif
+
# Early boot use of cmdline; don't instrument it
ifdef CONFIG_AMD_MEM_ENCRYPT
KCOV_INSTRUMENT_cmdline.o := n
KASAN_SANITIZE_cmdline.o := n
+KCSAN_SANITIZE_cmdline.o := n
ifdef CONFIG_FUNCTION_TRACER
CFLAGS_REMOVE_cmdline.o = -pg
diff --git a/arch/x86/mm/Makefile b/arch/x86/mm/Makefile
index 98f7c6fa2eaa..f7fd0e868c9c 100644
--- a/arch/x86/mm/Makefile
+++ b/arch/x86/mm/Makefile
@@ -7,6 +7,10 @@ KCOV_INSTRUMENT_mem_encrypt_identity.o := n
KASAN_SANITIZE_mem_encrypt.o := n
KASAN_SANITIZE_mem_encrypt_identity.o := n
+# Disable KCSAN entirely, because otherwise we get warnings that some functions
+# reference __initdata sections.
+KCSAN_SANITIZE := n
+
ifdef CONFIG_FUNCTION_TRACER
CFLAGS_REMOVE_mem_encrypt.o = -pg
CFLAGS_REMOVE_mem_encrypt_identity.o = -pg
diff --git a/arch/x86/purgatory/.gitignore b/arch/x86/purgatory/.gitignore
new file mode 100644
index 000000000000..d2be1500671d
--- /dev/null
+++ b/arch/x86/purgatory/.gitignore
@@ -0,0 +1 @@
+purgatory.chk
diff --git a/arch/x86/purgatory/Makefile b/arch/x86/purgatory/Makefile
index fb4ee5444379..b04e6e72a592 100644
--- a/arch/x86/purgatory/Makefile
+++ b/arch/x86/purgatory/Makefile
@@ -14,10 +14,18 @@ $(obj)/sha256.o: $(srctree)/lib/crypto/sha256.c FORCE
CFLAGS_sha256.o := -D__DISABLE_EXPORTS
-LDFLAGS_purgatory.ro := -e purgatory_start -r --no-undefined -nostdlib -z nodefaultlib
-targets += purgatory.ro
-
+# When linking purgatory.ro with -r unresolved symbols are not checked,
+# also link a purgatory.chk binary without -r to check for unresolved symbols.
+PURGATORY_LDFLAGS := -e purgatory_start -nostdlib -z nodefaultlib
+LDFLAGS_purgatory.ro := -r $(PURGATORY_LDFLAGS)
+LDFLAGS_purgatory.chk := $(PURGATORY_LDFLAGS)
+targets += purgatory.ro purgatory.chk
+
+# Sanitizer, etc. runtimes are unavailable and cannot be linked here.
+GCOV_PROFILE := n
KASAN_SANITIZE := n
+UBSAN_SANITIZE := n
+KCSAN_SANITIZE := n
KCOV_INSTRUMENT := n
# These are adjustments to the compiler flags used for objects that
@@ -25,7 +33,7 @@ KCOV_INSTRUMENT := n
PURGATORY_CFLAGS_REMOVE := -mcmodel=kernel
PURGATORY_CFLAGS := -mcmodel=large -ffreestanding -fno-zero-initialized-in-bss
-PURGATORY_CFLAGS += $(DISABLE_STACKLEAK_PLUGIN)
+PURGATORY_CFLAGS += $(DISABLE_STACKLEAK_PLUGIN) -DDISABLE_BRANCH_PROFILING
# Default KBUILD_CFLAGS can have -pg option set when FTRACE is enabled. That
# in turn leaves some undefined symbols like __fentry__ in purgatory and not
@@ -58,12 +66,15 @@ CFLAGS_string.o += $(PURGATORY_CFLAGS)
$(obj)/purgatory.ro: $(PURGATORY_OBJS) FORCE
$(call if_changed,ld)
+$(obj)/purgatory.chk: $(obj)/purgatory.ro FORCE
+ $(call if_changed,ld)
+
targets += kexec-purgatory.c
quiet_cmd_bin2c = BIN2C $@
cmd_bin2c = $(objtree)/scripts/bin2c kexec_purgatory < $< > $@
-$(obj)/kexec-purgatory.c: $(obj)/purgatory.ro FORCE
+$(obj)/kexec-purgatory.c: $(obj)/purgatory.ro $(obj)/purgatory.chk FORCE
$(call if_changed,bin2c)
obj-$(CONFIG_KEXEC_FILE) += kexec-purgatory.o
diff --git a/arch/x86/realmode/Makefile b/arch/x86/realmode/Makefile
index 682c895753d9..6b1f3a4eeb44 100644
--- a/arch/x86/realmode/Makefile
+++ b/arch/x86/realmode/Makefile
@@ -6,7 +6,10 @@
# for more details.
#
#
+
+# Sanitizer runtimes are unavailable and cannot be linked here.
KASAN_SANITIZE := n
+KCSAN_SANITIZE := n
OBJECT_FILES_NON_STANDARD := y
subdir- := rm
diff --git a/arch/x86/realmode/rm/Makefile b/arch/x86/realmode/rm/Makefile
index b11ec5d8f8ac..83f1b6a56449 100644
--- a/arch/x86/realmode/rm/Makefile
+++ b/arch/x86/realmode/rm/Makefile
@@ -6,7 +6,10 @@
# for more details.
#
#
+
+# Sanitizer runtimes are unavailable and cannot be linked here.
KASAN_SANITIZE := n
+KCSAN_SANITIZE := n
OBJECT_FILES_NON_STANDARD := y
# Prevents link failures: __sanitizer_cov_trace_pc() is not linked in.
diff --git a/drivers/firmware/efi/libstub/Makefile b/drivers/firmware/efi/libstub/Makefile
index 094eabdecfe6..dd31237fba2e 100644
--- a/drivers/firmware/efi/libstub/Makefile
+++ b/drivers/firmware/efi/libstub/Makefile
@@ -33,7 +33,9 @@ KBUILD_CFLAGS := $(cflags-y) -DDISABLE_BRANCH_PROFILING \
-D__DISABLE_EXPORTS
GCOV_PROFILE := n
+# Sanitizer runtimes are unavailable and cannot be linked here.
KASAN_SANITIZE := n
+KCSAN_SANITIZE := n
UBSAN_SANITIZE := n
OBJECT_FILES_NON_STANDARD := y
diff --git a/include/asm-generic/atomic-instrumented.h b/include/asm-generic/atomic-instrumented.h
index e8730c6b9fe2..379986e40159 100644
--- a/include/asm-generic/atomic-instrumented.h
+++ b/include/asm-generic/atomic-instrumented.h
@@ -18,1623 +18,1624 @@
#define _ASM_GENERIC_ATOMIC_INSTRUMENTED_H
#include <linux/build_bug.h>
-#include <linux/kasan-checks.h>
+#include <linux/compiler.h>
+#include <linux/instrumented.h>
-static inline int
+static __always_inline int
atomic_read(const atomic_t *v)
{
- kasan_check_read(v, sizeof(*v));
+ instrument_atomic_read(v, sizeof(*v));
return arch_atomic_read(v);
}
#define atomic_read atomic_read
#if defined(arch_atomic_read_acquire)
-static inline int
+static __always_inline int
atomic_read_acquire(const atomic_t *v)
{
- kasan_check_read(v, sizeof(*v));
+ instrument_atomic_read(v, sizeof(*v));
return arch_atomic_read_acquire(v);
}
#define atomic_read_acquire atomic_read_acquire
#endif
-static inline void
+static __always_inline void
atomic_set(atomic_t *v, int i)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
arch_atomic_set(v, i);
}
#define atomic_set atomic_set
#if defined(arch_atomic_set_release)
-static inline void
+static __always_inline void
atomic_set_release(atomic_t *v, int i)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
arch_atomic_set_release(v, i);
}
#define atomic_set_release atomic_set_release
#endif
-static inline void
+static __always_inline void
atomic_add(int i, atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
arch_atomic_add(i, v);
}
#define atomic_add atomic_add
#if !defined(arch_atomic_add_return_relaxed) || defined(arch_atomic_add_return)
-static inline int
+static __always_inline int
atomic_add_return(int i, atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_add_return(i, v);
}
#define atomic_add_return atomic_add_return
#endif
#if defined(arch_atomic_add_return_acquire)
-static inline int
+static __always_inline int
atomic_add_return_acquire(int i, atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_add_return_acquire(i, v);
}
#define atomic_add_return_acquire atomic_add_return_acquire
#endif
#if defined(arch_atomic_add_return_release)
-static inline int
+static __always_inline int
atomic_add_return_release(int i, atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_add_return_release(i, v);
}
#define atomic_add_return_release atomic_add_return_release
#endif
#if defined(arch_atomic_add_return_relaxed)
-static inline int
+static __always_inline int
atomic_add_return_relaxed(int i, atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_add_return_relaxed(i, v);
}
#define atomic_add_return_relaxed atomic_add_return_relaxed
#endif
#if !defined(arch_atomic_fetch_add_relaxed) || defined(arch_atomic_fetch_add)
-static inline int
+static __always_inline int
atomic_fetch_add(int i, atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_fetch_add(i, v);
}
#define atomic_fetch_add atomic_fetch_add
#endif
#if defined(arch_atomic_fetch_add_acquire)
-static inline int
+static __always_inline int
atomic_fetch_add_acquire(int i, atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_fetch_add_acquire(i, v);
}
#define atomic_fetch_add_acquire atomic_fetch_add_acquire
#endif
#if defined(arch_atomic_fetch_add_release)
-static inline int
+static __always_inline int
atomic_fetch_add_release(int i, atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_fetch_add_release(i, v);
}
#define atomic_fetch_add_release atomic_fetch_add_release
#endif
#if defined(arch_atomic_fetch_add_relaxed)
-static inline int
+static __always_inline int
atomic_fetch_add_relaxed(int i, atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_fetch_add_relaxed(i, v);
}
#define atomic_fetch_add_relaxed atomic_fetch_add_relaxed
#endif
-static inline void
+static __always_inline void
atomic_sub(int i, atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
arch_atomic_sub(i, v);
}
#define atomic_sub atomic_sub
#if !defined(arch_atomic_sub_return_relaxed) || defined(arch_atomic_sub_return)
-static inline int
+static __always_inline int
atomic_sub_return(int i, atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_sub_return(i, v);
}
#define atomic_sub_return atomic_sub_return
#endif
#if defined(arch_atomic_sub_return_acquire)
-static inline int
+static __always_inline int
atomic_sub_return_acquire(int i, atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_sub_return_acquire(i, v);
}
#define atomic_sub_return_acquire atomic_sub_return_acquire
#endif
#if defined(arch_atomic_sub_return_release)
-static inline int
+static __always_inline int
atomic_sub_return_release(int i, atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_sub_return_release(i, v);
}
#define atomic_sub_return_release atomic_sub_return_release
#endif
#if defined(arch_atomic_sub_return_relaxed)
-static inline int
+static __always_inline int
atomic_sub_return_relaxed(int i, atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_sub_return_relaxed(i, v);
}
#define atomic_sub_return_relaxed atomic_sub_return_relaxed
#endif
#if !defined(arch_atomic_fetch_sub_relaxed) || defined(arch_atomic_fetch_sub)
-static inline int
+static __always_inline int
atomic_fetch_sub(int i, atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_fetch_sub(i, v);
}
#define atomic_fetch_sub atomic_fetch_sub
#endif
#if defined(arch_atomic_fetch_sub_acquire)
-static inline int
+static __always_inline int
atomic_fetch_sub_acquire(int i, atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_fetch_sub_acquire(i, v);
}
#define atomic_fetch_sub_acquire atomic_fetch_sub_acquire
#endif
#if defined(arch_atomic_fetch_sub_release)
-static inline int
+static __always_inline int
atomic_fetch_sub_release(int i, atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_fetch_sub_release(i, v);
}
#define atomic_fetch_sub_release atomic_fetch_sub_release
#endif
#if defined(arch_atomic_fetch_sub_relaxed)
-static inline int
+static __always_inline int
atomic_fetch_sub_relaxed(int i, atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_fetch_sub_relaxed(i, v);
}
#define atomic_fetch_sub_relaxed atomic_fetch_sub_relaxed
#endif
#if defined(arch_atomic_inc)
-static inline void
+static __always_inline void
atomic_inc(atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
arch_atomic_inc(v);
}
#define atomic_inc atomic_inc
#endif
#if defined(arch_atomic_inc_return)
-static inline int
+static __always_inline int
atomic_inc_return(atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_inc_return(v);
}
#define atomic_inc_return atomic_inc_return
#endif
#if defined(arch_atomic_inc_return_acquire)
-static inline int
+static __always_inline int
atomic_inc_return_acquire(atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_inc_return_acquire(v);
}
#define atomic_inc_return_acquire atomic_inc_return_acquire
#endif
#if defined(arch_atomic_inc_return_release)
-static inline int
+static __always_inline int
atomic_inc_return_release(atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_inc_return_release(v);
}
#define atomic_inc_return_release atomic_inc_return_release
#endif
#if defined(arch_atomic_inc_return_relaxed)
-static inline int
+static __always_inline int
atomic_inc_return_relaxed(atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_inc_return_relaxed(v);
}
#define atomic_inc_return_relaxed atomic_inc_return_relaxed
#endif
#if defined(arch_atomic_fetch_inc)
-static inline int
+static __always_inline int
atomic_fetch_inc(atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_fetch_inc(v);
}
#define atomic_fetch_inc atomic_fetch_inc
#endif
#if defined(arch_atomic_fetch_inc_acquire)
-static inline int
+static __always_inline int
atomic_fetch_inc_acquire(atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_fetch_inc_acquire(v);
}
#define atomic_fetch_inc_acquire atomic_fetch_inc_acquire
#endif
#if defined(arch_atomic_fetch_inc_release)
-static inline int
+static __always_inline int
atomic_fetch_inc_release(atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_fetch_inc_release(v);
}
#define atomic_fetch_inc_release atomic_fetch_inc_release
#endif
#if defined(arch_atomic_fetch_inc_relaxed)
-static inline int
+static __always_inline int
atomic_fetch_inc_relaxed(atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_fetch_inc_relaxed(v);
}
#define atomic_fetch_inc_relaxed atomic_fetch_inc_relaxed
#endif
#if defined(arch_atomic_dec)
-static inline void
+static __always_inline void
atomic_dec(atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
arch_atomic_dec(v);
}
#define atomic_dec atomic_dec
#endif
#if defined(arch_atomic_dec_return)
-static inline int
+static __always_inline int
atomic_dec_return(atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_dec_return(v);
}
#define atomic_dec_return atomic_dec_return
#endif
#if defined(arch_atomic_dec_return_acquire)
-static inline int
+static __always_inline int
atomic_dec_return_acquire(atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_dec_return_acquire(v);
}
#define atomic_dec_return_acquire atomic_dec_return_acquire
#endif
#if defined(arch_atomic_dec_return_release)
-static inline int
+static __always_inline int
atomic_dec_return_release(atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_dec_return_release(v);
}
#define atomic_dec_return_release atomic_dec_return_release
#endif
#if defined(arch_atomic_dec_return_relaxed)
-static inline int
+static __always_inline int
atomic_dec_return_relaxed(atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_dec_return_relaxed(v);
}
#define atomic_dec_return_relaxed atomic_dec_return_relaxed
#endif
#if defined(arch_atomic_fetch_dec)
-static inline int
+static __always_inline int
atomic_fetch_dec(atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_fetch_dec(v);
}
#define atomic_fetch_dec atomic_fetch_dec
#endif
#if defined(arch_atomic_fetch_dec_acquire)
-static inline int
+static __always_inline int
atomic_fetch_dec_acquire(atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_fetch_dec_acquire(v);
}
#define atomic_fetch_dec_acquire atomic_fetch_dec_acquire
#endif
#if defined(arch_atomic_fetch_dec_release)
-static inline int
+static __always_inline int
atomic_fetch_dec_release(atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_fetch_dec_release(v);
}
#define atomic_fetch_dec_release atomic_fetch_dec_release
#endif
#if defined(arch_atomic_fetch_dec_relaxed)
-static inline int
+static __always_inline int
atomic_fetch_dec_relaxed(atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_fetch_dec_relaxed(v);
}
#define atomic_fetch_dec_relaxed atomic_fetch_dec_relaxed
#endif
-static inline void
+static __always_inline void
atomic_and(int i, atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
arch_atomic_and(i, v);
}
#define atomic_and atomic_and
#if !defined(arch_atomic_fetch_and_relaxed) || defined(arch_atomic_fetch_and)
-static inline int
+static __always_inline int
atomic_fetch_and(int i, atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_fetch_and(i, v);
}
#define atomic_fetch_and atomic_fetch_and
#endif
#if defined(arch_atomic_fetch_and_acquire)
-static inline int
+static __always_inline int
atomic_fetch_and_acquire(int i, atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_fetch_and_acquire(i, v);
}
#define atomic_fetch_and_acquire atomic_fetch_and_acquire
#endif
#if defined(arch_atomic_fetch_and_release)
-static inline int
+static __always_inline int
atomic_fetch_and_release(int i, atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_fetch_and_release(i, v);
}
#define atomic_fetch_and_release atomic_fetch_and_release
#endif
#if defined(arch_atomic_fetch_and_relaxed)
-static inline int
+static __always_inline int
atomic_fetch_and_relaxed(int i, atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_fetch_and_relaxed(i, v);
}
#define atomic_fetch_and_relaxed atomic_fetch_and_relaxed
#endif
#if defined(arch_atomic_andnot)
-static inline void
+static __always_inline void
atomic_andnot(int i, atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
arch_atomic_andnot(i, v);
}
#define atomic_andnot atomic_andnot
#endif
#if defined(arch_atomic_fetch_andnot)
-static inline int
+static __always_inline int
atomic_fetch_andnot(int i, atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_fetch_andnot(i, v);
}
#define atomic_fetch_andnot atomic_fetch_andnot
#endif
#if defined(arch_atomic_fetch_andnot_acquire)
-static inline int
+static __always_inline int
atomic_fetch_andnot_acquire(int i, atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_fetch_andnot_acquire(i, v);
}
#define atomic_fetch_andnot_acquire atomic_fetch_andnot_acquire
#endif
#if defined(arch_atomic_fetch_andnot_release)
-static inline int
+static __always_inline int
atomic_fetch_andnot_release(int i, atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_fetch_andnot_release(i, v);
}
#define atomic_fetch_andnot_release atomic_fetch_andnot_release
#endif
#if defined(arch_atomic_fetch_andnot_relaxed)
-static inline int
+static __always_inline int
atomic_fetch_andnot_relaxed(int i, atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_fetch_andnot_relaxed(i, v);
}
#define atomic_fetch_andnot_relaxed atomic_fetch_andnot_relaxed
#endif
-static inline void
+static __always_inline void
atomic_or(int i, atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
arch_atomic_or(i, v);
}
#define atomic_or atomic_or
#if !defined(arch_atomic_fetch_or_relaxed) || defined(arch_atomic_fetch_or)
-static inline int
+static __always_inline int
atomic_fetch_or(int i, atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_fetch_or(i, v);
}
#define atomic_fetch_or atomic_fetch_or
#endif
#if defined(arch_atomic_fetch_or_acquire)
-static inline int
+static __always_inline int
atomic_fetch_or_acquire(int i, atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_fetch_or_acquire(i, v);
}
#define atomic_fetch_or_acquire atomic_fetch_or_acquire
#endif
#if defined(arch_atomic_fetch_or_release)
-static inline int
+static __always_inline int
atomic_fetch_or_release(int i, atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_fetch_or_release(i, v);
}
#define atomic_fetch_or_release atomic_fetch_or_release
#endif
#if defined(arch_atomic_fetch_or_relaxed)
-static inline int
+static __always_inline int
atomic_fetch_or_relaxed(int i, atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_fetch_or_relaxed(i, v);
}
#define atomic_fetch_or_relaxed atomic_fetch_or_relaxed
#endif
-static inline void
+static __always_inline void
atomic_xor(int i, atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
arch_atomic_xor(i, v);
}
#define atomic_xor atomic_xor
#if !defined(arch_atomic_fetch_xor_relaxed) || defined(arch_atomic_fetch_xor)
-static inline int
+static __always_inline int
atomic_fetch_xor(int i, atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_fetch_xor(i, v);
}
#define atomic_fetch_xor atomic_fetch_xor
#endif
#if defined(arch_atomic_fetch_xor_acquire)
-static inline int
+static __always_inline int
atomic_fetch_xor_acquire(int i, atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_fetch_xor_acquire(i, v);
}
#define atomic_fetch_xor_acquire atomic_fetch_xor_acquire
#endif
#if defined(arch_atomic_fetch_xor_release)
-static inline int
+static __always_inline int
atomic_fetch_xor_release(int i, atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_fetch_xor_release(i, v);
}
#define atomic_fetch_xor_release atomic_fetch_xor_release
#endif
#if defined(arch_atomic_fetch_xor_relaxed)
-static inline int
+static __always_inline int
atomic_fetch_xor_relaxed(int i, atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_fetch_xor_relaxed(i, v);
}
#define atomic_fetch_xor_relaxed atomic_fetch_xor_relaxed
#endif
#if !defined(arch_atomic_xchg_relaxed) || defined(arch_atomic_xchg)
-static inline int
+static __always_inline int
atomic_xchg(atomic_t *v, int i)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_xchg(v, i);
}
#define atomic_xchg atomic_xchg
#endif
#if defined(arch_atomic_xchg_acquire)
-static inline int
+static __always_inline int
atomic_xchg_acquire(atomic_t *v, int i)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_xchg_acquire(v, i);
}
#define atomic_xchg_acquire atomic_xchg_acquire
#endif
#if defined(arch_atomic_xchg_release)
-static inline int
+static __always_inline int
atomic_xchg_release(atomic_t *v, int i)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_xchg_release(v, i);
}
#define atomic_xchg_release atomic_xchg_release
#endif
#if defined(arch_atomic_xchg_relaxed)
-static inline int
+static __always_inline int
atomic_xchg_relaxed(atomic_t *v, int i)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_xchg_relaxed(v, i);
}
#define atomic_xchg_relaxed atomic_xchg_relaxed
#endif
#if !defined(arch_atomic_cmpxchg_relaxed) || defined(arch_atomic_cmpxchg)
-static inline int
+static __always_inline int
atomic_cmpxchg(atomic_t *v, int old, int new)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_cmpxchg(v, old, new);
}
#define atomic_cmpxchg atomic_cmpxchg
#endif
#if defined(arch_atomic_cmpxchg_acquire)
-static inline int
+static __always_inline int
atomic_cmpxchg_acquire(atomic_t *v, int old, int new)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_cmpxchg_acquire(v, old, new);
}
#define atomic_cmpxchg_acquire atomic_cmpxchg_acquire
#endif
#if defined(arch_atomic_cmpxchg_release)
-static inline int
+static __always_inline int
atomic_cmpxchg_release(atomic_t *v, int old, int new)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_cmpxchg_release(v, old, new);
}
#define atomic_cmpxchg_release atomic_cmpxchg_release
#endif
#if defined(arch_atomic_cmpxchg_relaxed)
-static inline int
+static __always_inline int
atomic_cmpxchg_relaxed(atomic_t *v, int old, int new)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_cmpxchg_relaxed(v, old, new);
}
#define atomic_cmpxchg_relaxed atomic_cmpxchg_relaxed
#endif
#if defined(arch_atomic_try_cmpxchg)
-static inline bool
+static __always_inline bool
atomic_try_cmpxchg(atomic_t *v, int *old, int new)
{
- kasan_check_write(v, sizeof(*v));
- kasan_check_write(old, sizeof(*old));
+ instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_write(old, sizeof(*old));
return arch_atomic_try_cmpxchg(v, old, new);
}
#define atomic_try_cmpxchg atomic_try_cmpxchg
#endif
#if defined(arch_atomic_try_cmpxchg_acquire)
-static inline bool
+static __always_inline bool
atomic_try_cmpxchg_acquire(atomic_t *v, int *old, int new)
{
- kasan_check_write(v, sizeof(*v));
- kasan_check_write(old, sizeof(*old));
+ instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_write(old, sizeof(*old));
return arch_atomic_try_cmpxchg_acquire(v, old, new);
}
#define atomic_try_cmpxchg_acquire atomic_try_cmpxchg_acquire
#endif
#if defined(arch_atomic_try_cmpxchg_release)
-static inline bool
+static __always_inline bool
atomic_try_cmpxchg_release(atomic_t *v, int *old, int new)
{
- kasan_check_write(v, sizeof(*v));
- kasan_check_write(old, sizeof(*old));
+ instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_write(old, sizeof(*old));
return arch_atomic_try_cmpxchg_release(v, old, new);
}
#define atomic_try_cmpxchg_release atomic_try_cmpxchg_release
#endif
#if defined(arch_atomic_try_cmpxchg_relaxed)
-static inline bool
+static __always_inline bool
atomic_try_cmpxchg_relaxed(atomic_t *v, int *old, int new)
{
- kasan_check_write(v, sizeof(*v));
- kasan_check_write(old, sizeof(*old));
+ instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_write(old, sizeof(*old));
return arch_atomic_try_cmpxchg_relaxed(v, old, new);
}
#define atomic_try_cmpxchg_relaxed atomic_try_cmpxchg_relaxed
#endif
#if defined(arch_atomic_sub_and_test)
-static inline bool
+static __always_inline bool
atomic_sub_and_test(int i, atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_sub_and_test(i, v);
}
#define atomic_sub_and_test atomic_sub_and_test
#endif
#if defined(arch_atomic_dec_and_test)
-static inline bool
+static __always_inline bool
atomic_dec_and_test(atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_dec_and_test(v);
}
#define atomic_dec_and_test atomic_dec_and_test
#endif
#if defined(arch_atomic_inc_and_test)
-static inline bool
+static __always_inline bool
atomic_inc_and_test(atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_inc_and_test(v);
}
#define atomic_inc_and_test atomic_inc_and_test
#endif
#if defined(arch_atomic_add_negative)
-static inline bool
+static __always_inline bool
atomic_add_negative(int i, atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_add_negative(i, v);
}
#define atomic_add_negative atomic_add_negative
#endif
#if defined(arch_atomic_fetch_add_unless)
-static inline int
+static __always_inline int
atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_fetch_add_unless(v, a, u);
}
#define atomic_fetch_add_unless atomic_fetch_add_unless
#endif
#if defined(arch_atomic_add_unless)
-static inline bool
+static __always_inline bool
atomic_add_unless(atomic_t *v, int a, int u)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_add_unless(v, a, u);
}
#define atomic_add_unless atomic_add_unless
#endif
#if defined(arch_atomic_inc_not_zero)
-static inline bool
+static __always_inline bool
atomic_inc_not_zero(atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_inc_not_zero(v);
}
#define atomic_inc_not_zero atomic_inc_not_zero
#endif
#if defined(arch_atomic_inc_unless_negative)
-static inline bool
+static __always_inline bool
atomic_inc_unless_negative(atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_inc_unless_negative(v);
}
#define atomic_inc_unless_negative atomic_inc_unless_negative
#endif
#if defined(arch_atomic_dec_unless_positive)
-static inline bool
+static __always_inline bool
atomic_dec_unless_positive(atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_dec_unless_positive(v);
}
#define atomic_dec_unless_positive atomic_dec_unless_positive
#endif
#if defined(arch_atomic_dec_if_positive)
-static inline int
+static __always_inline int
atomic_dec_if_positive(atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_dec_if_positive(v);
}
#define atomic_dec_if_positive atomic_dec_if_positive
#endif
-static inline s64
+static __always_inline s64
atomic64_read(const atomic64_t *v)
{
- kasan_check_read(v, sizeof(*v));
+ instrument_atomic_read(v, sizeof(*v));
return arch_atomic64_read(v);
}
#define atomic64_read atomic64_read
#if defined(arch_atomic64_read_acquire)
-static inline s64
+static __always_inline s64
atomic64_read_acquire(const atomic64_t *v)
{
- kasan_check_read(v, sizeof(*v));
+ instrument_atomic_read(v, sizeof(*v));
return arch_atomic64_read_acquire(v);
}
#define atomic64_read_acquire atomic64_read_acquire
#endif
-static inline void
+static __always_inline void
atomic64_set(atomic64_t *v, s64 i)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
arch_atomic64_set(v, i);
}
#define atomic64_set atomic64_set
#if defined(arch_atomic64_set_release)
-static inline void
+static __always_inline void
atomic64_set_release(atomic64_t *v, s64 i)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
arch_atomic64_set_release(v, i);
}
#define atomic64_set_release atomic64_set_release
#endif
-static inline void
+static __always_inline void
atomic64_add(s64 i, atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
arch_atomic64_add(i, v);
}
#define atomic64_add atomic64_add
#if !defined(arch_atomic64_add_return_relaxed) || defined(arch_atomic64_add_return)
-static inline s64
+static __always_inline s64
atomic64_add_return(s64 i, atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_add_return(i, v);
}
#define atomic64_add_return atomic64_add_return
#endif
#if defined(arch_atomic64_add_return_acquire)
-static inline s64
+static __always_inline s64
atomic64_add_return_acquire(s64 i, atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_add_return_acquire(i, v);
}
#define atomic64_add_return_acquire atomic64_add_return_acquire
#endif
#if defined(arch_atomic64_add_return_release)
-static inline s64
+static __always_inline s64
atomic64_add_return_release(s64 i, atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_add_return_release(i, v);
}
#define atomic64_add_return_release atomic64_add_return_release
#endif
#if defined(arch_atomic64_add_return_relaxed)
-static inline s64
+static __always_inline s64
atomic64_add_return_relaxed(s64 i, atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_add_return_relaxed(i, v);
}
#define atomic64_add_return_relaxed atomic64_add_return_relaxed
#endif
#if !defined(arch_atomic64_fetch_add_relaxed) || defined(arch_atomic64_fetch_add)
-static inline s64
+static __always_inline s64
atomic64_fetch_add(s64 i, atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_fetch_add(i, v);
}
#define atomic64_fetch_add atomic64_fetch_add
#endif
#if defined(arch_atomic64_fetch_add_acquire)
-static inline s64
+static __always_inline s64
atomic64_fetch_add_acquire(s64 i, atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_fetch_add_acquire(i, v);
}
#define atomic64_fetch_add_acquire atomic64_fetch_add_acquire
#endif
#if defined(arch_atomic64_fetch_add_release)
-static inline s64
+static __always_inline s64
atomic64_fetch_add_release(s64 i, atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_fetch_add_release(i, v);
}
#define atomic64_fetch_add_release atomic64_fetch_add_release
#endif
#if defined(arch_atomic64_fetch_add_relaxed)
-static inline s64
+static __always_inline s64
atomic64_fetch_add_relaxed(s64 i, atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_fetch_add_relaxed(i, v);
}
#define atomic64_fetch_add_relaxed atomic64_fetch_add_relaxed
#endif
-static inline void
+static __always_inline void
atomic64_sub(s64 i, atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
arch_atomic64_sub(i, v);
}
#define atomic64_sub atomic64_sub
#if !defined(arch_atomic64_sub_return_relaxed) || defined(arch_atomic64_sub_return)
-static inline s64
+static __always_inline s64
atomic64_sub_return(s64 i, atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_sub_return(i, v);
}
#define atomic64_sub_return atomic64_sub_return
#endif
#if defined(arch_atomic64_sub_return_acquire)
-static inline s64
+static __always_inline s64
atomic64_sub_return_acquire(s64 i, atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_sub_return_acquire(i, v);
}
#define atomic64_sub_return_acquire atomic64_sub_return_acquire
#endif
#if defined(arch_atomic64_sub_return_release)
-static inline s64
+static __always_inline s64
atomic64_sub_return_release(s64 i, atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_sub_return_release(i, v);
}
#define atomic64_sub_return_release atomic64_sub_return_release
#endif
#if defined(arch_atomic64_sub_return_relaxed)
-static inline s64
+static __always_inline s64
atomic64_sub_return_relaxed(s64 i, atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_sub_return_relaxed(i, v);
}
#define atomic64_sub_return_relaxed atomic64_sub_return_relaxed
#endif
#if !defined(arch_atomic64_fetch_sub_relaxed) || defined(arch_atomic64_fetch_sub)
-static inline s64
+static __always_inline s64
atomic64_fetch_sub(s64 i, atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_fetch_sub(i, v);
}
#define atomic64_fetch_sub atomic64_fetch_sub
#endif
#if defined(arch_atomic64_fetch_sub_acquire)
-static inline s64
+static __always_inline s64
atomic64_fetch_sub_acquire(s64 i, atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_fetch_sub_acquire(i, v);
}
#define atomic64_fetch_sub_acquire atomic64_fetch_sub_acquire
#endif
#if defined(arch_atomic64_fetch_sub_release)
-static inline s64
+static __always_inline s64
atomic64_fetch_sub_release(s64 i, atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_fetch_sub_release(i, v);
}
#define atomic64_fetch_sub_release atomic64_fetch_sub_release
#endif
#if defined(arch_atomic64_fetch_sub_relaxed)
-static inline s64
+static __always_inline s64
atomic64_fetch_sub_relaxed(s64 i, atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_fetch_sub_relaxed(i, v);
}
#define atomic64_fetch_sub_relaxed atomic64_fetch_sub_relaxed
#endif
#if defined(arch_atomic64_inc)
-static inline void
+static __always_inline void
atomic64_inc(atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
arch_atomic64_inc(v);
}
#define atomic64_inc atomic64_inc
#endif
#if defined(arch_atomic64_inc_return)
-static inline s64
+static __always_inline s64
atomic64_inc_return(atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_inc_return(v);
}
#define atomic64_inc_return atomic64_inc_return
#endif
#if defined(arch_atomic64_inc_return_acquire)
-static inline s64
+static __always_inline s64
atomic64_inc_return_acquire(atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_inc_return_acquire(v);
}
#define atomic64_inc_return_acquire atomic64_inc_return_acquire
#endif
#if defined(arch_atomic64_inc_return_release)
-static inline s64
+static __always_inline s64
atomic64_inc_return_release(atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_inc_return_release(v);
}
#define atomic64_inc_return_release atomic64_inc_return_release
#endif
#if defined(arch_atomic64_inc_return_relaxed)
-static inline s64
+static __always_inline s64
atomic64_inc_return_relaxed(atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_inc_return_relaxed(v);
}
#define atomic64_inc_return_relaxed atomic64_inc_return_relaxed
#endif
#if defined(arch_atomic64_fetch_inc)
-static inline s64
+static __always_inline s64
atomic64_fetch_inc(atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_fetch_inc(v);
}
#define atomic64_fetch_inc atomic64_fetch_inc
#endif
#if defined(arch_atomic64_fetch_inc_acquire)
-static inline s64
+static __always_inline s64
atomic64_fetch_inc_acquire(atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_fetch_inc_acquire(v);
}
#define atomic64_fetch_inc_acquire atomic64_fetch_inc_acquire
#endif
#if defined(arch_atomic64_fetch_inc_release)
-static inline s64
+static __always_inline s64
atomic64_fetch_inc_release(atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_fetch_inc_release(v);
}
#define atomic64_fetch_inc_release atomic64_fetch_inc_release
#endif
#if defined(arch_atomic64_fetch_inc_relaxed)
-static inline s64
+static __always_inline s64
atomic64_fetch_inc_relaxed(atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_fetch_inc_relaxed(v);
}
#define atomic64_fetch_inc_relaxed atomic64_fetch_inc_relaxed
#endif
#if defined(arch_atomic64_dec)
-static inline void
+static __always_inline void
atomic64_dec(atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
arch_atomic64_dec(v);
}
#define atomic64_dec atomic64_dec
#endif
#if defined(arch_atomic64_dec_return)
-static inline s64
+static __always_inline s64
atomic64_dec_return(atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_dec_return(v);
}
#define atomic64_dec_return atomic64_dec_return
#endif
#if defined(arch_atomic64_dec_return_acquire)
-static inline s64
+static __always_inline s64
atomic64_dec_return_acquire(atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_dec_return_acquire(v);
}
#define atomic64_dec_return_acquire atomic64_dec_return_acquire
#endif
#if defined(arch_atomic64_dec_return_release)
-static inline s64
+static __always_inline s64
atomic64_dec_return_release(atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_dec_return_release(v);
}
#define atomic64_dec_return_release atomic64_dec_return_release
#endif
#if defined(arch_atomic64_dec_return_relaxed)
-static inline s64
+static __always_inline s64
atomic64_dec_return_relaxed(atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_dec_return_relaxed(v);
}
#define atomic64_dec_return_relaxed atomic64_dec_return_relaxed
#endif
#if defined(arch_atomic64_fetch_dec)
-static inline s64
+static __always_inline s64
atomic64_fetch_dec(atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_fetch_dec(v);
}
#define atomic64_fetch_dec atomic64_fetch_dec
#endif
#if defined(arch_atomic64_fetch_dec_acquire)
-static inline s64
+static __always_inline s64
atomic64_fetch_dec_acquire(atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_fetch_dec_acquire(v);
}
#define atomic64_fetch_dec_acquire atomic64_fetch_dec_acquire
#endif
#if defined(arch_atomic64_fetch_dec_release)
-static inline s64
+static __always_inline s64
atomic64_fetch_dec_release(atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_fetch_dec_release(v);
}
#define atomic64_fetch_dec_release atomic64_fetch_dec_release
#endif
#if defined(arch_atomic64_fetch_dec_relaxed)
-static inline s64
+static __always_inline s64
atomic64_fetch_dec_relaxed(atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_fetch_dec_relaxed(v);
}
#define atomic64_fetch_dec_relaxed atomic64_fetch_dec_relaxed
#endif
-static inline void
+static __always_inline void
atomic64_and(s64 i, atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
arch_atomic64_and(i, v);
}
#define atomic64_and atomic64_and
#if !defined(arch_atomic64_fetch_and_relaxed) || defined(arch_atomic64_fetch_and)
-static inline s64
+static __always_inline s64
atomic64_fetch_and(s64 i, atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_fetch_and(i, v);
}
#define atomic64_fetch_and atomic64_fetch_and
#endif
#if defined(arch_atomic64_fetch_and_acquire)
-static inline s64
+static __always_inline s64
atomic64_fetch_and_acquire(s64 i, atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_fetch_and_acquire(i, v);
}
#define atomic64_fetch_and_acquire atomic64_fetch_and_acquire
#endif
#if defined(arch_atomic64_fetch_and_release)
-static inline s64
+static __always_inline s64
atomic64_fetch_and_release(s64 i, atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_fetch_and_release(i, v);
}
#define atomic64_fetch_and_release atomic64_fetch_and_release
#endif
#if defined(arch_atomic64_fetch_and_relaxed)
-static inline s64
+static __always_inline s64
atomic64_fetch_and_relaxed(s64 i, atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_fetch_and_relaxed(i, v);
}
#define atomic64_fetch_and_relaxed atomic64_fetch_and_relaxed
#endif
#if defined(arch_atomic64_andnot)
-static inline void
+static __always_inline void
atomic64_andnot(s64 i, atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
arch_atomic64_andnot(i, v);
}
#define atomic64_andnot atomic64_andnot
#endif
#if defined(arch_atomic64_fetch_andnot)
-static inline s64
+static __always_inline s64
atomic64_fetch_andnot(s64 i, atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_fetch_andnot(i, v);
}
#define atomic64_fetch_andnot atomic64_fetch_andnot
#endif
#if defined(arch_atomic64_fetch_andnot_acquire)
-static inline s64
+static __always_inline s64
atomic64_fetch_andnot_acquire(s64 i, atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_fetch_andnot_acquire(i, v);
}
#define atomic64_fetch_andnot_acquire atomic64_fetch_andnot_acquire
#endif
#if defined(arch_atomic64_fetch_andnot_release)
-static inline s64
+static __always_inline s64
atomic64_fetch_andnot_release(s64 i, atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_fetch_andnot_release(i, v);
}
#define atomic64_fetch_andnot_release atomic64_fetch_andnot_release
#endif
#if defined(arch_atomic64_fetch_andnot_relaxed)
-static inline s64
+static __always_inline s64
atomic64_fetch_andnot_relaxed(s64 i, atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_fetch_andnot_relaxed(i, v);
}
#define atomic64_fetch_andnot_relaxed atomic64_fetch_andnot_relaxed
#endif
-static inline void
+static __always_inline void
atomic64_or(s64 i, atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
arch_atomic64_or(i, v);
}
#define atomic64_or atomic64_or
#if !defined(arch_atomic64_fetch_or_relaxed) || defined(arch_atomic64_fetch_or)
-static inline s64
+static __always_inline s64
atomic64_fetch_or(s64 i, atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_fetch_or(i, v);
}
#define atomic64_fetch_or atomic64_fetch_or
#endif
#if defined(arch_atomic64_fetch_or_acquire)
-static inline s64
+static __always_inline s64
atomic64_fetch_or_acquire(s64 i, atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_fetch_or_acquire(i, v);
}
#define atomic64_fetch_or_acquire atomic64_fetch_or_acquire
#endif
#if defined(arch_atomic64_fetch_or_release)
-static inline s64
+static __always_inline s64
atomic64_fetch_or_release(s64 i, atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_fetch_or_release(i, v);
}
#define atomic64_fetch_or_release atomic64_fetch_or_release
#endif
#if defined(arch_atomic64_fetch_or_relaxed)
-static inline s64
+static __always_inline s64
atomic64_fetch_or_relaxed(s64 i, atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_fetch_or_relaxed(i, v);
}
#define atomic64_fetch_or_relaxed atomic64_fetch_or_relaxed
#endif
-static inline void
+static __always_inline void
atomic64_xor(s64 i, atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
arch_atomic64_xor(i, v);
}
#define atomic64_xor atomic64_xor
#if !defined(arch_atomic64_fetch_xor_relaxed) || defined(arch_atomic64_fetch_xor)
-static inline s64
+static __always_inline s64
atomic64_fetch_xor(s64 i, atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_fetch_xor(i, v);
}
#define atomic64_fetch_xor atomic64_fetch_xor
#endif
#if defined(arch_atomic64_fetch_xor_acquire)
-static inline s64
+static __always_inline s64
atomic64_fetch_xor_acquire(s64 i, atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_fetch_xor_acquire(i, v);
}
#define atomic64_fetch_xor_acquire atomic64_fetch_xor_acquire
#endif
#if defined(arch_atomic64_fetch_xor_release)
-static inline s64
+static __always_inline s64
atomic64_fetch_xor_release(s64 i, atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_fetch_xor_release(i, v);
}
#define atomic64_fetch_xor_release atomic64_fetch_xor_release
#endif
#if defined(arch_atomic64_fetch_xor_relaxed)
-static inline s64
+static __always_inline s64
atomic64_fetch_xor_relaxed(s64 i, atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_fetch_xor_relaxed(i, v);
}
#define atomic64_fetch_xor_relaxed atomic64_fetch_xor_relaxed
#endif
#if !defined(arch_atomic64_xchg_relaxed) || defined(arch_atomic64_xchg)
-static inline s64
+static __always_inline s64
atomic64_xchg(atomic64_t *v, s64 i)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_xchg(v, i);
}
#define atomic64_xchg atomic64_xchg
#endif
#if defined(arch_atomic64_xchg_acquire)
-static inline s64
+static __always_inline s64
atomic64_xchg_acquire(atomic64_t *v, s64 i)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_xchg_acquire(v, i);
}
#define atomic64_xchg_acquire atomic64_xchg_acquire
#endif
#if defined(arch_atomic64_xchg_release)
-static inline s64
+static __always_inline s64
atomic64_xchg_release(atomic64_t *v, s64 i)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_xchg_release(v, i);
}
#define atomic64_xchg_release atomic64_xchg_release
#endif
#if defined(arch_atomic64_xchg_relaxed)
-static inline s64
+static __always_inline s64
atomic64_xchg_relaxed(atomic64_t *v, s64 i)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_xchg_relaxed(v, i);
}
#define atomic64_xchg_relaxed atomic64_xchg_relaxed
#endif
#if !defined(arch_atomic64_cmpxchg_relaxed) || defined(arch_atomic64_cmpxchg)
-static inline s64
+static __always_inline s64
atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_cmpxchg(v, old, new);
}
#define atomic64_cmpxchg atomic64_cmpxchg
#endif
#if defined(arch_atomic64_cmpxchg_acquire)
-static inline s64
+static __always_inline s64
atomic64_cmpxchg_acquire(atomic64_t *v, s64 old, s64 new)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_cmpxchg_acquire(v, old, new);
}
#define atomic64_cmpxchg_acquire atomic64_cmpxchg_acquire
#endif
#if defined(arch_atomic64_cmpxchg_release)
-static inline s64
+static __always_inline s64
atomic64_cmpxchg_release(atomic64_t *v, s64 old, s64 new)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_cmpxchg_release(v, old, new);
}
#define atomic64_cmpxchg_release atomic64_cmpxchg_release
#endif
#if defined(arch_atomic64_cmpxchg_relaxed)
-static inline s64
+static __always_inline s64
atomic64_cmpxchg_relaxed(atomic64_t *v, s64 old, s64 new)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_cmpxchg_relaxed(v, old, new);
}
#define atomic64_cmpxchg_relaxed atomic64_cmpxchg_relaxed
#endif
#if defined(arch_atomic64_try_cmpxchg)
-static inline bool
+static __always_inline bool
atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new)
{
- kasan_check_write(v, sizeof(*v));
- kasan_check_write(old, sizeof(*old));
+ instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_write(old, sizeof(*old));
return arch_atomic64_try_cmpxchg(v, old, new);
}
#define atomic64_try_cmpxchg atomic64_try_cmpxchg
#endif
#if defined(arch_atomic64_try_cmpxchg_acquire)
-static inline bool
+static __always_inline bool
atomic64_try_cmpxchg_acquire(atomic64_t *v, s64 *old, s64 new)
{
- kasan_check_write(v, sizeof(*v));
- kasan_check_write(old, sizeof(*old));
+ instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_write(old, sizeof(*old));
return arch_atomic64_try_cmpxchg_acquire(v, old, new);
}
#define atomic64_try_cmpxchg_acquire atomic64_try_cmpxchg_acquire
#endif
#if defined(arch_atomic64_try_cmpxchg_release)
-static inline bool
+static __always_inline bool
atomic64_try_cmpxchg_release(atomic64_t *v, s64 *old, s64 new)
{
- kasan_check_write(v, sizeof(*v));
- kasan_check_write(old, sizeof(*old));
+ instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_write(old, sizeof(*old));
return arch_atomic64_try_cmpxchg_release(v, old, new);
}
#define atomic64_try_cmpxchg_release atomic64_try_cmpxchg_release
#endif
#if defined(arch_atomic64_try_cmpxchg_relaxed)
-static inline bool
+static __always_inline bool
atomic64_try_cmpxchg_relaxed(atomic64_t *v, s64 *old, s64 new)
{
- kasan_check_write(v, sizeof(*v));
- kasan_check_write(old, sizeof(*old));
+ instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_write(old, sizeof(*old));
return arch_atomic64_try_cmpxchg_relaxed(v, old, new);
}
#define atomic64_try_cmpxchg_relaxed atomic64_try_cmpxchg_relaxed
#endif
#if defined(arch_atomic64_sub_and_test)
-static inline bool
+static __always_inline bool
atomic64_sub_and_test(s64 i, atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_sub_and_test(i, v);
}
#define atomic64_sub_and_test atomic64_sub_and_test
#endif
#if defined(arch_atomic64_dec_and_test)
-static inline bool
+static __always_inline bool
atomic64_dec_and_test(atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_dec_and_test(v);
}
#define atomic64_dec_and_test atomic64_dec_and_test
#endif
#if defined(arch_atomic64_inc_and_test)
-static inline bool
+static __always_inline bool
atomic64_inc_and_test(atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_inc_and_test(v);
}
#define atomic64_inc_and_test atomic64_inc_and_test
#endif
#if defined(arch_atomic64_add_negative)
-static inline bool
+static __always_inline bool
atomic64_add_negative(s64 i, atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_add_negative(i, v);
}
#define atomic64_add_negative atomic64_add_negative
#endif
#if defined(arch_atomic64_fetch_add_unless)
-static inline s64
+static __always_inline s64
atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_fetch_add_unless(v, a, u);
}
#define atomic64_fetch_add_unless atomic64_fetch_add_unless
#endif
#if defined(arch_atomic64_add_unless)
-static inline bool
+static __always_inline bool
atomic64_add_unless(atomic64_t *v, s64 a, s64 u)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_add_unless(v, a, u);
}
#define atomic64_add_unless atomic64_add_unless
#endif
#if defined(arch_atomic64_inc_not_zero)
-static inline bool
+static __always_inline bool
atomic64_inc_not_zero(atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_inc_not_zero(v);
}
#define atomic64_inc_not_zero atomic64_inc_not_zero
#endif
#if defined(arch_atomic64_inc_unless_negative)
-static inline bool
+static __always_inline bool
atomic64_inc_unless_negative(atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_inc_unless_negative(v);
}
#define atomic64_inc_unless_negative atomic64_inc_unless_negative
#endif
#if defined(arch_atomic64_dec_unless_positive)
-static inline bool
+static __always_inline bool
atomic64_dec_unless_positive(atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_dec_unless_positive(v);
}
#define atomic64_dec_unless_positive atomic64_dec_unless_positive
#endif
#if defined(arch_atomic64_dec_if_positive)
-static inline s64
+static __always_inline s64
atomic64_dec_if_positive(atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_dec_if_positive(v);
}
#define atomic64_dec_if_positive atomic64_dec_if_positive
@@ -1644,7 +1645,7 @@ atomic64_dec_if_positive(atomic64_t *v)
#define xchg(ptr, ...) \
({ \
typeof(ptr) __ai_ptr = (ptr); \
- kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
arch_xchg(__ai_ptr, __VA_ARGS__); \
})
#endif
@@ -1653,7 +1654,7 @@ atomic64_dec_if_positive(atomic64_t *v)
#define xchg_acquire(ptr, ...) \
({ \
typeof(ptr) __ai_ptr = (ptr); \
- kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
arch_xchg_acquire(__ai_ptr, __VA_ARGS__); \
})
#endif
@@ -1662,7 +1663,7 @@ atomic64_dec_if_positive(atomic64_t *v)
#define xchg_release(ptr, ...) \
({ \
typeof(ptr) __ai_ptr = (ptr); \
- kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
arch_xchg_release(__ai_ptr, __VA_ARGS__); \
})
#endif
@@ -1671,7 +1672,7 @@ atomic64_dec_if_positive(atomic64_t *v)
#define xchg_relaxed(ptr, ...) \
({ \
typeof(ptr) __ai_ptr = (ptr); \
- kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
arch_xchg_relaxed(__ai_ptr, __VA_ARGS__); \
})
#endif
@@ -1680,7 +1681,7 @@ atomic64_dec_if_positive(atomic64_t *v)
#define cmpxchg(ptr, ...) \
({ \
typeof(ptr) __ai_ptr = (ptr); \
- kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
arch_cmpxchg(__ai_ptr, __VA_ARGS__); \
})
#endif
@@ -1689,7 +1690,7 @@ atomic64_dec_if_positive(atomic64_t *v)
#define cmpxchg_acquire(ptr, ...) \
({ \
typeof(ptr) __ai_ptr = (ptr); \
- kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
arch_cmpxchg_acquire(__ai_ptr, __VA_ARGS__); \
})
#endif
@@ -1698,7 +1699,7 @@ atomic64_dec_if_positive(atomic64_t *v)
#define cmpxchg_release(ptr, ...) \
({ \
typeof(ptr) __ai_ptr = (ptr); \
- kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
arch_cmpxchg_release(__ai_ptr, __VA_ARGS__); \
})
#endif
@@ -1707,7 +1708,7 @@ atomic64_dec_if_positive(atomic64_t *v)
#define cmpxchg_relaxed(ptr, ...) \
({ \
typeof(ptr) __ai_ptr = (ptr); \
- kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
arch_cmpxchg_relaxed(__ai_ptr, __VA_ARGS__); \
})
#endif
@@ -1716,7 +1717,7 @@ atomic64_dec_if_positive(atomic64_t *v)
#define cmpxchg64(ptr, ...) \
({ \
typeof(ptr) __ai_ptr = (ptr); \
- kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
arch_cmpxchg64(__ai_ptr, __VA_ARGS__); \
})
#endif
@@ -1725,7 +1726,7 @@ atomic64_dec_if_positive(atomic64_t *v)
#define cmpxchg64_acquire(ptr, ...) \
({ \
typeof(ptr) __ai_ptr = (ptr); \
- kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
arch_cmpxchg64_acquire(__ai_ptr, __VA_ARGS__); \
})
#endif
@@ -1734,7 +1735,7 @@ atomic64_dec_if_positive(atomic64_t *v)
#define cmpxchg64_release(ptr, ...) \
({ \
typeof(ptr) __ai_ptr = (ptr); \
- kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
arch_cmpxchg64_release(__ai_ptr, __VA_ARGS__); \
})
#endif
@@ -1743,7 +1744,7 @@ atomic64_dec_if_positive(atomic64_t *v)
#define cmpxchg64_relaxed(ptr, ...) \
({ \
typeof(ptr) __ai_ptr = (ptr); \
- kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
arch_cmpxchg64_relaxed(__ai_ptr, __VA_ARGS__); \
})
#endif
@@ -1751,28 +1752,28 @@ atomic64_dec_if_positive(atomic64_t *v)
#define cmpxchg_local(ptr, ...) \
({ \
typeof(ptr) __ai_ptr = (ptr); \
- kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
arch_cmpxchg_local(__ai_ptr, __VA_ARGS__); \
})
#define cmpxchg64_local(ptr, ...) \
({ \
typeof(ptr) __ai_ptr = (ptr); \
- kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
arch_cmpxchg64_local(__ai_ptr, __VA_ARGS__); \
})
#define sync_cmpxchg(ptr, ...) \
({ \
typeof(ptr) __ai_ptr = (ptr); \
- kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
arch_sync_cmpxchg(__ai_ptr, __VA_ARGS__); \
})
#define cmpxchg_double(ptr, ...) \
({ \
typeof(ptr) __ai_ptr = (ptr); \
- kasan_check_write(__ai_ptr, 2 * sizeof(*__ai_ptr)); \
+ instrument_atomic_write(__ai_ptr, 2 * sizeof(*__ai_ptr)); \
arch_cmpxchg_double(__ai_ptr, __VA_ARGS__); \
})
@@ -1780,9 +1781,9 @@ atomic64_dec_if_positive(atomic64_t *v)
#define cmpxchg_double_local(ptr, ...) \
({ \
typeof(ptr) __ai_ptr = (ptr); \
- kasan_check_write(__ai_ptr, 2 * sizeof(*__ai_ptr)); \
+ instrument_atomic_write(__ai_ptr, 2 * sizeof(*__ai_ptr)); \
arch_cmpxchg_double_local(__ai_ptr, __VA_ARGS__); \
})
#endif /* _ASM_GENERIC_ATOMIC_INSTRUMENTED_H */
-// b29b625d5de9280f680e42c7be859b55b15e5f6a
+// 89bf97f3a7509b740845e51ddf31055b48a81f40
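
For reference, the instrument_atomic_write() helper that replaces the bare kasan_check_write() calls above is expected to forward each access to both KASAN and KCSAN; a minimal sketch of its shape, assuming the include/linux/instrumented.h interface added elsewhere in this series (not the exact file contents), is:

	/* Sketch only: mirrors the assumed <linux/instrumented.h> helper. */
	static __always_inline void instrument_atomic_write(const volatile void *v, size_t size)
	{
		kasan_check_write(v, size);		/* KASAN: validity of the written range */
		kcsan_check_atomic_write(v, size);	/* KCSAN: treat this as a marked (atomic) write */
	}

The accompanying switch from "static inline" to "static __always_inline" in the generated wrappers forces inlining even where the compiler would otherwise emit an out-of-line copy (e.g. when optimizing for size), so these thin wrappers never survive as separate call targets.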
diff --git a/include/asm-generic/atomic-long.h b/include/asm-generic/atomic-long.h
index 881c7e27af28..073cf40f431b 100644
--- a/include/asm-generic/atomic-long.h
+++ b/include/asm-generic/atomic-long.h
@@ -6,6 +6,7 @@
#ifndef _ASM_GENERIC_ATOMIC_LONG_H
#define _ASM_GENERIC_ATOMIC_LONG_H
+#include <linux/compiler.h>
#include <asm/types.h>
#ifdef CONFIG_64BIT
@@ -22,493 +23,493 @@ typedef atomic_t atomic_long_t;
#ifdef CONFIG_64BIT
-static inline long
+static __always_inline long
atomic_long_read(const atomic_long_t *v)
{
return atomic64_read(v);
}
-static inline long
+static __always_inline long
atomic_long_read_acquire(const atomic_long_t *v)
{
return atomic64_read_acquire(v);
}
-static inline void
+static __always_inline void
atomic_long_set(atomic_long_t *v, long i)
{
atomic64_set(v, i);
}
-static inline void
+static __always_inline void
atomic_long_set_release(atomic_long_t *v, long i)
{
atomic64_set_release(v, i);
}
-static inline void
+static __always_inline void
atomic_long_add(long i, atomic_long_t *v)
{
atomic64_add(i, v);
}
-static inline long
+static __always_inline long
atomic_long_add_return(long i, atomic_long_t *v)
{
return atomic64_add_return(i, v);
}
-static inline long
+static __always_inline long
atomic_long_add_return_acquire(long i, atomic_long_t *v)
{
return atomic64_add_return_acquire(i, v);
}
-static inline long
+static __always_inline long
atomic_long_add_return_release(long i, atomic_long_t *v)
{
return atomic64_add_return_release(i, v);
}
-static inline long
+static __always_inline long
atomic_long_add_return_relaxed(long i, atomic_long_t *v)
{
return atomic64_add_return_relaxed(i, v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_add(long i, atomic_long_t *v)
{
return atomic64_fetch_add(i, v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_add_acquire(long i, atomic_long_t *v)
{
return atomic64_fetch_add_acquire(i, v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_add_release(long i, atomic_long_t *v)
{
return atomic64_fetch_add_release(i, v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_add_relaxed(long i, atomic_long_t *v)
{
return atomic64_fetch_add_relaxed(i, v);
}
-static inline void
+static __always_inline void
atomic_long_sub(long i, atomic_long_t *v)
{
atomic64_sub(i, v);
}
-static inline long
+static __always_inline long
atomic_long_sub_return(long i, atomic_long_t *v)
{
return atomic64_sub_return(i, v);
}
-static inline long
+static __always_inline long
atomic_long_sub_return_acquire(long i, atomic_long_t *v)
{
return atomic64_sub_return_acquire(i, v);
}
-static inline long
+static __always_inline long
atomic_long_sub_return_release(long i, atomic_long_t *v)
{
return atomic64_sub_return_release(i, v);
}
-static inline long
+static __always_inline long
atomic_long_sub_return_relaxed(long i, atomic_long_t *v)
{
return atomic64_sub_return_relaxed(i, v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_sub(long i, atomic_long_t *v)
{
return atomic64_fetch_sub(i, v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_sub_acquire(long i, atomic_long_t *v)
{
return atomic64_fetch_sub_acquire(i, v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_sub_release(long i, atomic_long_t *v)
{
return atomic64_fetch_sub_release(i, v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_sub_relaxed(long i, atomic_long_t *v)
{
return atomic64_fetch_sub_relaxed(i, v);
}
-static inline void
+static __always_inline void
atomic_long_inc(atomic_long_t *v)
{
atomic64_inc(v);
}
-static inline long
+static __always_inline long
atomic_long_inc_return(atomic_long_t *v)
{
return atomic64_inc_return(v);
}
-static inline long
+static __always_inline long
atomic_long_inc_return_acquire(atomic_long_t *v)
{
return atomic64_inc_return_acquire(v);
}
-static inline long
+static __always_inline long
atomic_long_inc_return_release(atomic_long_t *v)
{
return atomic64_inc_return_release(v);
}
-static inline long
+static __always_inline long
atomic_long_inc_return_relaxed(atomic_long_t *v)
{
return atomic64_inc_return_relaxed(v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_inc(atomic_long_t *v)
{
return atomic64_fetch_inc(v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_inc_acquire(atomic_long_t *v)
{
return atomic64_fetch_inc_acquire(v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_inc_release(atomic_long_t *v)
{
return atomic64_fetch_inc_release(v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_inc_relaxed(atomic_long_t *v)
{
return atomic64_fetch_inc_relaxed(v);
}
-static inline void
+static __always_inline void
atomic_long_dec(atomic_long_t *v)
{
atomic64_dec(v);
}
-static inline long
+static __always_inline long
atomic_long_dec_return(atomic_long_t *v)
{
return atomic64_dec_return(v);
}
-static inline long
+static __always_inline long
atomic_long_dec_return_acquire(atomic_long_t *v)
{
return atomic64_dec_return_acquire(v);
}
-static inline long
+static __always_inline long
atomic_long_dec_return_release(atomic_long_t *v)
{
return atomic64_dec_return_release(v);
}
-static inline long
+static __always_inline long
atomic_long_dec_return_relaxed(atomic_long_t *v)
{
return atomic64_dec_return_relaxed(v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_dec(atomic_long_t *v)
{
return atomic64_fetch_dec(v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_dec_acquire(atomic_long_t *v)
{
return atomic64_fetch_dec_acquire(v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_dec_release(atomic_long_t *v)
{
return atomic64_fetch_dec_release(v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_dec_relaxed(atomic_long_t *v)
{
return atomic64_fetch_dec_relaxed(v);
}
-static inline void
+static __always_inline void
atomic_long_and(long i, atomic_long_t *v)
{
atomic64_and(i, v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_and(long i, atomic_long_t *v)
{
return atomic64_fetch_and(i, v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_and_acquire(long i, atomic_long_t *v)
{
return atomic64_fetch_and_acquire(i, v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_and_release(long i, atomic_long_t *v)
{
return atomic64_fetch_and_release(i, v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_and_relaxed(long i, atomic_long_t *v)
{
return atomic64_fetch_and_relaxed(i, v);
}
-static inline void
+static __always_inline void
atomic_long_andnot(long i, atomic_long_t *v)
{
atomic64_andnot(i, v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_andnot(long i, atomic_long_t *v)
{
return atomic64_fetch_andnot(i, v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_andnot_acquire(long i, atomic_long_t *v)
{
return atomic64_fetch_andnot_acquire(i, v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_andnot_release(long i, atomic_long_t *v)
{
return atomic64_fetch_andnot_release(i, v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_andnot_relaxed(long i, atomic_long_t *v)
{
return atomic64_fetch_andnot_relaxed(i, v);
}
-static inline void
+static __always_inline void
atomic_long_or(long i, atomic_long_t *v)
{
atomic64_or(i, v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_or(long i, atomic_long_t *v)
{
return atomic64_fetch_or(i, v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_or_acquire(long i, atomic_long_t *v)
{
return atomic64_fetch_or_acquire(i, v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_or_release(long i, atomic_long_t *v)
{
return atomic64_fetch_or_release(i, v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_or_relaxed(long i, atomic_long_t *v)
{
return atomic64_fetch_or_relaxed(i, v);
}
-static inline void
+static __always_inline void
atomic_long_xor(long i, atomic_long_t *v)
{
atomic64_xor(i, v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_xor(long i, atomic_long_t *v)
{
return atomic64_fetch_xor(i, v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_xor_acquire(long i, atomic_long_t *v)
{
return atomic64_fetch_xor_acquire(i, v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_xor_release(long i, atomic_long_t *v)
{
return atomic64_fetch_xor_release(i, v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_xor_relaxed(long i, atomic_long_t *v)
{
return atomic64_fetch_xor_relaxed(i, v);
}
-static inline long
+static __always_inline long
atomic_long_xchg(atomic_long_t *v, long i)
{
return atomic64_xchg(v, i);
}
-static inline long
+static __always_inline long
atomic_long_xchg_acquire(atomic_long_t *v, long i)
{
return atomic64_xchg_acquire(v, i);
}
-static inline long
+static __always_inline long
atomic_long_xchg_release(atomic_long_t *v, long i)
{
return atomic64_xchg_release(v, i);
}
-static inline long
+static __always_inline long
atomic_long_xchg_relaxed(atomic_long_t *v, long i)
{
return atomic64_xchg_relaxed(v, i);
}
-static inline long
+static __always_inline long
atomic_long_cmpxchg(atomic_long_t *v, long old, long new)
{
return atomic64_cmpxchg(v, old, new);
}
-static inline long
+static __always_inline long
atomic_long_cmpxchg_acquire(atomic_long_t *v, long old, long new)
{
return atomic64_cmpxchg_acquire(v, old, new);
}
-static inline long
+static __always_inline long
atomic_long_cmpxchg_release(atomic_long_t *v, long old, long new)
{
return atomic64_cmpxchg_release(v, old, new);
}
-static inline long
+static __always_inline long
atomic_long_cmpxchg_relaxed(atomic_long_t *v, long old, long new)
{
return atomic64_cmpxchg_relaxed(v, old, new);
}
-static inline bool
+static __always_inline bool
atomic_long_try_cmpxchg(atomic_long_t *v, long *old, long new)
{
return atomic64_try_cmpxchg(v, (s64 *)old, new);
}
-static inline bool
+static __always_inline bool
atomic_long_try_cmpxchg_acquire(atomic_long_t *v, long *old, long new)
{
return atomic64_try_cmpxchg_acquire(v, (s64 *)old, new);
}
-static inline bool
+static __always_inline bool
atomic_long_try_cmpxchg_release(atomic_long_t *v, long *old, long new)
{
return atomic64_try_cmpxchg_release(v, (s64 *)old, new);
}
-static inline bool
+static __always_inline bool
atomic_long_try_cmpxchg_relaxed(atomic_long_t *v, long *old, long new)
{
return atomic64_try_cmpxchg_relaxed(v, (s64 *)old, new);
}
-static inline bool
+static __always_inline bool
atomic_long_sub_and_test(long i, atomic_long_t *v)
{
return atomic64_sub_and_test(i, v);
}
-static inline bool
+static __always_inline bool
atomic_long_dec_and_test(atomic_long_t *v)
{
return atomic64_dec_and_test(v);
}
-static inline bool
+static __always_inline bool
atomic_long_inc_and_test(atomic_long_t *v)
{
return atomic64_inc_and_test(v);
}
-static inline bool
+static __always_inline bool
atomic_long_add_negative(long i, atomic_long_t *v)
{
return atomic64_add_negative(i, v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_add_unless(atomic_long_t *v, long a, long u)
{
return atomic64_fetch_add_unless(v, a, u);
}
-static inline bool
+static __always_inline bool
atomic_long_add_unless(atomic_long_t *v, long a, long u)
{
return atomic64_add_unless(v, a, u);
}
-static inline bool
+static __always_inline bool
atomic_long_inc_not_zero(atomic_long_t *v)
{
return atomic64_inc_not_zero(v);
}
-static inline bool
+static __always_inline bool
atomic_long_inc_unless_negative(atomic_long_t *v)
{
return atomic64_inc_unless_negative(v);
}
-static inline bool
+static __always_inline bool
atomic_long_dec_unless_positive(atomic_long_t *v)
{
return atomic64_dec_unless_positive(v);
}
-static inline long
+static __always_inline long
atomic_long_dec_if_positive(atomic_long_t *v)
{
return atomic64_dec_if_positive(v);
@@ -516,493 +517,493 @@ atomic_long_dec_if_positive(atomic_long_t *v)
#else /* CONFIG_64BIT */
-static inline long
+static __always_inline long
atomic_long_read(const atomic_long_t *v)
{
return atomic_read(v);
}
-static inline long
+static __always_inline long
atomic_long_read_acquire(const atomic_long_t *v)
{
return atomic_read_acquire(v);
}
-static inline void
+static __always_inline void
atomic_long_set(atomic_long_t *v, long i)
{
atomic_set(v, i);
}
-static inline void
+static __always_inline void
atomic_long_set_release(atomic_long_t *v, long i)
{
atomic_set_release(v, i);
}
-static inline void
+static __always_inline void
atomic_long_add(long i, atomic_long_t *v)
{
atomic_add(i, v);
}
-static inline long
+static __always_inline long
atomic_long_add_return(long i, atomic_long_t *v)
{
return atomic_add_return(i, v);
}
-static inline long
+static __always_inline long
atomic_long_add_return_acquire(long i, atomic_long_t *v)
{
return atomic_add_return_acquire(i, v);
}
-static inline long
+static __always_inline long
atomic_long_add_return_release(long i, atomic_long_t *v)
{
return atomic_add_return_release(i, v);
}
-static inline long
+static __always_inline long
atomic_long_add_return_relaxed(long i, atomic_long_t *v)
{
return atomic_add_return_relaxed(i, v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_add(long i, atomic_long_t *v)
{
return atomic_fetch_add(i, v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_add_acquire(long i, atomic_long_t *v)
{
return atomic_fetch_add_acquire(i, v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_add_release(long i, atomic_long_t *v)
{
return atomic_fetch_add_release(i, v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_add_relaxed(long i, atomic_long_t *v)
{
return atomic_fetch_add_relaxed(i, v);
}
-static inline void
+static __always_inline void
atomic_long_sub(long i, atomic_long_t *v)
{
atomic_sub(i, v);
}
-static inline long
+static __always_inline long
atomic_long_sub_return(long i, atomic_long_t *v)
{
return atomic_sub_return(i, v);
}
-static inline long
+static __always_inline long
atomic_long_sub_return_acquire(long i, atomic_long_t *v)
{
return atomic_sub_return_acquire(i, v);
}
-static inline long
+static __always_inline long
atomic_long_sub_return_release(long i, atomic_long_t *v)
{
return atomic_sub_return_release(i, v);
}
-static inline long
+static __always_inline long
atomic_long_sub_return_relaxed(long i, atomic_long_t *v)
{
return atomic_sub_return_relaxed(i, v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_sub(long i, atomic_long_t *v)
{
return atomic_fetch_sub(i, v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_sub_acquire(long i, atomic_long_t *v)
{
return atomic_fetch_sub_acquire(i, v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_sub_release(long i, atomic_long_t *v)
{
return atomic_fetch_sub_release(i, v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_sub_relaxed(long i, atomic_long_t *v)
{
return atomic_fetch_sub_relaxed(i, v);
}
-static inline void
+static __always_inline void
atomic_long_inc(atomic_long_t *v)
{
atomic_inc(v);
}
-static inline long
+static __always_inline long
atomic_long_inc_return(atomic_long_t *v)
{
return atomic_inc_return(v);
}
-static inline long
+static __always_inline long
atomic_long_inc_return_acquire(atomic_long_t *v)
{
return atomic_inc_return_acquire(v);
}
-static inline long
+static __always_inline long
atomic_long_inc_return_release(atomic_long_t *v)
{
return atomic_inc_return_release(v);
}
-static inline long
+static __always_inline long
atomic_long_inc_return_relaxed(atomic_long_t *v)
{
return atomic_inc_return_relaxed(v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_inc(atomic_long_t *v)
{
return atomic_fetch_inc(v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_inc_acquire(atomic_long_t *v)
{
return atomic_fetch_inc_acquire(v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_inc_release(atomic_long_t *v)
{
return atomic_fetch_inc_release(v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_inc_relaxed(atomic_long_t *v)
{
return atomic_fetch_inc_relaxed(v);
}
-static inline void
+static __always_inline void
atomic_long_dec(atomic_long_t *v)
{
atomic_dec(v);
}
-static inline long
+static __always_inline long
atomic_long_dec_return(atomic_long_t *v)
{
return atomic_dec_return(v);
}
-static inline long
+static __always_inline long
atomic_long_dec_return_acquire(atomic_long_t *v)
{
return atomic_dec_return_acquire(v);
}
-static inline long
+static __always_inline long
atomic_long_dec_return_release(atomic_long_t *v)
{
return atomic_dec_return_release(v);
}
-static inline long
+static __always_inline long
atomic_long_dec_return_relaxed(atomic_long_t *v)
{
return atomic_dec_return_relaxed(v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_dec(atomic_long_t *v)
{
return atomic_fetch_dec(v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_dec_acquire(atomic_long_t *v)
{
return atomic_fetch_dec_acquire(v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_dec_release(atomic_long_t *v)
{
return atomic_fetch_dec_release(v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_dec_relaxed(atomic_long_t *v)
{
return atomic_fetch_dec_relaxed(v);
}
-static inline void
+static __always_inline void
atomic_long_and(long i, atomic_long_t *v)
{
atomic_and(i, v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_and(long i, atomic_long_t *v)
{
return atomic_fetch_and(i, v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_and_acquire(long i, atomic_long_t *v)
{
return atomic_fetch_and_acquire(i, v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_and_release(long i, atomic_long_t *v)
{
return atomic_fetch_and_release(i, v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_and_relaxed(long i, atomic_long_t *v)
{
return atomic_fetch_and_relaxed(i, v);
}
-static inline void
+static __always_inline void
atomic_long_andnot(long i, atomic_long_t *v)
{
atomic_andnot(i, v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_andnot(long i, atomic_long_t *v)
{
return atomic_fetch_andnot(i, v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_andnot_acquire(long i, atomic_long_t *v)
{
return atomic_fetch_andnot_acquire(i, v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_andnot_release(long i, atomic_long_t *v)
{
return atomic_fetch_andnot_release(i, v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_andnot_relaxed(long i, atomic_long_t *v)
{
return atomic_fetch_andnot_relaxed(i, v);
}
-static inline void
+static __always_inline void
atomic_long_or(long i, atomic_long_t *v)
{
atomic_or(i, v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_or(long i, atomic_long_t *v)
{
return atomic_fetch_or(i, v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_or_acquire(long i, atomic_long_t *v)
{
return atomic_fetch_or_acquire(i, v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_or_release(long i, atomic_long_t *v)
{
return atomic_fetch_or_release(i, v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_or_relaxed(long i, atomic_long_t *v)
{
return atomic_fetch_or_relaxed(i, v);
}
-static inline void
+static __always_inline void
atomic_long_xor(long i, atomic_long_t *v)
{
atomic_xor(i, v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_xor(long i, atomic_long_t *v)
{
return atomic_fetch_xor(i, v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_xor_acquire(long i, atomic_long_t *v)
{
return atomic_fetch_xor_acquire(i, v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_xor_release(long i, atomic_long_t *v)
{
return atomic_fetch_xor_release(i, v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_xor_relaxed(long i, atomic_long_t *v)
{
return atomic_fetch_xor_relaxed(i, v);
}
-static inline long
+static __always_inline long
atomic_long_xchg(atomic_long_t *v, long i)
{
return atomic_xchg(v, i);
}
-static inline long
+static __always_inline long
atomic_long_xchg_acquire(atomic_long_t *v, long i)
{
return atomic_xchg_acquire(v, i);
}
-static inline long
+static __always_inline long
atomic_long_xchg_release(atomic_long_t *v, long i)
{
return atomic_xchg_release(v, i);
}
-static inline long
+static __always_inline long
atomic_long_xchg_relaxed(atomic_long_t *v, long i)
{
return atomic_xchg_relaxed(v, i);
}
-static inline long
+static __always_inline long
atomic_long_cmpxchg(atomic_long_t *v, long old, long new)
{
return atomic_cmpxchg(v, old, new);
}
-static inline long
+static __always_inline long
atomic_long_cmpxchg_acquire(atomic_long_t *v, long old, long new)
{
return atomic_cmpxchg_acquire(v, old, new);
}
-static inline long
+static __always_inline long
atomic_long_cmpxchg_release(atomic_long_t *v, long old, long new)
{
return atomic_cmpxchg_release(v, old, new);
}
-static inline long
+static __always_inline long
atomic_long_cmpxchg_relaxed(atomic_long_t *v, long old, long new)
{
return atomic_cmpxchg_relaxed(v, old, new);
}
-static inline bool
+static __always_inline bool
atomic_long_try_cmpxchg(atomic_long_t *v, long *old, long new)
{
return atomic_try_cmpxchg(v, (int *)old, new);
}
-static inline bool
+static __always_inline bool
atomic_long_try_cmpxchg_acquire(atomic_long_t *v, long *old, long new)
{
return atomic_try_cmpxchg_acquire(v, (int *)old, new);
}
-static inline bool
+static __always_inline bool
atomic_long_try_cmpxchg_release(atomic_long_t *v, long *old, long new)
{
return atomic_try_cmpxchg_release(v, (int *)old, new);
}
-static inline bool
+static __always_inline bool
atomic_long_try_cmpxchg_relaxed(atomic_long_t *v, long *old, long new)
{
return atomic_try_cmpxchg_relaxed(v, (int *)old, new);
}
-static inline bool
+static __always_inline bool
atomic_long_sub_and_test(long i, atomic_long_t *v)
{
return atomic_sub_and_test(i, v);
}
-static inline bool
+static __always_inline bool
atomic_long_dec_and_test(atomic_long_t *v)
{
return atomic_dec_and_test(v);
}
-static inline bool
+static __always_inline bool
atomic_long_inc_and_test(atomic_long_t *v)
{
return atomic_inc_and_test(v);
}
-static inline bool
+static __always_inline bool
atomic_long_add_negative(long i, atomic_long_t *v)
{
return atomic_add_negative(i, v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_add_unless(atomic_long_t *v, long a, long u)
{
return atomic_fetch_add_unless(v, a, u);
}
-static inline bool
+static __always_inline bool
atomic_long_add_unless(atomic_long_t *v, long a, long u)
{
return atomic_add_unless(v, a, u);
}
-static inline bool
+static __always_inline bool
atomic_long_inc_not_zero(atomic_long_t *v)
{
return atomic_inc_not_zero(v);
}
-static inline bool
+static __always_inline bool
atomic_long_inc_unless_negative(atomic_long_t *v)
{
return atomic_inc_unless_negative(v);
}
-static inline bool
+static __always_inline bool
atomic_long_dec_unless_positive(atomic_long_t *v)
{
return atomic_dec_unless_positive(v);
}
-static inline long
+static __always_inline long
atomic_long_dec_if_positive(atomic_long_t *v)
{
return atomic_dec_if_positive(v);
@@ -1010,4 +1011,4 @@ atomic_long_dec_if_positive(atomic_long_t *v)
#endif /* CONFIG_64BIT */
#endif /* _ASM_GENERIC_ATOMIC_LONG_H */
-// 77558968132ce4f911ad53f6f52ce423006f6268
+// a624200981f552b2c6be4f32fe44da8289f30d87
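
The two mirrored halves of atomic-long.h above exist because atomic_long_t is only a typedef that tracks the native word size; a condensed sketch of the selection logic (the real header also carries ATOMIC_LONG_INIT and a few cond_read aliases) is:

	#ifdef CONFIG_64BIT
	typedef atomic64_t atomic_long_t;	/* 64-bit: every op forwards to atomic64_*() */
	#else
	typedef atomic_t atomic_long_t;		/* 32-bit: every op forwards to atomic_*() */
	#endif

Since each atomic_long_*() function is a one-line forwarder, marking it __always_inline keeps it from becoming a standalone function; the instrumentation itself happens once, in the atomic64_*()/atomic_*() wrappers it calls.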
diff --git a/include/asm-generic/bitops/instrumented-atomic.h b/include/asm-generic/bitops/instrumented-atomic.h
index 18ce3c9e8eec..fb2cb33a4013 100644
--- a/include/asm-generic/bitops/instrumented-atomic.h
+++ b/include/asm-generic/bitops/instrumented-atomic.h
@@ -11,7 +11,7 @@
#ifndef _ASM_GENERIC_BITOPS_INSTRUMENTED_ATOMIC_H
#define _ASM_GENERIC_BITOPS_INSTRUMENTED_ATOMIC_H
-#include <linux/kasan-checks.h>
+#include <linux/instrumented.h>
/**
* set_bit - Atomically set a bit in memory
@@ -25,7 +25,7 @@
*/
static inline void set_bit(long nr, volatile unsigned long *addr)
{
- kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
+ instrument_atomic_write(addr + BIT_WORD(nr), sizeof(long));
arch_set_bit(nr, addr);
}
@@ -38,7 +38,7 @@ static inline void set_bit(long nr, volatile unsigned long *addr)
*/
static inline void clear_bit(long nr, volatile unsigned long *addr)
{
- kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
+ instrument_atomic_write(addr + BIT_WORD(nr), sizeof(long));
arch_clear_bit(nr, addr);
}
@@ -54,7 +54,7 @@ static inline void clear_bit(long nr, volatile unsigned long *addr)
*/
static inline void change_bit(long nr, volatile unsigned long *addr)
{
- kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
+ instrument_atomic_write(addr + BIT_WORD(nr), sizeof(long));
arch_change_bit(nr, addr);
}
@@ -67,7 +67,7 @@ static inline void change_bit(long nr, volatile unsigned long *addr)
*/
static inline bool test_and_set_bit(long nr, volatile unsigned long *addr)
{
- kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
+ instrument_atomic_write(addr + BIT_WORD(nr), sizeof(long));
return arch_test_and_set_bit(nr, addr);
}
@@ -80,7 +80,7 @@ static inline bool test_and_set_bit(long nr, volatile unsigned long *addr)
*/
static inline bool test_and_clear_bit(long nr, volatile unsigned long *addr)
{
- kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
+ instrument_atomic_write(addr + BIT_WORD(nr), sizeof(long));
return arch_test_and_clear_bit(nr, addr);
}
@@ -93,7 +93,7 @@ static inline bool test_and_clear_bit(long nr, volatile unsigned long *addr)
*/
static inline bool test_and_change_bit(long nr, volatile unsigned long *addr)
{
- kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
+ instrument_atomic_write(addr + BIT_WORD(nr), sizeof(long));
return arch_test_and_change_bit(nr, addr);
}
diff --git a/include/asm-generic/bitops/instrumented-lock.h b/include/asm-generic/bitops/instrumented-lock.h
index ec53fdeea9ec..b9bec468ae03 100644
--- a/include/asm-generic/bitops/instrumented-lock.h
+++ b/include/asm-generic/bitops/instrumented-lock.h
@@ -11,7 +11,7 @@
#ifndef _ASM_GENERIC_BITOPS_INSTRUMENTED_LOCK_H
#define _ASM_GENERIC_BITOPS_INSTRUMENTED_LOCK_H
-#include <linux/kasan-checks.h>
+#include <linux/instrumented.h>
/**
* clear_bit_unlock - Clear a bit in memory, for unlock
@@ -22,7 +22,7 @@
*/
static inline void clear_bit_unlock(long nr, volatile unsigned long *addr)
{
- kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
+ instrument_atomic_write(addr + BIT_WORD(nr), sizeof(long));
arch_clear_bit_unlock(nr, addr);
}
@@ -37,7 +37,7 @@ static inline void clear_bit_unlock(long nr, volatile unsigned long *addr)
*/
static inline void __clear_bit_unlock(long nr, volatile unsigned long *addr)
{
- kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
+ instrument_write(addr + BIT_WORD(nr), sizeof(long));
arch___clear_bit_unlock(nr, addr);
}
@@ -52,7 +52,7 @@ static inline void __clear_bit_unlock(long nr, volatile unsigned long *addr)
*/
static inline bool test_and_set_bit_lock(long nr, volatile unsigned long *addr)
{
- kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
+ instrument_atomic_write(addr + BIT_WORD(nr), sizeof(long));
return arch_test_and_set_bit_lock(nr, addr);
}
@@ -71,7 +71,7 @@ static inline bool test_and_set_bit_lock(long nr, volatile unsigned long *addr)
static inline bool
clear_bit_unlock_is_negative_byte(long nr, volatile unsigned long *addr)
{
- kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
+ instrument_atomic_write(addr + BIT_WORD(nr), sizeof(long));
return arch_clear_bit_unlock_is_negative_byte(nr, addr);
}
/* Let everybody know we have it. */
diff --git a/include/asm-generic/bitops/instrumented-non-atomic.h b/include/asm-generic/bitops/instrumented-non-atomic.h
index 95ff28d128a1..20f788a25ef9 100644
--- a/include/asm-generic/bitops/instrumented-non-atomic.h
+++ b/include/asm-generic/bitops/instrumented-non-atomic.h
@@ -11,7 +11,7 @@
#ifndef _ASM_GENERIC_BITOPS_INSTRUMENTED_NON_ATOMIC_H
#define _ASM_GENERIC_BITOPS_INSTRUMENTED_NON_ATOMIC_H
-#include <linux/kasan-checks.h>
+#include <linux/instrumented.h>
/**
* __set_bit - Set a bit in memory
@@ -24,7 +24,7 @@
*/
static inline void __set_bit(long nr, volatile unsigned long *addr)
{
- kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
+ instrument_write(addr + BIT_WORD(nr), sizeof(long));
arch___set_bit(nr, addr);
}
@@ -39,7 +39,7 @@ static inline void __set_bit(long nr, volatile unsigned long *addr)
*/
static inline void __clear_bit(long nr, volatile unsigned long *addr)
{
- kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
+ instrument_write(addr + BIT_WORD(nr), sizeof(long));
arch___clear_bit(nr, addr);
}
@@ -54,7 +54,7 @@ static inline void __clear_bit(long nr, volatile unsigned long *addr)
*/
static inline void __change_bit(long nr, volatile unsigned long *addr)
{
- kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
+ instrument_write(addr + BIT_WORD(nr), sizeof(long));
arch___change_bit(nr, addr);
}
@@ -68,7 +68,7 @@ static inline void __change_bit(long nr, volatile unsigned long *addr)
*/
static inline bool __test_and_set_bit(long nr, volatile unsigned long *addr)
{
- kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
+ instrument_write(addr + BIT_WORD(nr), sizeof(long));
return arch___test_and_set_bit(nr, addr);
}
@@ -82,7 +82,7 @@ static inline bool __test_and_set_bit(long nr, volatile unsigned long *addr)
*/
static inline bool __test_and_clear_bit(long nr, volatile unsigned long *addr)
{
- kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
+ instrument_write(addr + BIT_WORD(nr), sizeof(long));
return arch___test_and_clear_bit(nr, addr);
}
@@ -96,7 +96,7 @@ static inline bool __test_and_clear_bit(long nr, volatile unsigned long *addr)
*/
static inline bool __test_and_change_bit(long nr, volatile unsigned long *addr)
{
- kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
+ instrument_write(addr + BIT_WORD(nr), sizeof(long));
return arch___test_and_change_bit(nr, addr);
}
@@ -107,7 +107,7 @@ static inline bool __test_and_change_bit(long nr, volatile unsigned long *addr)
*/
static inline bool test_bit(long nr, const volatile unsigned long *addr)
{
- kasan_check_read(addr + BIT_WORD(nr), sizeof(long));
+ instrument_atomic_read(addr + BIT_WORD(nr), sizeof(long));
return arch_test_bit(nr, addr);
}
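
The pattern across these bitops wrappers is uniform: instrument the access first, then call the arch_ primitive, using instrument_write() for the non-atomic __ variants, instrument_atomic_write() for the atomic ones, and instrument_atomic_read() for test_bit(). A minimal sketch of what the distinction means for callers (the bitmap and functions below are hypothetical, not part of this change):

	#include <linux/bitops.h>

	static unsigned long shared_flags;	/* hypothetical shared bitmap */

	void mark_ready(void)
	{
		/* Atomic bitop: routed through instrument_atomic_write(), so
		 * KCSAN treats it as a marked (atomic) access. */
		set_bit(0, &shared_flags);
	}

	void mark_dirty_unlocked(void)
	{
		/* Non-atomic bitop: routed through instrument_write(); a
		 * concurrent writer to the same word is reported as a race. */
		__set_bit(1, &shared_flags);
	}
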
diff --git a/include/linux/atomic-fallback.h b/include/linux/atomic-fallback.h
index a7d240e465c0..656b5489b673 100644
--- a/include/linux/atomic-fallback.h
+++ b/include/linux/atomic-fallback.h
@@ -6,6 +6,8 @@
#ifndef _LINUX_ATOMIC_FALLBACK_H
#define _LINUX_ATOMIC_FALLBACK_H
+#include <linux/compiler.h>
+
#ifndef xchg_relaxed
#define xchg_relaxed xchg
#define xchg_acquire xchg
@@ -76,7 +78,7 @@
#endif /* cmpxchg64_relaxed */
#ifndef atomic_read_acquire
-static inline int
+static __always_inline int
atomic_read_acquire(const atomic_t *v)
{
return smp_load_acquire(&(v)->counter);
@@ -85,7 +87,7 @@ atomic_read_acquire(const atomic_t *v)
#endif
#ifndef atomic_set_release
-static inline void
+static __always_inline void
atomic_set_release(atomic_t *v, int i)
{
smp_store_release(&(v)->counter, i);
@@ -100,7 +102,7 @@ atomic_set_release(atomic_t *v, int i)
#else /* atomic_add_return_relaxed */
#ifndef atomic_add_return_acquire
-static inline int
+static __always_inline int
atomic_add_return_acquire(int i, atomic_t *v)
{
int ret = atomic_add_return_relaxed(i, v);
@@ -111,7 +113,7 @@ atomic_add_return_acquire(int i, atomic_t *v)
#endif
#ifndef atomic_add_return_release
-static inline int
+static __always_inline int
atomic_add_return_release(int i, atomic_t *v)
{
__atomic_release_fence();
@@ -121,7 +123,7 @@ atomic_add_return_release(int i, atomic_t *v)
#endif
#ifndef atomic_add_return
-static inline int
+static __always_inline int
atomic_add_return(int i, atomic_t *v)
{
int ret;
@@ -142,7 +144,7 @@ atomic_add_return(int i, atomic_t *v)
#else /* atomic_fetch_add_relaxed */
#ifndef atomic_fetch_add_acquire
-static inline int
+static __always_inline int
atomic_fetch_add_acquire(int i, atomic_t *v)
{
int ret = atomic_fetch_add_relaxed(i, v);
@@ -153,7 +155,7 @@ atomic_fetch_add_acquire(int i, atomic_t *v)
#endif
#ifndef atomic_fetch_add_release
-static inline int
+static __always_inline int
atomic_fetch_add_release(int i, atomic_t *v)
{
__atomic_release_fence();
@@ -163,7 +165,7 @@ atomic_fetch_add_release(int i, atomic_t *v)
#endif
#ifndef atomic_fetch_add
-static inline int
+static __always_inline int
atomic_fetch_add(int i, atomic_t *v)
{
int ret;
@@ -184,7 +186,7 @@ atomic_fetch_add(int i, atomic_t *v)
#else /* atomic_sub_return_relaxed */
#ifndef atomic_sub_return_acquire
-static inline int
+static __always_inline int
atomic_sub_return_acquire(int i, atomic_t *v)
{
int ret = atomic_sub_return_relaxed(i, v);
@@ -195,7 +197,7 @@ atomic_sub_return_acquire(int i, atomic_t *v)
#endif
#ifndef atomic_sub_return_release
-static inline int
+static __always_inline int
atomic_sub_return_release(int i, atomic_t *v)
{
__atomic_release_fence();
@@ -205,7 +207,7 @@ atomic_sub_return_release(int i, atomic_t *v)
#endif
#ifndef atomic_sub_return
-static inline int
+static __always_inline int
atomic_sub_return(int i, atomic_t *v)
{
int ret;
@@ -226,7 +228,7 @@ atomic_sub_return(int i, atomic_t *v)
#else /* atomic_fetch_sub_relaxed */
#ifndef atomic_fetch_sub_acquire
-static inline int
+static __always_inline int
atomic_fetch_sub_acquire(int i, atomic_t *v)
{
int ret = atomic_fetch_sub_relaxed(i, v);
@@ -237,7 +239,7 @@ atomic_fetch_sub_acquire(int i, atomic_t *v)
#endif
#ifndef atomic_fetch_sub_release
-static inline int
+static __always_inline int
atomic_fetch_sub_release(int i, atomic_t *v)
{
__atomic_release_fence();
@@ -247,7 +249,7 @@ atomic_fetch_sub_release(int i, atomic_t *v)
#endif
#ifndef atomic_fetch_sub
-static inline int
+static __always_inline int
atomic_fetch_sub(int i, atomic_t *v)
{
int ret;
@@ -262,7 +264,7 @@ atomic_fetch_sub(int i, atomic_t *v)
#endif /* atomic_fetch_sub_relaxed */
#ifndef atomic_inc
-static inline void
+static __always_inline void
atomic_inc(atomic_t *v)
{
atomic_add(1, v);
@@ -278,7 +280,7 @@ atomic_inc(atomic_t *v)
#endif /* atomic_inc_return */
#ifndef atomic_inc_return
-static inline int
+static __always_inline int
atomic_inc_return(atomic_t *v)
{
return atomic_add_return(1, v);
@@ -287,7 +289,7 @@ atomic_inc_return(atomic_t *v)
#endif
#ifndef atomic_inc_return_acquire
-static inline int
+static __always_inline int
atomic_inc_return_acquire(atomic_t *v)
{
return atomic_add_return_acquire(1, v);
@@ -296,7 +298,7 @@ atomic_inc_return_acquire(atomic_t *v)
#endif
#ifndef atomic_inc_return_release
-static inline int
+static __always_inline int
atomic_inc_return_release(atomic_t *v)
{
return atomic_add_return_release(1, v);
@@ -305,7 +307,7 @@ atomic_inc_return_release(atomic_t *v)
#endif
#ifndef atomic_inc_return_relaxed
-static inline int
+static __always_inline int
atomic_inc_return_relaxed(atomic_t *v)
{
return atomic_add_return_relaxed(1, v);
@@ -316,7 +318,7 @@ atomic_inc_return_relaxed(atomic_t *v)
#else /* atomic_inc_return_relaxed */
#ifndef atomic_inc_return_acquire
-static inline int
+static __always_inline int
atomic_inc_return_acquire(atomic_t *v)
{
int ret = atomic_inc_return_relaxed(v);
@@ -327,7 +329,7 @@ atomic_inc_return_acquire(atomic_t *v)
#endif
#ifndef atomic_inc_return_release
-static inline int
+static __always_inline int
atomic_inc_return_release(atomic_t *v)
{
__atomic_release_fence();
@@ -337,7 +339,7 @@ atomic_inc_return_release(atomic_t *v)
#endif
#ifndef atomic_inc_return
-static inline int
+static __always_inline int
atomic_inc_return(atomic_t *v)
{
int ret;
@@ -359,7 +361,7 @@ atomic_inc_return(atomic_t *v)
#endif /* atomic_fetch_inc */
#ifndef atomic_fetch_inc
-static inline int
+static __always_inline int
atomic_fetch_inc(atomic_t *v)
{
return atomic_fetch_add(1, v);
@@ -368,7 +370,7 @@ atomic_fetch_inc(atomic_t *v)
#endif
#ifndef atomic_fetch_inc_acquire
-static inline int
+static __always_inline int
atomic_fetch_inc_acquire(atomic_t *v)
{
return atomic_fetch_add_acquire(1, v);
@@ -377,7 +379,7 @@ atomic_fetch_inc_acquire(atomic_t *v)
#endif
#ifndef atomic_fetch_inc_release
-static inline int
+static __always_inline int
atomic_fetch_inc_release(atomic_t *v)
{
return atomic_fetch_add_release(1, v);
@@ -386,7 +388,7 @@ atomic_fetch_inc_release(atomic_t *v)
#endif
#ifndef atomic_fetch_inc_relaxed
-static inline int
+static __always_inline int
atomic_fetch_inc_relaxed(atomic_t *v)
{
return atomic_fetch_add_relaxed(1, v);
@@ -397,7 +399,7 @@ atomic_fetch_inc_relaxed(atomic_t *v)
#else /* atomic_fetch_inc_relaxed */
#ifndef atomic_fetch_inc_acquire
-static inline int
+static __always_inline int
atomic_fetch_inc_acquire(atomic_t *v)
{
int ret = atomic_fetch_inc_relaxed(v);
@@ -408,7 +410,7 @@ atomic_fetch_inc_acquire(atomic_t *v)
#endif
#ifndef atomic_fetch_inc_release
-static inline int
+static __always_inline int
atomic_fetch_inc_release(atomic_t *v)
{
__atomic_release_fence();
@@ -418,7 +420,7 @@ atomic_fetch_inc_release(atomic_t *v)
#endif
#ifndef atomic_fetch_inc
-static inline int
+static __always_inline int
atomic_fetch_inc(atomic_t *v)
{
int ret;
@@ -433,7 +435,7 @@ atomic_fetch_inc(atomic_t *v)
#endif /* atomic_fetch_inc_relaxed */
#ifndef atomic_dec
-static inline void
+static __always_inline void
atomic_dec(atomic_t *v)
{
atomic_sub(1, v);
@@ -449,7 +451,7 @@ atomic_dec(atomic_t *v)
#endif /* atomic_dec_return */
#ifndef atomic_dec_return
-static inline int
+static __always_inline int
atomic_dec_return(atomic_t *v)
{
return atomic_sub_return(1, v);
@@ -458,7 +460,7 @@ atomic_dec_return(atomic_t *v)
#endif
#ifndef atomic_dec_return_acquire
-static inline int
+static __always_inline int
atomic_dec_return_acquire(atomic_t *v)
{
return atomic_sub_return_acquire(1, v);
@@ -467,7 +469,7 @@ atomic_dec_return_acquire(atomic_t *v)
#endif
#ifndef atomic_dec_return_release
-static inline int
+static __always_inline int
atomic_dec_return_release(atomic_t *v)
{
return atomic_sub_return_release(1, v);
@@ -476,7 +478,7 @@ atomic_dec_return_release(atomic_t *v)
#endif
#ifndef atomic_dec_return_relaxed
-static inline int
+static __always_inline int
atomic_dec_return_relaxed(atomic_t *v)
{
return atomic_sub_return_relaxed(1, v);
@@ -487,7 +489,7 @@ atomic_dec_return_relaxed(atomic_t *v)
#else /* atomic_dec_return_relaxed */
#ifndef atomic_dec_return_acquire
-static inline int
+static __always_inline int
atomic_dec_return_acquire(atomic_t *v)
{
int ret = atomic_dec_return_relaxed(v);
@@ -498,7 +500,7 @@ atomic_dec_return_acquire(atomic_t *v)
#endif
#ifndef atomic_dec_return_release
-static inline int
+static __always_inline int
atomic_dec_return_release(atomic_t *v)
{
__atomic_release_fence();
@@ -508,7 +510,7 @@ atomic_dec_return_release(atomic_t *v)
#endif
#ifndef atomic_dec_return
-static inline int
+static __always_inline int
atomic_dec_return(atomic_t *v)
{
int ret;
@@ -530,7 +532,7 @@ atomic_dec_return(atomic_t *v)
#endif /* atomic_fetch_dec */
#ifndef atomic_fetch_dec
-static inline int
+static __always_inline int
atomic_fetch_dec(atomic_t *v)
{
return atomic_fetch_sub(1, v);
@@ -539,7 +541,7 @@ atomic_fetch_dec(atomic_t *v)
#endif
#ifndef atomic_fetch_dec_acquire
-static inline int
+static __always_inline int
atomic_fetch_dec_acquire(atomic_t *v)
{
return atomic_fetch_sub_acquire(1, v);
@@ -548,7 +550,7 @@ atomic_fetch_dec_acquire(atomic_t *v)
#endif
#ifndef atomic_fetch_dec_release
-static inline int
+static __always_inline int
atomic_fetch_dec_release(atomic_t *v)
{
return atomic_fetch_sub_release(1, v);
@@ -557,7 +559,7 @@ atomic_fetch_dec_release(atomic_t *v)
#endif
#ifndef atomic_fetch_dec_relaxed
-static inline int
+static __always_inline int
atomic_fetch_dec_relaxed(atomic_t *v)
{
return atomic_fetch_sub_relaxed(1, v);
@@ -568,7 +570,7 @@ atomic_fetch_dec_relaxed(atomic_t *v)
#else /* atomic_fetch_dec_relaxed */
#ifndef atomic_fetch_dec_acquire
-static inline int
+static __always_inline int
atomic_fetch_dec_acquire(atomic_t *v)
{
int ret = atomic_fetch_dec_relaxed(v);
@@ -579,7 +581,7 @@ atomic_fetch_dec_acquire(atomic_t *v)
#endif
#ifndef atomic_fetch_dec_release
-static inline int
+static __always_inline int
atomic_fetch_dec_release(atomic_t *v)
{
__atomic_release_fence();
@@ -589,7 +591,7 @@ atomic_fetch_dec_release(atomic_t *v)
#endif
#ifndef atomic_fetch_dec
-static inline int
+static __always_inline int
atomic_fetch_dec(atomic_t *v)
{
int ret;
@@ -610,7 +612,7 @@ atomic_fetch_dec(atomic_t *v)
#else /* atomic_fetch_and_relaxed */
#ifndef atomic_fetch_and_acquire
-static inline int
+static __always_inline int
atomic_fetch_and_acquire(int i, atomic_t *v)
{
int ret = atomic_fetch_and_relaxed(i, v);
@@ -621,7 +623,7 @@ atomic_fetch_and_acquire(int i, atomic_t *v)
#endif
#ifndef atomic_fetch_and_release
-static inline int
+static __always_inline int
atomic_fetch_and_release(int i, atomic_t *v)
{
__atomic_release_fence();
@@ -631,7 +633,7 @@ atomic_fetch_and_release(int i, atomic_t *v)
#endif
#ifndef atomic_fetch_and
-static inline int
+static __always_inline int
atomic_fetch_and(int i, atomic_t *v)
{
int ret;
@@ -646,7 +648,7 @@ atomic_fetch_and(int i, atomic_t *v)
#endif /* atomic_fetch_and_relaxed */
#ifndef atomic_andnot
-static inline void
+static __always_inline void
atomic_andnot(int i, atomic_t *v)
{
atomic_and(~i, v);
@@ -662,7 +664,7 @@ atomic_andnot(int i, atomic_t *v)
#endif /* atomic_fetch_andnot */
#ifndef atomic_fetch_andnot
-static inline int
+static __always_inline int
atomic_fetch_andnot(int i, atomic_t *v)
{
return atomic_fetch_and(~i, v);
@@ -671,7 +673,7 @@ atomic_fetch_andnot(int i, atomic_t *v)
#endif
#ifndef atomic_fetch_andnot_acquire
-static inline int
+static __always_inline int
atomic_fetch_andnot_acquire(int i, atomic_t *v)
{
return atomic_fetch_and_acquire(~i, v);
@@ -680,7 +682,7 @@ atomic_fetch_andnot_acquire(int i, atomic_t *v)
#endif
#ifndef atomic_fetch_andnot_release
-static inline int
+static __always_inline int
atomic_fetch_andnot_release(int i, atomic_t *v)
{
return atomic_fetch_and_release(~i, v);
@@ -689,7 +691,7 @@ atomic_fetch_andnot_release(int i, atomic_t *v)
#endif
#ifndef atomic_fetch_andnot_relaxed
-static inline int
+static __always_inline int
atomic_fetch_andnot_relaxed(int i, atomic_t *v)
{
return atomic_fetch_and_relaxed(~i, v);
@@ -700,7 +702,7 @@ atomic_fetch_andnot_relaxed(int i, atomic_t *v)
#else /* atomic_fetch_andnot_relaxed */
#ifndef atomic_fetch_andnot_acquire
-static inline int
+static __always_inline int
atomic_fetch_andnot_acquire(int i, atomic_t *v)
{
int ret = atomic_fetch_andnot_relaxed(i, v);
@@ -711,7 +713,7 @@ atomic_fetch_andnot_acquire(int i, atomic_t *v)
#endif
#ifndef atomic_fetch_andnot_release
-static inline int
+static __always_inline int
atomic_fetch_andnot_release(int i, atomic_t *v)
{
__atomic_release_fence();
@@ -721,7 +723,7 @@ atomic_fetch_andnot_release(int i, atomic_t *v)
#endif
#ifndef atomic_fetch_andnot
-static inline int
+static __always_inline int
atomic_fetch_andnot(int i, atomic_t *v)
{
int ret;
@@ -742,7 +744,7 @@ atomic_fetch_andnot(int i, atomic_t *v)
#else /* atomic_fetch_or_relaxed */
#ifndef atomic_fetch_or_acquire
-static inline int
+static __always_inline int
atomic_fetch_or_acquire(int i, atomic_t *v)
{
int ret = atomic_fetch_or_relaxed(i, v);
@@ -753,7 +755,7 @@ atomic_fetch_or_acquire(int i, atomic_t *v)
#endif
#ifndef atomic_fetch_or_release
-static inline int
+static __always_inline int
atomic_fetch_or_release(int i, atomic_t *v)
{
__atomic_release_fence();
@@ -763,7 +765,7 @@ atomic_fetch_or_release(int i, atomic_t *v)
#endif
#ifndef atomic_fetch_or
-static inline int
+static __always_inline int
atomic_fetch_or(int i, atomic_t *v)
{
int ret;
@@ -784,7 +786,7 @@ atomic_fetch_or(int i, atomic_t *v)
#else /* atomic_fetch_xor_relaxed */
#ifndef atomic_fetch_xor_acquire
-static inline int
+static __always_inline int
atomic_fetch_xor_acquire(int i, atomic_t *v)
{
int ret = atomic_fetch_xor_relaxed(i, v);
@@ -795,7 +797,7 @@ atomic_fetch_xor_acquire(int i, atomic_t *v)
#endif
#ifndef atomic_fetch_xor_release
-static inline int
+static __always_inline int
atomic_fetch_xor_release(int i, atomic_t *v)
{
__atomic_release_fence();
@@ -805,7 +807,7 @@ atomic_fetch_xor_release(int i, atomic_t *v)
#endif
#ifndef atomic_fetch_xor
-static inline int
+static __always_inline int
atomic_fetch_xor(int i, atomic_t *v)
{
int ret;
@@ -826,7 +828,7 @@ atomic_fetch_xor(int i, atomic_t *v)
#else /* atomic_xchg_relaxed */
#ifndef atomic_xchg_acquire
-static inline int
+static __always_inline int
atomic_xchg_acquire(atomic_t *v, int i)
{
int ret = atomic_xchg_relaxed(v, i);
@@ -837,7 +839,7 @@ atomic_xchg_acquire(atomic_t *v, int i)
#endif
#ifndef atomic_xchg_release
-static inline int
+static __always_inline int
atomic_xchg_release(atomic_t *v, int i)
{
__atomic_release_fence();
@@ -847,7 +849,7 @@ atomic_xchg_release(atomic_t *v, int i)
#endif
#ifndef atomic_xchg
-static inline int
+static __always_inline int
atomic_xchg(atomic_t *v, int i)
{
int ret;
@@ -868,7 +870,7 @@ atomic_xchg(atomic_t *v, int i)
#else /* atomic_cmpxchg_relaxed */
#ifndef atomic_cmpxchg_acquire
-static inline int
+static __always_inline int
atomic_cmpxchg_acquire(atomic_t *v, int old, int new)
{
int ret = atomic_cmpxchg_relaxed(v, old, new);
@@ -879,7 +881,7 @@ atomic_cmpxchg_acquire(atomic_t *v, int old, int new)
#endif
#ifndef atomic_cmpxchg_release
-static inline int
+static __always_inline int
atomic_cmpxchg_release(atomic_t *v, int old, int new)
{
__atomic_release_fence();
@@ -889,7 +891,7 @@ atomic_cmpxchg_release(atomic_t *v, int old, int new)
#endif
#ifndef atomic_cmpxchg
-static inline int
+static __always_inline int
atomic_cmpxchg(atomic_t *v, int old, int new)
{
int ret;
@@ -911,7 +913,7 @@ atomic_cmpxchg(atomic_t *v, int old, int new)
#endif /* atomic_try_cmpxchg */
#ifndef atomic_try_cmpxchg
-static inline bool
+static __always_inline bool
atomic_try_cmpxchg(atomic_t *v, int *old, int new)
{
int r, o = *old;
@@ -924,7 +926,7 @@ atomic_try_cmpxchg(atomic_t *v, int *old, int new)
#endif
#ifndef atomic_try_cmpxchg_acquire
-static inline bool
+static __always_inline bool
atomic_try_cmpxchg_acquire(atomic_t *v, int *old, int new)
{
int r, o = *old;
@@ -937,7 +939,7 @@ atomic_try_cmpxchg_acquire(atomic_t *v, int *old, int new)
#endif
#ifndef atomic_try_cmpxchg_release
-static inline bool
+static __always_inline bool
atomic_try_cmpxchg_release(atomic_t *v, int *old, int new)
{
int r, o = *old;
@@ -950,7 +952,7 @@ atomic_try_cmpxchg_release(atomic_t *v, int *old, int new)
#endif
#ifndef atomic_try_cmpxchg_relaxed
-static inline bool
+static __always_inline bool
atomic_try_cmpxchg_relaxed(atomic_t *v, int *old, int new)
{
int r, o = *old;
@@ -965,7 +967,7 @@ atomic_try_cmpxchg_relaxed(atomic_t *v, int *old, int new)
#else /* atomic_try_cmpxchg_relaxed */
#ifndef atomic_try_cmpxchg_acquire
-static inline bool
+static __always_inline bool
atomic_try_cmpxchg_acquire(atomic_t *v, int *old, int new)
{
bool ret = atomic_try_cmpxchg_relaxed(v, old, new);
@@ -976,7 +978,7 @@ atomic_try_cmpxchg_acquire(atomic_t *v, int *old, int new)
#endif
#ifndef atomic_try_cmpxchg_release
-static inline bool
+static __always_inline bool
atomic_try_cmpxchg_release(atomic_t *v, int *old, int new)
{
__atomic_release_fence();
@@ -986,7 +988,7 @@ atomic_try_cmpxchg_release(atomic_t *v, int *old, int new)
#endif
#ifndef atomic_try_cmpxchg
-static inline bool
+static __always_inline bool
atomic_try_cmpxchg(atomic_t *v, int *old, int new)
{
bool ret;
@@ -1010,7 +1012,7 @@ atomic_try_cmpxchg(atomic_t *v, int *old, int new)
* true if the result is zero, or false for all
* other cases.
*/
-static inline bool
+static __always_inline bool
atomic_sub_and_test(int i, atomic_t *v)
{
return atomic_sub_return(i, v) == 0;
@@ -1027,7 +1029,7 @@ atomic_sub_and_test(int i, atomic_t *v)
* returns true if the result is 0, or false for all other
* cases.
*/
-static inline bool
+static __always_inline bool
atomic_dec_and_test(atomic_t *v)
{
return atomic_dec_return(v) == 0;
@@ -1044,7 +1046,7 @@ atomic_dec_and_test(atomic_t *v)
* and returns true if the result is zero, or false for all
* other cases.
*/
-static inline bool
+static __always_inline bool
atomic_inc_and_test(atomic_t *v)
{
return atomic_inc_return(v) == 0;
@@ -1062,7 +1064,7 @@ atomic_inc_and_test(atomic_t *v)
* if the result is negative, or false when
* result is greater than or equal to zero.
*/
-static inline bool
+static __always_inline bool
atomic_add_negative(int i, atomic_t *v)
{
return atomic_add_return(i, v) < 0;
@@ -1080,7 +1082,7 @@ atomic_add_negative(int i, atomic_t *v)
* Atomically adds @a to @v, so long as @v was not already @u.
* Returns original value of @v
*/
-static inline int
+static __always_inline int
atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
int c = atomic_read(v);
@@ -1105,7 +1107,7 @@ atomic_fetch_add_unless(atomic_t *v, int a, int u)
* Atomically adds @a to @v, if @v was not already @u.
* Returns true if the addition was done.
*/
-static inline bool
+static __always_inline bool
atomic_add_unless(atomic_t *v, int a, int u)
{
return atomic_fetch_add_unless(v, a, u) != u;
@@ -1121,7 +1123,7 @@ atomic_add_unless(atomic_t *v, int a, int u)
* Atomically increments @v by 1, if @v is non-zero.
* Returns true if the increment was done.
*/
-static inline bool
+static __always_inline bool
atomic_inc_not_zero(atomic_t *v)
{
return atomic_add_unless(v, 1, 0);
@@ -1130,7 +1132,7 @@ atomic_inc_not_zero(atomic_t *v)
#endif
#ifndef atomic_inc_unless_negative
-static inline bool
+static __always_inline bool
atomic_inc_unless_negative(atomic_t *v)
{
int c = atomic_read(v);
@@ -1146,7 +1148,7 @@ atomic_inc_unless_negative(atomic_t *v)
#endif
#ifndef atomic_dec_unless_positive
-static inline bool
+static __always_inline bool
atomic_dec_unless_positive(atomic_t *v)
{
int c = atomic_read(v);
@@ -1162,7 +1164,7 @@ atomic_dec_unless_positive(atomic_t *v)
#endif
#ifndef atomic_dec_if_positive
-static inline int
+static __always_inline int
atomic_dec_if_positive(atomic_t *v)
{
int dec, c = atomic_read(v);
@@ -1186,7 +1188,7 @@ atomic_dec_if_positive(atomic_t *v)
#endif
#ifndef atomic64_read_acquire
-static inline s64
+static __always_inline s64
atomic64_read_acquire(const atomic64_t *v)
{
return smp_load_acquire(&(v)->counter);
@@ -1195,7 +1197,7 @@ atomic64_read_acquire(const atomic64_t *v)
#endif
#ifndef atomic64_set_release
-static inline void
+static __always_inline void
atomic64_set_release(atomic64_t *v, s64 i)
{
smp_store_release(&(v)->counter, i);
@@ -1210,7 +1212,7 @@ atomic64_set_release(atomic64_t *v, s64 i)
#else /* atomic64_add_return_relaxed */
#ifndef atomic64_add_return_acquire
-static inline s64
+static __always_inline s64
atomic64_add_return_acquire(s64 i, atomic64_t *v)
{
s64 ret = atomic64_add_return_relaxed(i, v);
@@ -1221,7 +1223,7 @@ atomic64_add_return_acquire(s64 i, atomic64_t *v)
#endif
#ifndef atomic64_add_return_release
-static inline s64
+static __always_inline s64
atomic64_add_return_release(s64 i, atomic64_t *v)
{
__atomic_release_fence();
@@ -1231,7 +1233,7 @@ atomic64_add_return_release(s64 i, atomic64_t *v)
#endif
#ifndef atomic64_add_return
-static inline s64
+static __always_inline s64
atomic64_add_return(s64 i, atomic64_t *v)
{
s64 ret;
@@ -1252,7 +1254,7 @@ atomic64_add_return(s64 i, atomic64_t *v)
#else /* atomic64_fetch_add_relaxed */
#ifndef atomic64_fetch_add_acquire
-static inline s64
+static __always_inline s64
atomic64_fetch_add_acquire(s64 i, atomic64_t *v)
{
s64 ret = atomic64_fetch_add_relaxed(i, v);
@@ -1263,7 +1265,7 @@ atomic64_fetch_add_acquire(s64 i, atomic64_t *v)
#endif
#ifndef atomic64_fetch_add_release
-static inline s64
+static __always_inline s64
atomic64_fetch_add_release(s64 i, atomic64_t *v)
{
__atomic_release_fence();
@@ -1273,7 +1275,7 @@ atomic64_fetch_add_release(s64 i, atomic64_t *v)
#endif
#ifndef atomic64_fetch_add
-static inline s64
+static __always_inline s64
atomic64_fetch_add(s64 i, atomic64_t *v)
{
s64 ret;
@@ -1294,7 +1296,7 @@ atomic64_fetch_add(s64 i, atomic64_t *v)
#else /* atomic64_sub_return_relaxed */
#ifndef atomic64_sub_return_acquire
-static inline s64
+static __always_inline s64
atomic64_sub_return_acquire(s64 i, atomic64_t *v)
{
s64 ret = atomic64_sub_return_relaxed(i, v);
@@ -1305,7 +1307,7 @@ atomic64_sub_return_acquire(s64 i, atomic64_t *v)
#endif
#ifndef atomic64_sub_return_release
-static inline s64
+static __always_inline s64
atomic64_sub_return_release(s64 i, atomic64_t *v)
{
__atomic_release_fence();
@@ -1315,7 +1317,7 @@ atomic64_sub_return_release(s64 i, atomic64_t *v)
#endif
#ifndef atomic64_sub_return
-static inline s64
+static __always_inline s64
atomic64_sub_return(s64 i, atomic64_t *v)
{
s64 ret;
@@ -1336,7 +1338,7 @@ atomic64_sub_return(s64 i, atomic64_t *v)
#else /* atomic64_fetch_sub_relaxed */
#ifndef atomic64_fetch_sub_acquire
-static inline s64
+static __always_inline s64
atomic64_fetch_sub_acquire(s64 i, atomic64_t *v)
{
s64 ret = atomic64_fetch_sub_relaxed(i, v);
@@ -1347,7 +1349,7 @@ atomic64_fetch_sub_acquire(s64 i, atomic64_t *v)
#endif
#ifndef atomic64_fetch_sub_release
-static inline s64
+static __always_inline s64
atomic64_fetch_sub_release(s64 i, atomic64_t *v)
{
__atomic_release_fence();
@@ -1357,7 +1359,7 @@ atomic64_fetch_sub_release(s64 i, atomic64_t *v)
#endif
#ifndef atomic64_fetch_sub
-static inline s64
+static __always_inline s64
atomic64_fetch_sub(s64 i, atomic64_t *v)
{
s64 ret;
@@ -1372,7 +1374,7 @@ atomic64_fetch_sub(s64 i, atomic64_t *v)
#endif /* atomic64_fetch_sub_relaxed */
#ifndef atomic64_inc
-static inline void
+static __always_inline void
atomic64_inc(atomic64_t *v)
{
atomic64_add(1, v);
@@ -1388,7 +1390,7 @@ atomic64_inc(atomic64_t *v)
#endif /* atomic64_inc_return */
#ifndef atomic64_inc_return
-static inline s64
+static __always_inline s64
atomic64_inc_return(atomic64_t *v)
{
return atomic64_add_return(1, v);
@@ -1397,7 +1399,7 @@ atomic64_inc_return(atomic64_t *v)
#endif
#ifndef atomic64_inc_return_acquire
-static inline s64
+static __always_inline s64
atomic64_inc_return_acquire(atomic64_t *v)
{
return atomic64_add_return_acquire(1, v);
@@ -1406,7 +1408,7 @@ atomic64_inc_return_acquire(atomic64_t *v)
#endif
#ifndef atomic64_inc_return_release
-static inline s64
+static __always_inline s64
atomic64_inc_return_release(atomic64_t *v)
{
return atomic64_add_return_release(1, v);
@@ -1415,7 +1417,7 @@ atomic64_inc_return_release(atomic64_t *v)
#endif
#ifndef atomic64_inc_return_relaxed
-static inline s64
+static __always_inline s64
atomic64_inc_return_relaxed(atomic64_t *v)
{
return atomic64_add_return_relaxed(1, v);
@@ -1426,7 +1428,7 @@ atomic64_inc_return_relaxed(atomic64_t *v)
#else /* atomic64_inc_return_relaxed */
#ifndef atomic64_inc_return_acquire
-static inline s64
+static __always_inline s64
atomic64_inc_return_acquire(atomic64_t *v)
{
s64 ret = atomic64_inc_return_relaxed(v);
@@ -1437,7 +1439,7 @@ atomic64_inc_return_acquire(atomic64_t *v)
#endif
#ifndef atomic64_inc_return_release
-static inline s64
+static __always_inline s64
atomic64_inc_return_release(atomic64_t *v)
{
__atomic_release_fence();
@@ -1447,7 +1449,7 @@ atomic64_inc_return_release(atomic64_t *v)
#endif
#ifndef atomic64_inc_return
-static inline s64
+static __always_inline s64
atomic64_inc_return(atomic64_t *v)
{
s64 ret;
@@ -1469,7 +1471,7 @@ atomic64_inc_return(atomic64_t *v)
#endif /* atomic64_fetch_inc */
#ifndef atomic64_fetch_inc
-static inline s64
+static __always_inline s64
atomic64_fetch_inc(atomic64_t *v)
{
return atomic64_fetch_add(1, v);
@@ -1478,7 +1480,7 @@ atomic64_fetch_inc(atomic64_t *v)
#endif
#ifndef atomic64_fetch_inc_acquire
-static inline s64
+static __always_inline s64
atomic64_fetch_inc_acquire(atomic64_t *v)
{
return atomic64_fetch_add_acquire(1, v);
@@ -1487,7 +1489,7 @@ atomic64_fetch_inc_acquire(atomic64_t *v)
#endif
#ifndef atomic64_fetch_inc_release
-static inline s64
+static __always_inline s64
atomic64_fetch_inc_release(atomic64_t *v)
{
return atomic64_fetch_add_release(1, v);
@@ -1496,7 +1498,7 @@ atomic64_fetch_inc_release(atomic64_t *v)
#endif
#ifndef atomic64_fetch_inc_relaxed
-static inline s64
+static __always_inline s64
atomic64_fetch_inc_relaxed(atomic64_t *v)
{
return atomic64_fetch_add_relaxed(1, v);
@@ -1507,7 +1509,7 @@ atomic64_fetch_inc_relaxed(atomic64_t *v)
#else /* atomic64_fetch_inc_relaxed */
#ifndef atomic64_fetch_inc_acquire
-static inline s64
+static __always_inline s64
atomic64_fetch_inc_acquire(atomic64_t *v)
{
s64 ret = atomic64_fetch_inc_relaxed(v);
@@ -1518,7 +1520,7 @@ atomic64_fetch_inc_acquire(atomic64_t *v)
#endif
#ifndef atomic64_fetch_inc_release
-static inline s64
+static __always_inline s64
atomic64_fetch_inc_release(atomic64_t *v)
{
__atomic_release_fence();
@@ -1528,7 +1530,7 @@ atomic64_fetch_inc_release(atomic64_t *v)
#endif
#ifndef atomic64_fetch_inc
-static inline s64
+static __always_inline s64
atomic64_fetch_inc(atomic64_t *v)
{
s64 ret;
@@ -1543,7 +1545,7 @@ atomic64_fetch_inc(atomic64_t *v)
#endif /* atomic64_fetch_inc_relaxed */
#ifndef atomic64_dec
-static inline void
+static __always_inline void
atomic64_dec(atomic64_t *v)
{
atomic64_sub(1, v);
@@ -1559,7 +1561,7 @@ atomic64_dec(atomic64_t *v)
#endif /* atomic64_dec_return */
#ifndef atomic64_dec_return
-static inline s64
+static __always_inline s64
atomic64_dec_return(atomic64_t *v)
{
return atomic64_sub_return(1, v);
@@ -1568,7 +1570,7 @@ atomic64_dec_return(atomic64_t *v)
#endif
#ifndef atomic64_dec_return_acquire
-static inline s64
+static __always_inline s64
atomic64_dec_return_acquire(atomic64_t *v)
{
return atomic64_sub_return_acquire(1, v);
@@ -1577,7 +1579,7 @@ atomic64_dec_return_acquire(atomic64_t *v)
#endif
#ifndef atomic64_dec_return_release
-static inline s64
+static __always_inline s64
atomic64_dec_return_release(atomic64_t *v)
{
return atomic64_sub_return_release(1, v);
@@ -1586,7 +1588,7 @@ atomic64_dec_return_release(atomic64_t *v)
#endif
#ifndef atomic64_dec_return_relaxed
-static inline s64
+static __always_inline s64
atomic64_dec_return_relaxed(atomic64_t *v)
{
return atomic64_sub_return_relaxed(1, v);
@@ -1597,7 +1599,7 @@ atomic64_dec_return_relaxed(atomic64_t *v)
#else /* atomic64_dec_return_relaxed */
#ifndef atomic64_dec_return_acquire
-static inline s64
+static __always_inline s64
atomic64_dec_return_acquire(atomic64_t *v)
{
s64 ret = atomic64_dec_return_relaxed(v);
@@ -1608,7 +1610,7 @@ atomic64_dec_return_acquire(atomic64_t *v)
#endif
#ifndef atomic64_dec_return_release
-static inline s64
+static __always_inline s64
atomic64_dec_return_release(atomic64_t *v)
{
__atomic_release_fence();
@@ -1618,7 +1620,7 @@ atomic64_dec_return_release(atomic64_t *v)
#endif
#ifndef atomic64_dec_return
-static inline s64
+static __always_inline s64
atomic64_dec_return(atomic64_t *v)
{
s64 ret;
@@ -1640,7 +1642,7 @@ atomic64_dec_return(atomic64_t *v)
#endif /* atomic64_fetch_dec */
#ifndef atomic64_fetch_dec
-static inline s64
+static __always_inline s64
atomic64_fetch_dec(atomic64_t *v)
{
return atomic64_fetch_sub(1, v);
@@ -1649,7 +1651,7 @@ atomic64_fetch_dec(atomic64_t *v)
#endif
#ifndef atomic64_fetch_dec_acquire
-static inline s64
+static __always_inline s64
atomic64_fetch_dec_acquire(atomic64_t *v)
{
return atomic64_fetch_sub_acquire(1, v);
@@ -1658,7 +1660,7 @@ atomic64_fetch_dec_acquire(atomic64_t *v)
#endif
#ifndef atomic64_fetch_dec_release
-static inline s64
+static __always_inline s64
atomic64_fetch_dec_release(atomic64_t *v)
{
return atomic64_fetch_sub_release(1, v);
@@ -1667,7 +1669,7 @@ atomic64_fetch_dec_release(atomic64_t *v)
#endif
#ifndef atomic64_fetch_dec_relaxed
-static inline s64
+static __always_inline s64
atomic64_fetch_dec_relaxed(atomic64_t *v)
{
return atomic64_fetch_sub_relaxed(1, v);
@@ -1678,7 +1680,7 @@ atomic64_fetch_dec_relaxed(atomic64_t *v)
#else /* atomic64_fetch_dec_relaxed */
#ifndef atomic64_fetch_dec_acquire
-static inline s64
+static __always_inline s64
atomic64_fetch_dec_acquire(atomic64_t *v)
{
s64 ret = atomic64_fetch_dec_relaxed(v);
@@ -1689,7 +1691,7 @@ atomic64_fetch_dec_acquire(atomic64_t *v)
#endif
#ifndef atomic64_fetch_dec_release
-static inline s64
+static __always_inline s64
atomic64_fetch_dec_release(atomic64_t *v)
{
__atomic_release_fence();
@@ -1699,7 +1701,7 @@ atomic64_fetch_dec_release(atomic64_t *v)
#endif
#ifndef atomic64_fetch_dec
-static inline s64
+static __always_inline s64
atomic64_fetch_dec(atomic64_t *v)
{
s64 ret;
@@ -1720,7 +1722,7 @@ atomic64_fetch_dec(atomic64_t *v)
#else /* atomic64_fetch_and_relaxed */
#ifndef atomic64_fetch_and_acquire
-static inline s64
+static __always_inline s64
atomic64_fetch_and_acquire(s64 i, atomic64_t *v)
{
s64 ret = atomic64_fetch_and_relaxed(i, v);
@@ -1731,7 +1733,7 @@ atomic64_fetch_and_acquire(s64 i, atomic64_t *v)
#endif
#ifndef atomic64_fetch_and_release
-static inline s64
+static __always_inline s64
atomic64_fetch_and_release(s64 i, atomic64_t *v)
{
__atomic_release_fence();
@@ -1741,7 +1743,7 @@ atomic64_fetch_and_release(s64 i, atomic64_t *v)
#endif
#ifndef atomic64_fetch_and
-static inline s64
+static __always_inline s64
atomic64_fetch_and(s64 i, atomic64_t *v)
{
s64 ret;
@@ -1756,7 +1758,7 @@ atomic64_fetch_and(s64 i, atomic64_t *v)
#endif /* atomic64_fetch_and_relaxed */
#ifndef atomic64_andnot
-static inline void
+static __always_inline void
atomic64_andnot(s64 i, atomic64_t *v)
{
atomic64_and(~i, v);
@@ -1772,7 +1774,7 @@ atomic64_andnot(s64 i, atomic64_t *v)
#endif /* atomic64_fetch_andnot */
#ifndef atomic64_fetch_andnot
-static inline s64
+static __always_inline s64
atomic64_fetch_andnot(s64 i, atomic64_t *v)
{
return atomic64_fetch_and(~i, v);
@@ -1781,7 +1783,7 @@ atomic64_fetch_andnot(s64 i, atomic64_t *v)
#endif
#ifndef atomic64_fetch_andnot_acquire
-static inline s64
+static __always_inline s64
atomic64_fetch_andnot_acquire(s64 i, atomic64_t *v)
{
return atomic64_fetch_and_acquire(~i, v);
@@ -1790,7 +1792,7 @@ atomic64_fetch_andnot_acquire(s64 i, atomic64_t *v)
#endif
#ifndef atomic64_fetch_andnot_release
-static inline s64
+static __always_inline s64
atomic64_fetch_andnot_release(s64 i, atomic64_t *v)
{
return atomic64_fetch_and_release(~i, v);
@@ -1799,7 +1801,7 @@ atomic64_fetch_andnot_release(s64 i, atomic64_t *v)
#endif
#ifndef atomic64_fetch_andnot_relaxed
-static inline s64
+static __always_inline s64
atomic64_fetch_andnot_relaxed(s64 i, atomic64_t *v)
{
return atomic64_fetch_and_relaxed(~i, v);
@@ -1810,7 +1812,7 @@ atomic64_fetch_andnot_relaxed(s64 i, atomic64_t *v)
#else /* atomic64_fetch_andnot_relaxed */
#ifndef atomic64_fetch_andnot_acquire
-static inline s64
+static __always_inline s64
atomic64_fetch_andnot_acquire(s64 i, atomic64_t *v)
{
s64 ret = atomic64_fetch_andnot_relaxed(i, v);
@@ -1821,7 +1823,7 @@ atomic64_fetch_andnot_acquire(s64 i, atomic64_t *v)
#endif
#ifndef atomic64_fetch_andnot_release
-static inline s64
+static __always_inline s64
atomic64_fetch_andnot_release(s64 i, atomic64_t *v)
{
__atomic_release_fence();
@@ -1831,7 +1833,7 @@ atomic64_fetch_andnot_release(s64 i, atomic64_t *v)
#endif
#ifndef atomic64_fetch_andnot
-static inline s64
+static __always_inline s64
atomic64_fetch_andnot(s64 i, atomic64_t *v)
{
s64 ret;
@@ -1852,7 +1854,7 @@ atomic64_fetch_andnot(s64 i, atomic64_t *v)
#else /* atomic64_fetch_or_relaxed */
#ifndef atomic64_fetch_or_acquire
-static inline s64
+static __always_inline s64
atomic64_fetch_or_acquire(s64 i, atomic64_t *v)
{
s64 ret = atomic64_fetch_or_relaxed(i, v);
@@ -1863,7 +1865,7 @@ atomic64_fetch_or_acquire(s64 i, atomic64_t *v)
#endif
#ifndef atomic64_fetch_or_release
-static inline s64
+static __always_inline s64
atomic64_fetch_or_release(s64 i, atomic64_t *v)
{
__atomic_release_fence();
@@ -1873,7 +1875,7 @@ atomic64_fetch_or_release(s64 i, atomic64_t *v)
#endif
#ifndef atomic64_fetch_or
-static inline s64
+static __always_inline s64
atomic64_fetch_or(s64 i, atomic64_t *v)
{
s64 ret;
@@ -1894,7 +1896,7 @@ atomic64_fetch_or(s64 i, atomic64_t *v)
#else /* atomic64_fetch_xor_relaxed */
#ifndef atomic64_fetch_xor_acquire
-static inline s64
+static __always_inline s64
atomic64_fetch_xor_acquire(s64 i, atomic64_t *v)
{
s64 ret = atomic64_fetch_xor_relaxed(i, v);
@@ -1905,7 +1907,7 @@ atomic64_fetch_xor_acquire(s64 i, atomic64_t *v)
#endif
#ifndef atomic64_fetch_xor_release
-static inline s64
+static __always_inline s64
atomic64_fetch_xor_release(s64 i, atomic64_t *v)
{
__atomic_release_fence();
@@ -1915,7 +1917,7 @@ atomic64_fetch_xor_release(s64 i, atomic64_t *v)
#endif
#ifndef atomic64_fetch_xor
-static inline s64
+static __always_inline s64
atomic64_fetch_xor(s64 i, atomic64_t *v)
{
s64 ret;
@@ -1936,7 +1938,7 @@ atomic64_fetch_xor(s64 i, atomic64_t *v)
#else /* atomic64_xchg_relaxed */
#ifndef atomic64_xchg_acquire
-static inline s64
+static __always_inline s64
atomic64_xchg_acquire(atomic64_t *v, s64 i)
{
s64 ret = atomic64_xchg_relaxed(v, i);
@@ -1947,7 +1949,7 @@ atomic64_xchg_acquire(atomic64_t *v, s64 i)
#endif
#ifndef atomic64_xchg_release
-static inline s64
+static __always_inline s64
atomic64_xchg_release(atomic64_t *v, s64 i)
{
__atomic_release_fence();
@@ -1957,7 +1959,7 @@ atomic64_xchg_release(atomic64_t *v, s64 i)
#endif
#ifndef atomic64_xchg
-static inline s64
+static __always_inline s64
atomic64_xchg(atomic64_t *v, s64 i)
{
s64 ret;
@@ -1978,7 +1980,7 @@ atomic64_xchg(atomic64_t *v, s64 i)
#else /* atomic64_cmpxchg_relaxed */
#ifndef atomic64_cmpxchg_acquire
-static inline s64
+static __always_inline s64
atomic64_cmpxchg_acquire(atomic64_t *v, s64 old, s64 new)
{
s64 ret = atomic64_cmpxchg_relaxed(v, old, new);
@@ -1989,7 +1991,7 @@ atomic64_cmpxchg_acquire(atomic64_t *v, s64 old, s64 new)
#endif
#ifndef atomic64_cmpxchg_release
-static inline s64
+static __always_inline s64
atomic64_cmpxchg_release(atomic64_t *v, s64 old, s64 new)
{
__atomic_release_fence();
@@ -1999,7 +2001,7 @@ atomic64_cmpxchg_release(atomic64_t *v, s64 old, s64 new)
#endif
#ifndef atomic64_cmpxchg
-static inline s64
+static __always_inline s64
atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new)
{
s64 ret;
@@ -2021,7 +2023,7 @@ atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new)
#endif /* atomic64_try_cmpxchg */
#ifndef atomic64_try_cmpxchg
-static inline bool
+static __always_inline bool
atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new)
{
s64 r, o = *old;
@@ -2034,7 +2036,7 @@ atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new)
#endif
#ifndef atomic64_try_cmpxchg_acquire
-static inline bool
+static __always_inline bool
atomic64_try_cmpxchg_acquire(atomic64_t *v, s64 *old, s64 new)
{
s64 r, o = *old;
@@ -2047,7 +2049,7 @@ atomic64_try_cmpxchg_acquire(atomic64_t *v, s64 *old, s64 new)
#endif
#ifndef atomic64_try_cmpxchg_release
-static inline bool
+static __always_inline bool
atomic64_try_cmpxchg_release(atomic64_t *v, s64 *old, s64 new)
{
s64 r, o = *old;
@@ -2060,7 +2062,7 @@ atomic64_try_cmpxchg_release(atomic64_t *v, s64 *old, s64 new)
#endif
#ifndef atomic64_try_cmpxchg_relaxed
-static inline bool
+static __always_inline bool
atomic64_try_cmpxchg_relaxed(atomic64_t *v, s64 *old, s64 new)
{
s64 r, o = *old;
@@ -2075,7 +2077,7 @@ atomic64_try_cmpxchg_relaxed(atomic64_t *v, s64 *old, s64 new)
#else /* atomic64_try_cmpxchg_relaxed */
#ifndef atomic64_try_cmpxchg_acquire
-static inline bool
+static __always_inline bool
atomic64_try_cmpxchg_acquire(atomic64_t *v, s64 *old, s64 new)
{
bool ret = atomic64_try_cmpxchg_relaxed(v, old, new);
@@ -2086,7 +2088,7 @@ atomic64_try_cmpxchg_acquire(atomic64_t *v, s64 *old, s64 new)
#endif
#ifndef atomic64_try_cmpxchg_release
-static inline bool
+static __always_inline bool
atomic64_try_cmpxchg_release(atomic64_t *v, s64 *old, s64 new)
{
__atomic_release_fence();
@@ -2096,7 +2098,7 @@ atomic64_try_cmpxchg_release(atomic64_t *v, s64 *old, s64 new)
#endif
#ifndef atomic64_try_cmpxchg
-static inline bool
+static __always_inline bool
atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new)
{
bool ret;
@@ -2120,7 +2122,7 @@ atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new)
* true if the result is zero, or false for all
* other cases.
*/
-static inline bool
+static __always_inline bool
atomic64_sub_and_test(s64 i, atomic64_t *v)
{
return atomic64_sub_return(i, v) == 0;
@@ -2137,7 +2139,7 @@ atomic64_sub_and_test(s64 i, atomic64_t *v)
* returns true if the result is 0, or false for all other
* cases.
*/
-static inline bool
+static __always_inline bool
atomic64_dec_and_test(atomic64_t *v)
{
return atomic64_dec_return(v) == 0;
@@ -2154,7 +2156,7 @@ atomic64_dec_and_test(atomic64_t *v)
* and returns true if the result is zero, or false for all
* other cases.
*/
-static inline bool
+static __always_inline bool
atomic64_inc_and_test(atomic64_t *v)
{
return atomic64_inc_return(v) == 0;
@@ -2172,7 +2174,7 @@ atomic64_inc_and_test(atomic64_t *v)
* if the result is negative, or false when
* result is greater than or equal to zero.
*/
-static inline bool
+static __always_inline bool
atomic64_add_negative(s64 i, atomic64_t *v)
{
return atomic64_add_return(i, v) < 0;
@@ -2190,7 +2192,7 @@ atomic64_add_negative(s64 i, atomic64_t *v)
* Atomically adds @a to @v, so long as @v was not already @u.
* Returns original value of @v
*/
-static inline s64
+static __always_inline s64
atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
{
s64 c = atomic64_read(v);
@@ -2215,7 +2217,7 @@ atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
* Atomically adds @a to @v, if @v was not already @u.
* Returns true if the addition was done.
*/
-static inline bool
+static __always_inline bool
atomic64_add_unless(atomic64_t *v, s64 a, s64 u)
{
return atomic64_fetch_add_unless(v, a, u) != u;
@@ -2231,7 +2233,7 @@ atomic64_add_unless(atomic64_t *v, s64 a, s64 u)
* Atomically increments @v by 1, if @v is non-zero.
* Returns true if the increment was done.
*/
-static inline bool
+static __always_inline bool
atomic64_inc_not_zero(atomic64_t *v)
{
return atomic64_add_unless(v, 1, 0);
@@ -2240,7 +2242,7 @@ atomic64_inc_not_zero(atomic64_t *v)
#endif
#ifndef atomic64_inc_unless_negative
-static inline bool
+static __always_inline bool
atomic64_inc_unless_negative(atomic64_t *v)
{
s64 c = atomic64_read(v);
@@ -2256,7 +2258,7 @@ atomic64_inc_unless_negative(atomic64_t *v)
#endif
#ifndef atomic64_dec_unless_positive
-static inline bool
+static __always_inline bool
atomic64_dec_unless_positive(atomic64_t *v)
{
s64 c = atomic64_read(v);
@@ -2272,7 +2274,7 @@ atomic64_dec_unless_positive(atomic64_t *v)
#endif
#ifndef atomic64_dec_if_positive
-static inline s64
+static __always_inline s64
atomic64_dec_if_positive(atomic64_t *v)
{
s64 dec, c = atomic64_read(v);
@@ -2292,4 +2294,4 @@ atomic64_dec_if_positive(atomic64_t *v)
#define atomic64_cond_read_relaxed(v, c) smp_cond_load_relaxed(&(v)->counter, (c))
#endif /* _LINUX_ATOMIC_FALLBACK_H */
-// 25de4a2804d70f57e994fe3b419148658bb5378a
+// baaf45f4c24ed88ceae58baca39d7fd80bb8101b
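
The atomic-fallback.h changes are purely mechanical: every generated fallback switches from static inline to static __always_inline, and the trailing hash records the regenerated file. One motivation, sketched with a hypothetical caller using the __no_kcsan attribute added to compiler.h below: if a tiny wrapper is not forcibly inlined, the compiler may emit it out of line and instrument it, defeating per-function opt-outs.

	#include <linux/atomic.h>

	/* Hypothetical caller that must not be instrumented. */
	static __no_kcsan int peek_counter(const atomic_t *v)
	{
		/*
		 * With __always_inline, the fallback body is expanded here and
		 * inherits this function's lack of instrumentation; a plain
		 * 'inline' could be emitted out of line and instrumented.
		 */
		return atomic_read_acquire(v);
	}
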
diff --git a/include/linux/compiler-clang.h b/include/linux/compiler-clang.h
index 333a6695a918..2cb42d8bdedc 100644
--- a/include/linux/compiler-clang.h
+++ b/include/linux/compiler-clang.h
@@ -16,7 +16,7 @@
#define KASAN_ABI_VERSION 5
#if __has_feature(address_sanitizer) || __has_feature(hwaddress_sanitizer)
-/* emulate gcc's __SANITIZE_ADDRESS__ flag */
+/* Emulate GCC's __SANITIZE_ADDRESS__ flag */
#define __SANITIZE_ADDRESS__
#define __no_sanitize_address \
__attribute__((no_sanitize("address", "hwaddress")))
@@ -24,6 +24,15 @@
#define __no_sanitize_address
#endif
+#if __has_feature(thread_sanitizer)
+/* Emulate GCC's __SANITIZE_THREAD__ flag */
+#define __SANITIZE_THREAD__
+#define __no_sanitize_thread \
+ __attribute__((no_sanitize("thread")))
+#else
+#define __no_sanitize_thread
+#endif
+
/*
* Not all versions of clang implement the type-generic versions
* of the builtin overflow checkers. Fortunately, clang implements
diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h
index d7ee4c6bad48..cf294faec2f8 100644
--- a/include/linux/compiler-gcc.h
+++ b/include/linux/compiler-gcc.h
@@ -145,6 +145,12 @@
#define __no_sanitize_address
#endif
+#if defined(__SANITIZE_THREAD__) && __has_attribute(__no_sanitize_thread__)
+#define __no_sanitize_thread __attribute__((no_sanitize_thread))
+#else
+#define __no_sanitize_thread
+#endif
+
#if GCC_VERSION >= 50100
#define COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW 1
#endif
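
With both compiler-clang.h and compiler-gcc.h providing __no_sanitize_thread (and Clang additionally emulating GCC's __SANITIZE_THREAD__ define), individual functions can opt out of KCSAN instrumentation on either compiler. A usage sketch (the helper is hypothetical; kernel code would normally go through the __no_kcsan wrappers added to compiler.h below):

	#include <linux/compiler.h>

	/* Hypothetical helper whose accesses must not be instrumented. */
	static __no_sanitize_thread void quiet_store(unsigned long *p,
						     unsigned long val)
	{
		*p = val;	/* no -fsanitize=thread instrumentation emitted */
	}
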
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index 034b0a644efc..491c0a2128b7 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -178,6 +178,7 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
#endif
#include <uapi/linux/types.h>
+#include <linux/kcsan-checks.h>
#define __READ_ONCE_SIZE \
({ \
@@ -193,12 +194,6 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
} \
})
-static __always_inline
-void __read_once_size(const volatile void *p, void *res, int size)
-{
- __READ_ONCE_SIZE;
-}
-
#ifdef CONFIG_KASAN
/*
* We can't declare function 'inline' because __no_sanitize_address conflicts
@@ -207,18 +202,47 @@ void __read_once_size(const volatile void *p, void *res, int size)
* '__maybe_unused' allows us to avoid defined-but-not-used warnings.
*/
# define __no_kasan_or_inline __no_sanitize_address notrace __maybe_unused
+# define __no_sanitize_or_inline __no_kasan_or_inline
#else
# define __no_kasan_or_inline __always_inline
#endif
-static __no_kasan_or_inline
+#define __no_kcsan __no_sanitize_thread
+#ifdef __SANITIZE_THREAD__
+/*
+ * Rely on __SANITIZE_THREAD__ instead of CONFIG_KCSAN, to avoid not inlining in
+ * compilation units where instrumentation is disabled. The attribute 'noinline'
+ * is required for older compilers, where implicit inlining of very small
+ * functions renders __no_sanitize_thread ineffective.
+ */
+# define __no_kcsan_or_inline __no_kcsan noinline notrace __maybe_unused
+# define __no_sanitize_or_inline __no_kcsan_or_inline
+#else
+# define __no_kcsan_or_inline __always_inline
+#endif
+
+#ifndef __no_sanitize_or_inline
+#define __no_sanitize_or_inline __always_inline
+#endif
+
+static __no_kcsan_or_inline
+void __read_once_size(const volatile void *p, void *res, int size)
+{
+ kcsan_check_atomic_read(p, size);
+ __READ_ONCE_SIZE;
+}
+
+static __no_sanitize_or_inline
void __read_once_size_nocheck(const volatile void *p, void *res, int size)
{
__READ_ONCE_SIZE;
}
-static __always_inline void __write_once_size(volatile void *p, void *res, int size)
+static __no_kcsan_or_inline
+void __write_once_size(volatile void *p, void *res, int size)
{
+ kcsan_check_atomic_write(p, size);
+
switch (size) {
case 1: *(volatile __u8 *)p = *(__u8 *)res; break;
case 2: *(volatile __u16 *)p = *(__u16 *)res; break;
@@ -289,6 +313,26 @@ unsigned long read_word_at_a_time(const void *addr)
__u.__val; \
})
+/**
+ * data_race - mark an expression as containing intentional data races
+ *
+ * This data_race() macro is useful for situations in which data races
+ * should be forgiven. One example is diagnostic code that accesses
+ * shared variables but is not a part of the core synchronization design.
+ *
+ * This macro *does not* affect normal code generation, but is a hint
+ * to tooling that data races here are to be ignored.
+ */
+#define data_race(expr) \
+ ({ \
+ typeof(({ expr; })) __val; \
+ kcsan_nestable_atomic_begin(); \
+ __val = ({ expr; }); \
+ kcsan_nestable_atomic_end(); \
+ __val; \
+ })
+#else
+
#endif /* __KERNEL__ */
/*
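
The compiler.h hunk moves __read_once_size()/__write_once_size() onto kcsan_check_atomic_*(), so READ_ONCE()/WRITE_ONCE() count as marked accesses, and adds data_race() for expressions that are racy by design. A data_race() usage sketch (the struct and field are hypothetical):

	#include <linux/compiler.h>

	struct stats {
		unsigned long hits;	/* updated concurrently; diagnostics only */
	};

	static unsigned long snapshot_hits(const struct stats *s)
	{
		/* Intentionally racy read; KCSAN forgives accesses in the expression. */
		return data_race(s->hits);
	}
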
diff --git a/include/linux/instrumented.h b/include/linux/instrumented.h
new file mode 100644
index 000000000000..43e6ea591975
--- /dev/null
+++ b/include/linux/instrumented.h
@@ -0,0 +1,109 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/*
+ * This header provides generic wrappers for memory access instrumentation that
+ * the compiler cannot emit for: KASAN, KCSAN.
+ */
+#ifndef _LINUX_INSTRUMENTED_H
+#define _LINUX_INSTRUMENTED_H
+
+#include <linux/compiler.h>
+#include <linux/kasan-checks.h>
+#include <linux/kcsan-checks.h>
+#include <linux/types.h>
+
+/**
+ * instrument_read - instrument regular read access
+ *
+ * Instrument a regular read access. The instrumentation should be inserted
+ * before the actual read happens.
+ *
+ * @ptr address of access
+ * @size size of access
+ */
+static __always_inline void instrument_read(const volatile void *v, size_t size)
+{
+ kasan_check_read(v, size);
+ kcsan_check_read(v, size);
+}
+
+/**
+ * instrument_write - instrument regular write access
+ *
+ * Instrument a regular write access. The instrumentation should be inserted
+ * before the actual write happens.
+ *
+ * @ptr address of access
+ * @size size of access
+ */
+static __always_inline void instrument_write(const volatile void *v, size_t size)
+{
+ kasan_check_write(v, size);
+ kcsan_check_write(v, size);
+}
+
+/**
+ * instrument_atomic_read - instrument atomic read access
+ *
+ * Instrument an atomic read access. The instrumentation should be inserted
+ * before the actual read happens.
+ *
+ * @ptr address of access
+ * @size size of access
+ */
+static __always_inline void instrument_atomic_read(const volatile void *v, size_t size)
+{
+ kasan_check_read(v, size);
+ kcsan_check_atomic_read(v, size);
+}
+
+/**
+ * instrument_atomic_write - instrument atomic write access
+ *
+ * Instrument an atomic write access. The instrumentation should be inserted
+ * before the actual write happens.
+ *
+ * @ptr address of access
+ * @size size of access
+ */
+static __always_inline void instrument_atomic_write(const volatile void *v, size_t size)
+{
+ kasan_check_write(v, size);
+ kcsan_check_atomic_write(v, size);
+}
+
+/**
+ * instrument_copy_to_user - instrument reads of copy_to_user
+ *
+ * Instrument reads from kernel memory that are due to copy_to_user (and
+ * variants). The instrumentation must be inserted before the accesses.
+ *
+ * @to destination address
+ * @from source address
+ * @n number of bytes to copy
+ */
+static __always_inline void
+instrument_copy_to_user(void __user *to, const void *from, unsigned long n)
+{
+ kasan_check_read(from, n);
+ kcsan_check_read(from, n);
+}
+
+/**
+ * instrument_copy_from_user - instrument writes of copy_from_user
+ *
+ * Instrument writes to kernel memory that are due to copy_from_user (and
+ * variants). The instrumentation should be inserted before the accesses.
+ *
+ * @to destination address
+ * @from source address
+ * @n number of bytes to copy
+ */
+static __always_inline void
+instrument_copy_from_user(const void *to, const void __user *from, unsigned long n)
+{
+ kasan_check_write(to, n);
+ kcsan_check_write(to, n);
+}
+
+#endif /* _LINUX_INSTRUMENTED_H */
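
instrument_copy_{to,from}_user() are intended to be called from the user-copy implementations right before the actual copy; the real callers are elsewhere in this series. A sketch of the call pattern, with an illustrative wrapper name:

	#include <linux/instrumented.h>
	#include <linux/uaccess.h>

	static inline unsigned long
	example_copy_to_user(void __user *to, const void *from, unsigned long n)
	{
		/* Check the kernel-side source buffer before the copy happens. */
		instrument_copy_to_user(to, from, n);
		return raw_copy_to_user(to, from, n);
	}
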
diff --git a/include/linux/kcsan-checks.h b/include/linux/kcsan-checks.h
new file mode 100644
index 000000000000..8f9f6e2191dc
--- /dev/null
+++ b/include/linux/kcsan-checks.h
@@ -0,0 +1,251 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _LINUX_KCSAN_CHECKS_H
+#define _LINUX_KCSAN_CHECKS_H
+
+#include <linux/types.h>
+
+/*
+ * ACCESS TYPE MODIFIERS
+ *
+ * <none>: normal read access;
+ * WRITE : write access;
+ * ATOMIC: access is atomic;
+ * ASSERT: access is not a regular access, but an assertion;
+ */
+#define KCSAN_ACCESS_WRITE 0x1
+#define KCSAN_ACCESS_ATOMIC 0x2
+#define KCSAN_ACCESS_ASSERT 0x4
+
+/*
+ * __kcsan_*: Always calls into the runtime when KCSAN is enabled. This may be used
+ * even in compilation units that selectively disable KCSAN, but must use KCSAN
+ * to validate access to an address. Never use these in header files!
+ */
+#ifdef CONFIG_KCSAN
+/**
+ * __kcsan_check_access - check generic access for races
+ *
+ * @ptr address of access
+ * @size size of access
+ * @type access type modifier
+ */
+void __kcsan_check_access(const volatile void *ptr, size_t size, int type);
+
+/**
+ * kcsan_nestable_atomic_begin - begin nestable atomic region
+ *
+ * Accesses within the atomic region may appear to race with other accesses but
+ * should be considered atomic.
+ */
+void kcsan_nestable_atomic_begin(void);
+
+/**
+ * kcsan_nestable_atomic_end - end nestable atomic region
+ */
+void kcsan_nestable_atomic_end(void);
+
+/**
+ * kcsan_flat_atomic_begin - begin flat atomic region
+ *
+ * Accesses within the atomic region may appear to race with other accesses but
+ * should be considered atomic.
+ */
+void kcsan_flat_atomic_begin(void);
+
+/**
+ * kcsan_flat_atomic_end - end flat atomic region
+ */
+void kcsan_flat_atomic_end(void);
+
+/**
+ * kcsan_atomic_next - consider following accesses as atomic
+ *
+ * Force treating the next n memory accesses for the current context as atomic
+ * operations.
+ *
+ * @n number of following memory accesses to treat as atomic.
+ */
+void kcsan_atomic_next(int n);
+
+/**
+ * kcsan_set_access_mask - set access mask
+ *
+ * Set the access mask for all accesses for the current context if non-zero.
+ * Only value changes to bits set in the mask will be reported.
+ *
+ * @mask bitmask
+ */
+void kcsan_set_access_mask(unsigned long mask);
+
+#else /* CONFIG_KCSAN */
+
+static inline void __kcsan_check_access(const volatile void *ptr, size_t size,
+ int type) { }
+
+static inline void kcsan_nestable_atomic_begin(void) { }
+static inline void kcsan_nestable_atomic_end(void) { }
+static inline void kcsan_flat_atomic_begin(void) { }
+static inline void kcsan_flat_atomic_end(void) { }
+static inline void kcsan_atomic_next(int n) { }
+static inline void kcsan_set_access_mask(unsigned long mask) { }
+
+#endif /* CONFIG_KCSAN */
+
+/*
+ * kcsan_*: Only calls into the runtime when the particular compilation unit has
+ * KCSAN instrumentation enabled. May be used in header files.
+ */
+#ifdef __SANITIZE_THREAD__
+#define kcsan_check_access __kcsan_check_access
+#else
+static inline void kcsan_check_access(const volatile void *ptr, size_t size,
+ int type) { }
+#endif
+
+/**
+ * __kcsan_check_read - check regular read access for races
+ *
+ * @ptr address of access
+ * @size size of access
+ */
+#define __kcsan_check_read(ptr, size) __kcsan_check_access(ptr, size, 0)
+
+/**
+ * __kcsan_check_write - check regular write access for races
+ *
+ * @ptr address of access
+ * @size size of access
+ */
+#define __kcsan_check_write(ptr, size) \
+ __kcsan_check_access(ptr, size, KCSAN_ACCESS_WRITE)
+
+/**
+ * kcsan_check_read - check regular read access for races
+ *
+ * @ptr address of access
+ * @size size of access
+ */
+#define kcsan_check_read(ptr, size) kcsan_check_access(ptr, size, 0)
+
+/**
+ * kcsan_check_write - check regular write access for races
+ *
+ * @ptr address of access
+ * @size size of access
+ */
+#define kcsan_check_write(ptr, size) \
+ kcsan_check_access(ptr, size, KCSAN_ACCESS_WRITE)
+
+/*
+ * Check for atomic accesses: if atomic accesses are not ignored, this simply
+ * aliases to kcsan_check_access(); otherwise it becomes a no-op.
+ */
+#ifdef CONFIG_KCSAN_IGNORE_ATOMICS
+#define kcsan_check_atomic_read(...) do { } while (0)
+#define kcsan_check_atomic_write(...) do { } while (0)
+#else
+#define kcsan_check_atomic_read(ptr, size) \
+ kcsan_check_access(ptr, size, KCSAN_ACCESS_ATOMIC)
+#define kcsan_check_atomic_write(ptr, size) \
+ kcsan_check_access(ptr, size, KCSAN_ACCESS_ATOMIC | KCSAN_ACCESS_WRITE)
+#endif
+
+/**
+ * ASSERT_EXCLUSIVE_WRITER - assert no concurrent writes to @var
+ *
+ * Assert that there are no concurrent writes to @var; other readers are
+ * allowed. This assertion can be used to specify properties of concurrent code,
+ * where violation cannot be detected as a normal data race.
+ *
+ * For example, a per-CPU variable may only be meant to be written by a single
+ * CPU, but may be read from other CPUs; in this case, reads and writes must be
+ * marked properly. However, if an off-CPU WRITE_ONCE() races with the owning
+ * CPU's WRITE_ONCE(), this would not constitute a data race but could still be
+ * a harmful race condition. Using this macro allows specifying this property
+ * in the code and catching such bugs.
+ *
+ * @var variable to assert on
+ */
+#define ASSERT_EXCLUSIVE_WRITER(var) \
+ __kcsan_check_access(&(var), sizeof(var), KCSAN_ACCESS_ASSERT)
+
+/**
+ * ASSERT_EXCLUSIVE_ACCESS - assert no concurrent accesses to @var
+ *
+ * Assert that there are no concurrent accesses to @var (no readers nor
+ * writers). This assertion can be used to specify properties of concurrent
+ * code, where violation cannot be detected as a normal data race.
+ *
+ * For example, consider a reference-counting algorithm where exclusive access
+ * is expected after the refcount reaches 0. We can check that this property
+ * actually holds as follows:
+ *
+ * if (refcount_dec_and_test(&obj->refcnt)) {
+ * ASSERT_EXCLUSIVE_ACCESS(*obj);
+ * safely_dispose_of(obj);
+ * }
+ *
+ * @var variable to assert on
+ */
+#define ASSERT_EXCLUSIVE_ACCESS(var) \
+ __kcsan_check_access(&(var), sizeof(var), KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ASSERT)
+
+/**
+ * ASSERT_EXCLUSIVE_BITS - assert no concurrent writes to subset of bits in @var
+ *
+ * Bit-granular variant of ASSERT_EXCLUSIVE_WRITER(var).
+ *
+ * Assert that there are no concurrent writes to a subset of bits in @var;
+ * concurrent readers are permitted. This assertion captures more detailed
+ * bit-level properties, compared to the other (word granularity) assertions.
+ * Only the bits set in @mask are checked for concurrent modifications, while
+ * ignoring the remaining bits, i.e. concurrent writes (or reads) to ~@mask bits
+ * are ignored.
+ *
+ * Use this for variables, where some bits must not be modified concurrently,
+ * yet other bits are expected to be modified concurrently.
+ *
+ * For example, consider variables where, after initialization, some bits are
+ * read-only, but other bits may still be modified concurrently. A reader may
+ * wish to assert that this is true as follows:
+ *
+ * ASSERT_EXCLUSIVE_BITS(flags, READ_ONLY_MASK);
+ * foo = (READ_ONCE(flags) & READ_ONLY_MASK) >> READ_ONLY_SHIFT;
+ *
+ * Note: The access that immediately follows ASSERT_EXCLUSIVE_BITS() is
+ * assumed to access the masked bits only, and KCSAN optimistically assumes it
+ * is therefore safe, even in the presence of data races, and marking it with
+ * READ_ONCE() is optional from KCSAN's point-of-view. We caution, however,
+ * that it may still be advisable to do so, since we cannot reason about all
+ * compiler optimizations when it comes to bit manipulations (on the reader
+ * and writer side). If you are sure nothing can go wrong, the above can be
+ * written simply as:
+ *
+ * ASSERT_EXCLUSIVE_BITS(flags, READ_ONLY_MASK);
+ * foo = (flags & READ_ONLY_MASK) >> READ_ONLY_SHIFT;
+ *
+ * Another example where this may be used is when certain bits of @var may only
+ * be modified while holding the appropriate lock, but other bits may still
+ * change concurrently. Such writers could use the assertion as follows:
+ *
+ * spin_lock(&foo_lock);
+ * ASSERT_EXCLUSIVE_BITS(flags, FOO_MASK);
+ * old_flags = READ_ONCE(flags);
+ * new_flags = (old_flags & ~FOO_MASK) | (new_foo << FOO_SHIFT);
+ * if (cmpxchg(&flags, old_flags, new_flags) != old_flags) { ... }
+ * spin_unlock(&foo_lock);
+ *
+ * @var variable to assert on
+ * @mask only check for modifications to bits set in @mask
+ */
+#define ASSERT_EXCLUSIVE_BITS(var, mask) \
+ do { \
+ kcsan_set_access_mask(mask); \
+ __kcsan_check_access(&(var), sizeof(var), KCSAN_ACCESS_ASSERT);\
+ kcsan_set_access_mask(0); \
+ kcsan_atomic_next(1); \
+ } while (0)
+
+#endif /* _LINUX_KCSAN_CHECKS_H */
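To make the intended use of the assertion macros above concrete, the following is a minimal sketch; the variable names and FOO_RO_MASK are invented for illustration and are not part of this patch:

#include <linux/compiler.h>
#include <linux/kcsan-checks.h>

static unsigned long foo_flags;	/* bits in FOO_RO_MASK are read-only after init */
static int foo_counter;		/* only the owning context writes this */

#define FOO_RO_MASK	0x00ffUL

static void foo_owner_update(int v)
{
	ASSERT_EXCLUSIVE_WRITER(foo_counter);	/* concurrent readers are fine */
	WRITE_ONCE(foo_counter, v);
}

static unsigned long foo_read_ro_bits(void)
{
	/* Only the ~FOO_RO_MASK bits may be written concurrently. */
	ASSERT_EXCLUSIVE_BITS(foo_flags, FOO_RO_MASK);
	return READ_ONCE(foo_flags) & FOO_RO_MASK;
}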
diff --git a/include/linux/kcsan.h b/include/linux/kcsan.h
new file mode 100644
index 000000000000..3b84606e1e67
--- /dev/null
+++ b/include/linux/kcsan.h
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _LINUX_KCSAN_H
+#define _LINUX_KCSAN_H
+
+#include <linux/kcsan-checks.h>
+#include <linux/types.h>
+
+#ifdef CONFIG_KCSAN
+
+/*
+ * Context for each thread of execution: for tasks, this is stored in
+ * task_struct, and interrupts access internal per-CPU storage.
+ */
+struct kcsan_ctx {
+ int disable_count; /* disable counter */
+ int atomic_next; /* number of following atomic ops */
+
+ /*
+ * We distinguish between: (a) nestable atomic regions that may contain
+ * other nestable regions; and (b) flat atomic regions that do not keep
+ * track of nesting. Both (a) and (b) are entirely independent of each
+ * other, and a flat region may be started in a nestable region or
+ * vice-versa.
+ *
+	 * This is required because, for example, in the annotations for
+	 * seqlocks, we declare seqlock writer critical sections as (a) nestable
+	 * atomic regions and reader critical sections as (b) flat atomic
+	 * regions; however, we have encountered cases where seqlock reader
+	 * critical sections are contained within writer critical sections (the
+	 * opposite may be possible, too).
+ *
+ * To support these cases, we independently track the depth of nesting
+ * for (a), and whether the leaf level is flat for (b).
+ */
+ int atomic_nest_count;
+ bool in_flat_atomic;
+
+ /*
+ * Access mask for all accesses if non-zero.
+ */
+ unsigned long access_mask;
+};
+
+/**
+ * kcsan_init - initialize KCSAN runtime
+ */
+void kcsan_init(void);
+
+/**
+ * kcsan_disable_current - disable KCSAN for the current context
+ *
+ * Supports nesting.
+ */
+void kcsan_disable_current(void);
+
+/**
+ * kcsan_enable_current - re-enable KCSAN for the current context
+ *
+ * Supports nesting.
+ */
+void kcsan_enable_current(void);
+
+#else /* CONFIG_KCSAN */
+
+static inline void kcsan_init(void) { }
+static inline void kcsan_disable_current(void) { }
+static inline void kcsan_enable_current(void) { }
+
+#endif /* CONFIG_KCSAN */
+
+#endif /* _LINUX_KCSAN_H */
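As a usage sketch for the interface above (the function and variable names here are hypothetical): kcsan_disable_current()/kcsan_enable_current() nest via disable_count, so a deliberately racy access can be bracketed like this without generating reports:

#include <linux/kcsan.h>

static int peek_stat_racy(const int *stat)
{
	int v;

	kcsan_disable_current();	/* increments disable_count; nestable */
	v = *stat;			/* plain access, intentionally unchecked */
	kcsan_enable_current();		/* decrements disable_count */
	return v;
}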
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 4418f5cb8324..c323d9b1dae0 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -31,6 +31,7 @@
#include <linux/task_io_accounting.h>
#include <linux/posix-timers.h>
#include <linux/rseq.h>
+#include <linux/kcsan.h>
/* task_struct member predeclarations (sorted alphabetically): */
struct audit_context;
@@ -1187,6 +1188,9 @@ struct task_struct {
#ifdef CONFIG_KASAN
unsigned int kasan_depth;
#endif
+#ifdef CONFIG_KCSAN
+ struct kcsan_ctx kcsan_ctx;
+#endif
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/* Index of current stored address in ret_stack: */
diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h
index 0491d963d47e..8b97204f35a7 100644
--- a/include/linux/seqlock.h
+++ b/include/linux/seqlock.h
@@ -37,9 +37,25 @@
#include <linux/preempt.h>
#include <linux/lockdep.h>
#include <linux/compiler.h>
+#include <linux/kcsan-checks.h>
#include <asm/processor.h>
/*
+ * The seqlock interface does not prescribe a precise sequence of read
+ * begin/retry/end. For readers, typically there is a call to
+ * read_seqcount_begin() and read_seqcount_retry(); however, there are more
+ * esoteric cases that do not follow this pattern.
+ *
+ * As a consequence, we take the following best-effort approach for raw usage
+ * via seqcount_t under KCSAN: upon beginning a seq-reader critical section,
+ * pessimistically mark the next KCSAN_SEQLOCK_REGION_MAX memory accesses as
+ * atomics; if there is a matching read_seqcount_retry() call, no following
+ * memory operations are considered atomic. Usage of seqlocks via seqlock_t
+ * interface is not affected.
+ */
+#define KCSAN_SEQLOCK_REGION_MAX 1000
+
+/*
* Version using sequence counter only.
* This can be used when code has its own mutex protecting the
* updating starting before the write_seqcountbeqin() and ending
@@ -115,6 +131,7 @@ repeat:
cpu_relax();
goto repeat;
}
+ kcsan_atomic_next(KCSAN_SEQLOCK_REGION_MAX);
return ret;
}
@@ -131,6 +148,7 @@ static inline unsigned raw_read_seqcount(const seqcount_t *s)
{
unsigned ret = READ_ONCE(s->sequence);
smp_rmb();
+ kcsan_atomic_next(KCSAN_SEQLOCK_REGION_MAX);
return ret;
}
@@ -183,6 +201,7 @@ static inline unsigned raw_seqcount_begin(const seqcount_t *s)
{
unsigned ret = READ_ONCE(s->sequence);
smp_rmb();
+ kcsan_atomic_next(KCSAN_SEQLOCK_REGION_MAX);
return ret & ~1;
}
@@ -202,7 +221,8 @@ static inline unsigned raw_seqcount_begin(const seqcount_t *s)
*/
static inline int __read_seqcount_retry(const seqcount_t *s, unsigned start)
{
- return unlikely(s->sequence != start);
+ kcsan_atomic_next(0);
+ return unlikely(READ_ONCE(s->sequence) != start);
}
/**
@@ -225,6 +245,7 @@ static inline int read_seqcount_retry(const seqcount_t *s, unsigned start)
static inline void raw_write_seqcount_begin(seqcount_t *s)
{
+ kcsan_nestable_atomic_begin();
s->sequence++;
smp_wmb();
}
@@ -233,6 +254,7 @@ static inline void raw_write_seqcount_end(seqcount_t *s)
{
smp_wmb();
s->sequence++;
+ kcsan_nestable_atomic_end();
}
/**
@@ -243,6 +265,13 @@ static inline void raw_write_seqcount_end(seqcount_t *s)
* usual consistency guarantee. It is one wmb cheaper, because we can
* collapse the two back-to-back wmb()s.
*
+ * Note that writes surrounding the barrier should be declared atomic (e.g.
+ * via WRITE_ONCE): a) to ensure the writes become visible to other threads
+ * atomically, avoiding compiler optimizations; b) to document which writes are
+ * meant to propagate to the reader critical section. This is necessary because
+ * neither the writes before nor the writes after the barrier are enclosed in a
+ * seq-writer critical section that would ensure readers are aware of ongoing
+ * writes.
+ *
* seqcount_t seq;
* bool X = true, Y = false;
*
@@ -262,18 +291,20 @@ static inline void raw_write_seqcount_end(seqcount_t *s)
*
* void write(void)
* {
- * Y = true;
+ * WRITE_ONCE(Y, true);
*
* raw_write_seqcount_barrier(seq);
*
- * X = false;
+ * WRITE_ONCE(X, false);
* }
*/
static inline void raw_write_seqcount_barrier(seqcount_t *s)
{
+ kcsan_nestable_atomic_begin();
s->sequence++;
smp_wmb();
s->sequence++;
+ kcsan_nestable_atomic_end();
}
static inline int raw_read_seqcount_latch(seqcount_t *s)
@@ -398,7 +429,9 @@ static inline void write_seqcount_end(seqcount_t *s)
static inline void write_seqcount_invalidate(seqcount_t *s)
{
smp_wmb();
+ kcsan_nestable_atomic_begin();
s->sequence+=2;
+ kcsan_nestable_atomic_end();
}
typedef struct {
@@ -430,11 +463,21 @@ typedef struct {
*/
static inline unsigned read_seqbegin(const seqlock_t *sl)
{
- return read_seqcount_begin(&sl->seqcount);
+ unsigned ret = read_seqcount_begin(&sl->seqcount);
+
+ kcsan_atomic_next(0); /* non-raw usage, assume closing read_seqretry() */
+ kcsan_flat_atomic_begin();
+ return ret;
}
static inline unsigned read_seqretry(const seqlock_t *sl, unsigned start)
{
+ /*
+ * Assume not nested: read_seqretry() may be called multiple times when
+	 * completing a read critical section.
+ */
+ kcsan_flat_atomic_end();
+
return read_seqcount_retry(&sl->seqcount, start);
}
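Putting the seqlock annotations together, a typical seqcount reader behaves as follows under KCSAN (the seqcount and data names below are illustrative only): the begin call marks up to KCSAN_SEQLOCK_REGION_MAX subsequent accesses as atomic, and the retry call closes the region via kcsan_atomic_next(0):

#include <linux/seqlock.h>

static seqcount_t data_seq;
static u64 shared_data;

static u64 read_shared(void)
{
	unsigned int seq;
	u64 val;

	do {
		seq = read_seqcount_begin(&data_seq);	/* following accesses treated as atomic */
		val = shared_data;			/* plain read, tolerated within the region */
	} while (read_seqcount_retry(&data_seq, seq));	/* ends the atomic region */

	return val;
}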
diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
index 67f016010aad..8a215c5c1aed 100644
--- a/include/linux/uaccess.h
+++ b/include/linux/uaccess.h
@@ -2,9 +2,9 @@
#ifndef __LINUX_UACCESS_H__
#define __LINUX_UACCESS_H__
+#include <linux/instrumented.h>
#include <linux/sched.h>
#include <linux/thread_info.h>
-#include <linux/kasan-checks.h>
#define uaccess_kernel() segment_eq(get_fs(), KERNEL_DS)
@@ -58,7 +58,7 @@
static __always_inline __must_check unsigned long
__copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
{
- kasan_check_write(to, n);
+ instrument_copy_from_user(to, from, n);
check_object_size(to, n, false);
return raw_copy_from_user(to, from, n);
}
@@ -67,7 +67,7 @@ static __always_inline __must_check unsigned long
__copy_from_user(void *to, const void __user *from, unsigned long n)
{
might_fault();
- kasan_check_write(to, n);
+ instrument_copy_from_user(to, from, n);
check_object_size(to, n, false);
return raw_copy_from_user(to, from, n);
}
@@ -88,7 +88,7 @@ __copy_from_user(void *to, const void __user *from, unsigned long n)
static __always_inline __must_check unsigned long
__copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
{
- kasan_check_read(from, n);
+ instrument_copy_to_user(to, from, n);
check_object_size(from, n, true);
return raw_copy_to_user(to, from, n);
}
@@ -97,7 +97,7 @@ static __always_inline __must_check unsigned long
__copy_to_user(void __user *to, const void *from, unsigned long n)
{
might_fault();
- kasan_check_read(from, n);
+ instrument_copy_to_user(to, from, n);
check_object_size(from, n, true);
return raw_copy_to_user(to, from, n);
}
@@ -109,7 +109,7 @@ _copy_from_user(void *to, const void __user *from, unsigned long n)
unsigned long res = n;
might_fault();
if (likely(access_ok(from, n))) {
- kasan_check_write(to, n);
+ instrument_copy_from_user(to, from, n);
res = raw_copy_from_user(to, from, n);
}
if (unlikely(res))
@@ -127,7 +127,7 @@ _copy_to_user(void __user *to, const void *from, unsigned long n)
{
might_fault();
if (access_ok(to, n)) {
- kasan_check_read(from, n);
+ instrument_copy_to_user(to, from, n);
n = raw_copy_to_user(to, from, n);
}
return n;
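The kasan_check_*() calls above are replaced with instrument_copy_{from,to}_user() from the new <linux/instrumented.h>, whose body is not shown in this excerpt. As a hedged sketch, such a helper plausibly just chains the KASAN and KCSAN checks on the kernel-side buffer; refer to include/linux/instrumented.h for the actual definitions:

#include <linux/compiler.h>
#include <linux/kasan-checks.h>
#include <linux/kcsan-checks.h>

/* Sketch only -- not the real helper from <linux/instrumented.h>. */
static __always_inline void
sketch_instrument_copy_from_user(const void *to, const void __user *from,
				 unsigned long n)
{
	kasan_check_write(to, n);	/* the kernel buffer is written */
	kcsan_check_write(to, n);	/* flag racy concurrent accesses to it */
}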
diff --git a/init/init_task.c b/init/init_task.c
index bd403ed3e418..de5db6de5211 100644
--- a/init/init_task.c
+++ b/init/init_task.c
@@ -162,6 +162,15 @@ struct task_struct init_task
#ifdef CONFIG_KASAN
.kasan_depth = 1,
#endif
+#ifdef CONFIG_KCSAN
+ .kcsan_ctx = {
+ .disable_count = 0,
+ .atomic_next = 0,
+ .atomic_nest_count = 0,
+ .in_flat_atomic = false,
+ .access_mask = 0,
+ },
+#endif
#ifdef CONFIG_TRACE_IRQFLAGS
.softirqs_enabled = 1,
#endif
diff --git a/init/main.c b/init/main.c
index a48617f2e5e5..295aec3a1a7a 100644
--- a/init/main.c
+++ b/init/main.c
@@ -94,6 +94,7 @@
#include <linux/rodata_test.h>
#include <linux/jump_label.h>
#include <linux/mem_encrypt.h>
+#include <linux/kcsan.h>
#include <asm/io.h>
#include <asm/bugs.h>
@@ -998,6 +999,7 @@ asmlinkage __visible void __init start_kernel(void)
acpi_subsystem_init();
arch_post_acpi_subsys_init();
sfi_init_late();
+ kcsan_init();
/* Do the rest non-__init'ed, we're now alive */
arch_call_rest_init();
diff --git a/kernel/Makefile b/kernel/Makefile
index 4cb4130ced32..5d935b63f812 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -23,6 +23,9 @@ endif
# Prevents flicker of uninteresting __do_softirq()/__local_bh_disable_ip()
# in coverage traces.
KCOV_INSTRUMENT_softirq.o := n
+# Avoid KCSAN instrumentation in softirq ("No shared variables, all the data
+# are CPU local" => assume no data races), to reduce overhead in interrupts.
+KCSAN_SANITIZE_softirq.o = n
# These are called from save_stack_trace() on slub debug path,
# and produce insane amounts of uninteresting coverage.
KCOV_INSTRUMENT_module.o := n
@@ -31,6 +34,7 @@ KCOV_INSTRUMENT_stacktrace.o := n
# Don't self-instrument.
KCOV_INSTRUMENT_kcov.o := n
KASAN_SANITIZE_kcov.o := n
+KCSAN_SANITIZE_kcov.o := n
CFLAGS_kcov.o := $(call cc-option, -fno-conserve-stack -fno-stack-protector)
# cond_syscall is currently not LTO compatible
@@ -103,6 +107,7 @@ obj-$(CONFIG_TRACEPOINTS) += trace/
obj-$(CONFIG_IRQ_WORK) += irq_work.o
obj-$(CONFIG_CPU_PM) += cpu_pm.o
obj-$(CONFIG_BPF) += bpf/
+obj-$(CONFIG_KCSAN) += kcsan/
obj-$(CONFIG_PERF_EVENTS) += events/
@@ -120,6 +125,7 @@ obj-$(CONFIG_SYSCTL_KUNIT_TEST) += sysctl-test.o
obj-$(CONFIG_GCC_PLUGIN_STACKLEAK) += stackleak.o
KASAN_SANITIZE_stackleak.o := n
+KCSAN_SANITIZE_stackleak.o := n
KCOV_INSTRUMENT_stackleak.o := n
$(obj)/configs.o: $(obj)/config_data.gz
diff --git a/kernel/kcsan/Makefile b/kernel/kcsan/Makefile
new file mode 100644
index 000000000000..d4999b38d1be
--- /dev/null
+++ b/kernel/kcsan/Makefile
@@ -0,0 +1,14 @@
+# SPDX-License-Identifier: GPL-2.0
+KCSAN_SANITIZE := n
+KCOV_INSTRUMENT := n
+UBSAN_SANITIZE := n
+
+CFLAGS_REMOVE_core.o = $(CC_FLAGS_FTRACE)
+CFLAGS_REMOVE_debugfs.o = $(CC_FLAGS_FTRACE)
+CFLAGS_REMOVE_report.o = $(CC_FLAGS_FTRACE)
+
+CFLAGS_core.o := $(call cc-option,-fno-conserve-stack,) \
+ $(call cc-option,-fno-stack-protector,)
+
+obj-y := core.o debugfs.o report.o
+obj-$(CONFIG_KCSAN_SELFTEST) += test.o
diff --git a/kernel/kcsan/atomic.h b/kernel/kcsan/atomic.h
new file mode 100644
index 000000000000..a9c193053491
--- /dev/null
+++ b/kernel/kcsan/atomic.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _KERNEL_KCSAN_ATOMIC_H
+#define _KERNEL_KCSAN_ATOMIC_H
+
+#include <linux/jiffies.h>
+
+/*
+ * Helper that returns true if access to @ptr should be considered an atomic
+ * access, even though it is not explicitly atomic.
+ *
+ * List all volatile globals that have been observed in races, to suppress
+ * data race reports between accesses to these variables.
+ *
+ * For now, we assume that volatile accesses of globals are as strong as atomic
+ * accesses (READ_ONCE, WRITE_ONCE cast to volatile). The situation is still not
+ * entirely clear, as on some architectures (Alpha) READ_ONCE/WRITE_ONCE do more
+ * than cast to volatile. Eventually, we hope to be able to remove this
+ * function.
+ */
+static __always_inline bool kcsan_is_atomic(const volatile void *ptr)
+{
+ /* only jiffies for now */
+ return ptr == &jiffies;
+}
+
+#endif /* _KERNEL_KCSAN_ATOMIC_H */
diff --git a/kernel/kcsan/core.c b/kernel/kcsan/core.c
new file mode 100644
index 000000000000..589b1e7f0f25
--- /dev/null
+++ b/kernel/kcsan/core.c
@@ -0,0 +1,730 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/atomic.h>
+#include <linux/bug.h>
+#include <linux/delay.h>
+#include <linux/export.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/moduleparam.h>
+#include <linux/percpu.h>
+#include <linux/preempt.h>
+#include <linux/random.h>
+#include <linux/sched.h>
+#include <linux/uaccess.h>
+
+#include "atomic.h"
+#include "encoding.h"
+#include "kcsan.h"
+
+static bool kcsan_early_enable = IS_ENABLED(CONFIG_KCSAN_EARLY_ENABLE);
+static unsigned int kcsan_udelay_task = CONFIG_KCSAN_UDELAY_TASK;
+static unsigned int kcsan_udelay_interrupt = CONFIG_KCSAN_UDELAY_INTERRUPT;
+static long kcsan_skip_watch = CONFIG_KCSAN_SKIP_WATCH;
+
+#ifdef MODULE_PARAM_PREFIX
+#undef MODULE_PARAM_PREFIX
+#endif
+#define MODULE_PARAM_PREFIX "kcsan."
+module_param_named(early_enable, kcsan_early_enable, bool, 0);
+module_param_named(udelay_task, kcsan_udelay_task, uint, 0644);
+module_param_named(udelay_interrupt, kcsan_udelay_interrupt, uint, 0644);
+module_param_named(skip_watch, kcsan_skip_watch, long, 0644);
+
+bool kcsan_enabled;
+
+/* Per-CPU kcsan_ctx for interrupts */
+static DEFINE_PER_CPU(struct kcsan_ctx, kcsan_cpu_ctx) = {
+ .disable_count = 0,
+ .atomic_next = 0,
+ .atomic_nest_count = 0,
+ .in_flat_atomic = false,
+ .access_mask = 0,
+};
+
+/*
+ * Helper macros to index into adjacent slots, starting from the address slot
+ * itself, followed by the right and left slots.
+ *
+ * The purpose is 2-fold:
+ *
+ * 1. if during insertion the address slot is already occupied, check if
+ * any adjacent slots are free;
+ * 2. accesses that straddle a slot boundary due to size that exceeds a
+ * slot's range may check adjacent slots if any watchpoint matches.
+ *
+ * Note that accesses with very large size may still miss a watchpoint; however,
+ * given such accesses should be rare, this is a reasonable trade-off, since it
+ * will avoid:
+ *
+ * 1. excessive contention between watchpoint checks and setup;
+ * 2. larger number of simultaneous watchpoints without sacrificing
+ * performance.
+ *
+ * Example: SLOT_IDX values for KCSAN_CHECK_ADJACENT=1, where i is [0, 1, 2]:
+ *
+ * slot=0: [ 1, 2, 0]
+ * slot=9: [10, 11, 9]
+ * slot=63: [64, 65, 63]
+ */
+#define NUM_SLOTS (1 + 2*KCSAN_CHECK_ADJACENT)
+#define SLOT_IDX(slot, i) (slot + ((i + KCSAN_CHECK_ADJACENT) % NUM_SLOTS))
+
+/*
+ * SLOT_IDX_FAST is used in the fast-path. Not first checking the address's primary
+ * slot (middle) is fine if we assume that races occur rarely. The set of
+ * indices {SLOT_IDX(slot, i) | i in [0, NUM_SLOTS)} is equivalent to
+ * {SLOT_IDX_FAST(slot, i) | i in [0, NUM_SLOTS)}.
+ */
+#define SLOT_IDX_FAST(slot, i) (slot + i)
+
+/*
+ * Watchpoints, with each entry encoded as defined in encoding.h: in order to be
+ * able to safely update and access a watchpoint without introducing locking
+ * overhead, we encode each watchpoint as a single atomic long. The initial
+ * zero-initialized state matches INVALID_WATCHPOINT.
+ *
+ * Add NUM_SLOTS-1 entries to account for overflow; this helps avoid having to
+ * use more complicated SLOT_IDX_FAST calculation with modulo in the fast-path.
+ */
+static atomic_long_t watchpoints[CONFIG_KCSAN_NUM_WATCHPOINTS + NUM_SLOTS-1];
+
+/*
+ * Instructions to skip watching counter, used in should_watch(). We use a
+ * per-CPU counter to avoid excessive contention.
+ */
+static DEFINE_PER_CPU(long, kcsan_skip);
+
+static __always_inline atomic_long_t *find_watchpoint(unsigned long addr,
+ size_t size,
+ bool expect_write,
+ long *encoded_watchpoint)
+{
+ const int slot = watchpoint_slot(addr);
+ const unsigned long addr_masked = addr & WATCHPOINT_ADDR_MASK;
+ atomic_long_t *watchpoint;
+ unsigned long wp_addr_masked;
+ size_t wp_size;
+ bool is_write;
+ int i;
+
+ BUILD_BUG_ON(CONFIG_KCSAN_NUM_WATCHPOINTS < NUM_SLOTS);
+
+ for (i = 0; i < NUM_SLOTS; ++i) {
+ watchpoint = &watchpoints[SLOT_IDX_FAST(slot, i)];
+ *encoded_watchpoint = atomic_long_read(watchpoint);
+ if (!decode_watchpoint(*encoded_watchpoint, &wp_addr_masked,
+ &wp_size, &is_write))
+ continue;
+
+ if (expect_write && !is_write)
+ continue;
+
+ /* Check if the watchpoint matches the access. */
+ if (matching_access(wp_addr_masked, wp_size, addr_masked, size))
+ return watchpoint;
+ }
+
+ return NULL;
+}
+
+static inline atomic_long_t *
+insert_watchpoint(unsigned long addr, size_t size, bool is_write)
+{
+ const int slot = watchpoint_slot(addr);
+ const long encoded_watchpoint = encode_watchpoint(addr, size, is_write);
+ atomic_long_t *watchpoint;
+ int i;
+
+ /* Check slot index logic, ensuring we stay within array bounds. */
+ BUILD_BUG_ON(SLOT_IDX(0, 0) != KCSAN_CHECK_ADJACENT);
+ BUILD_BUG_ON(SLOT_IDX(0, KCSAN_CHECK_ADJACENT+1) != 0);
+ BUILD_BUG_ON(SLOT_IDX(CONFIG_KCSAN_NUM_WATCHPOINTS-1, KCSAN_CHECK_ADJACENT) != ARRAY_SIZE(watchpoints)-1);
+ BUILD_BUG_ON(SLOT_IDX(CONFIG_KCSAN_NUM_WATCHPOINTS-1, KCSAN_CHECK_ADJACENT+1) != ARRAY_SIZE(watchpoints) - NUM_SLOTS);
+
+ for (i = 0; i < NUM_SLOTS; ++i) {
+ long expect_val = INVALID_WATCHPOINT;
+
+ /* Try to acquire this slot. */
+ watchpoint = &watchpoints[SLOT_IDX(slot, i)];
+ if (atomic_long_try_cmpxchg_relaxed(watchpoint, &expect_val, encoded_watchpoint))
+ return watchpoint;
+ }
+
+ return NULL;
+}
+
+/*
+ * Return true if watchpoint was successfully consumed, false otherwise.
+ *
+ * This may return false if:
+ *
+ * 1. another thread already consumed the watchpoint;
+ * 2. the thread that set up the watchpoint already removed it;
+ * 3. the watchpoint was removed and then re-used.
+ */
+static __always_inline bool
+try_consume_watchpoint(atomic_long_t *watchpoint, long encoded_watchpoint)
+{
+ return atomic_long_try_cmpxchg_relaxed(watchpoint, &encoded_watchpoint, CONSUMED_WATCHPOINT);
+}
+
+/*
+ * Return true if watchpoint was not touched, false if consumed.
+ */
+static inline bool remove_watchpoint(atomic_long_t *watchpoint)
+{
+ return atomic_long_xchg_relaxed(watchpoint, INVALID_WATCHPOINT) != CONSUMED_WATCHPOINT;
+}
+
+static __always_inline struct kcsan_ctx *get_ctx(void)
+{
+ /*
+	 * In interrupts, use raw_cpu_ptr to avoid unnecessary checks that would
+	 * also result in calls generating warnings in uaccess regions.
+ */
+ return in_task() ? &current->kcsan_ctx : raw_cpu_ptr(&kcsan_cpu_ctx);
+}
+
+static __always_inline bool
+is_atomic(const volatile void *ptr, size_t size, int type)
+{
+ struct kcsan_ctx *ctx;
+
+ if ((type & KCSAN_ACCESS_ATOMIC) != 0)
+ return true;
+
+ /*
+ * Unless explicitly declared atomic, never consider an assertion access
+ * as atomic. This allows using them also in atomic regions, such as
+ * seqlocks, without implicitly changing their semantics.
+ */
+ if ((type & KCSAN_ACCESS_ASSERT) != 0)
+ return false;
+
+ if (IS_ENABLED(CONFIG_KCSAN_ASSUME_PLAIN_WRITES_ATOMIC) &&
+ (type & KCSAN_ACCESS_WRITE) != 0 && size <= sizeof(long) &&
+ IS_ALIGNED((unsigned long)ptr, size))
+ return true; /* Assume aligned writes up to word size are atomic. */
+
+ ctx = get_ctx();
+ if (unlikely(ctx->atomic_next > 0)) {
+ /*
+ * Because we do not have separate contexts for nested
+ * interrupts, in case atomic_next is set, we simply assume that
+ * the outer interrupt set atomic_next. In the worst case, we
+ * will conservatively consider operations as atomic. This is a
+ * reasonable trade-off to make, since this case should be
+		 * reasonable trade-off to make, since this case should be
+		 * extremely rare; however, not handling it could otherwise
+		 * lead to false positives.
+ if ((hardirq_count() >> HARDIRQ_SHIFT) < 2)
+ --ctx->atomic_next; /* in task, or outer interrupt */
+ return true;
+ }
+ if (unlikely(ctx->atomic_nest_count > 0 || ctx->in_flat_atomic))
+ return true;
+
+ return kcsan_is_atomic(ptr);
+}
+
+static __always_inline bool
+should_watch(const volatile void *ptr, size_t size, int type)
+{
+ /*
+ * Never set up watchpoints when memory operations are atomic.
+ *
+	 * This must be checked first, before the kcsan_skip check below: (1)
+	 * atomics should not count towards skipped instructions, and (2)
+	 * kcsan_atomic_next must actually be decremented for a consecutive
+	 * instruction stream.
+ */
+ if (is_atomic(ptr, size, type))
+ return false;
+
+ if (this_cpu_dec_return(kcsan_skip) >= 0)
+ return false;
+
+ /*
+ * NOTE: If we get here, kcsan_skip must always be reset in slow path
+ * via reset_kcsan_skip() to avoid underflow.
+ */
+
+ /* this operation should be watched */
+ return true;
+}
+
+static inline void reset_kcsan_skip(void)
+{
+ long skip_count = kcsan_skip_watch -
+ (IS_ENABLED(CONFIG_KCSAN_SKIP_WATCH_RANDOMIZE) ?
+ prandom_u32_max(kcsan_skip_watch) :
+ 0);
+ this_cpu_write(kcsan_skip, skip_count);
+}
+
+static __always_inline bool kcsan_is_enabled(void)
+{
+ return READ_ONCE(kcsan_enabled) && get_ctx()->disable_count == 0;
+}
+
+static inline unsigned int get_delay(void)
+{
+ unsigned int delay = in_task() ? kcsan_udelay_task : kcsan_udelay_interrupt;
+ return delay - (IS_ENABLED(CONFIG_KCSAN_DELAY_RANDOMIZE) ?
+ prandom_u32_max(delay) :
+ 0);
+}
+
+/*
+ * Pull everything together: check_access() below contains the performance
+ * critical operations; the fast-path (including check_access) functions should
+ * all be inlinable by the instrumentation functions.
+ *
+ * The slow-path (kcsan_found_watchpoint, kcsan_setup_watchpoint) are
+ * non-inlinable -- note that we prefix these with "kcsan_" to ensure they can
+ * be filtered from the stacktrace, as well as give them unique names for the
+ * UACCESS whitelist of objtool. Each function uses user_access_save/restore(),
+ * since they do not access any user memory, but instrumentation is still
+ * emitted in UACCESS regions.
+ */
+
+static noinline void kcsan_found_watchpoint(const volatile void *ptr,
+ size_t size,
+ int type,
+ atomic_long_t *watchpoint,
+ long encoded_watchpoint)
+{
+ unsigned long flags;
+ bool consumed;
+
+ if (!kcsan_is_enabled())
+ return;
+
+ /*
+ * The access_mask check relies on value-change comparison. To avoid
+ * reporting a race where e.g. the writer set up the watchpoint, but the
+ * reader has access_mask!=0, we have to ignore the found watchpoint.
+ */
+ if (get_ctx()->access_mask != 0)
+ return;
+
+ /*
+ * Consume the watchpoint as soon as possible, to minimize the chances
+ * of !consumed. Consuming the watchpoint must always be guarded by
+	 * the kcsan_is_enabled() check, as otherwise we might erroneously
+	 * trigger reports when disabled.
+ */
+ consumed = try_consume_watchpoint(watchpoint, encoded_watchpoint);
+
+ /* keep this after try_consume_watchpoint */
+ flags = user_access_save();
+
+ if (consumed) {
+ kcsan_report(ptr, size, type, true, raw_smp_processor_id(),
+ KCSAN_REPORT_CONSUMED_WATCHPOINT);
+ } else {
+ /*
+ * The other thread may not print any diagnostics, as it has
+ * already removed the watchpoint, or another thread consumed
+ * the watchpoint before this thread.
+ */
+ kcsan_counter_inc(KCSAN_COUNTER_REPORT_RACES);
+ }
+
+ if ((type & KCSAN_ACCESS_ASSERT) != 0)
+ kcsan_counter_inc(KCSAN_COUNTER_ASSERT_FAILURES);
+ else
+ kcsan_counter_inc(KCSAN_COUNTER_DATA_RACES);
+
+ user_access_restore(flags);
+}
+
+static noinline void
+kcsan_setup_watchpoint(const volatile void *ptr, size_t size, int type)
+{
+ const bool is_write = (type & KCSAN_ACCESS_WRITE) != 0;
+ const bool is_assert = (type & KCSAN_ACCESS_ASSERT) != 0;
+ atomic_long_t *watchpoint;
+ union {
+ u8 _1;
+ u16 _2;
+ u32 _4;
+ u64 _8;
+ } expect_value;
+ unsigned long access_mask;
+ enum kcsan_value_change value_change = KCSAN_VALUE_CHANGE_MAYBE;
+ unsigned long ua_flags = user_access_save();
+ unsigned long irq_flags;
+
+ /*
+ * Always reset kcsan_skip counter in slow-path to avoid underflow; see
+ * should_watch().
+ */
+ reset_kcsan_skip();
+
+ if (!kcsan_is_enabled())
+ goto out;
+
+ if (!check_encodable((unsigned long)ptr, size)) {
+ kcsan_counter_inc(KCSAN_COUNTER_UNENCODABLE_ACCESSES);
+ goto out;
+ }
+
+ /*
+ * Disable interrupts & preemptions to avoid another thread on the same
+ * CPU accessing memory locations for the set up watchpoint; this is to
+ * avoid reporting races to e.g. CPU-local data.
+ *
+ * An alternative would be adding the source CPU to the watchpoint
+ * encoding, and checking that watchpoint-CPU != this-CPU. There are
+ * several problems with this:
+ * 1. we should avoid stealing more bits from the watchpoint encoding
+ * as it would affect accuracy, as well as increase performance
+ * overhead in the fast-path;
+ * 2. if we are preempted, but there *is* a genuine data race, we
+ * would *not* report it -- since this is the common case (vs.
+ * CPU-local data accesses), it makes more sense (from a data race
+ * detection point of view) to simply disable preemptions to ensure
+ * as many tasks as possible run on other CPUs.
+ *
+ * Use raw versions, to avoid lockdep recursion via IRQ flags tracing.
+ */
+ raw_local_irq_save(irq_flags);
+
+ watchpoint = insert_watchpoint((unsigned long)ptr, size, is_write);
+ if (watchpoint == NULL) {
+ /*
+ * Out of capacity: the size of 'watchpoints', and the frequency
+ * with which should_watch() returns true should be tweaked so
+ * that this case happens very rarely.
+ */
+ kcsan_counter_inc(KCSAN_COUNTER_NO_CAPACITY);
+ goto out_unlock;
+ }
+
+ kcsan_counter_inc(KCSAN_COUNTER_SETUP_WATCHPOINTS);
+ kcsan_counter_inc(KCSAN_COUNTER_USED_WATCHPOINTS);
+
+ /*
+ * Read the current value, to later check and infer a race if the data
+ * was modified via a non-instrumented access, e.g. from a device.
+ */
+ expect_value._8 = 0;
+ switch (size) {
+ case 1:
+ expect_value._1 = READ_ONCE(*(const u8 *)ptr);
+ break;
+ case 2:
+ expect_value._2 = READ_ONCE(*(const u16 *)ptr);
+ break;
+ case 4:
+ expect_value._4 = READ_ONCE(*(const u32 *)ptr);
+ break;
+ case 8:
+ expect_value._8 = READ_ONCE(*(const u64 *)ptr);
+ break;
+ default:
+ break; /* ignore; we do not diff the values */
+ }
+
+ if (IS_ENABLED(CONFIG_KCSAN_DEBUG)) {
+ kcsan_disable_current();
+ pr_err("KCSAN: watching %s, size: %zu, addr: %px [slot: %d, encoded: %lx]\n",
+ is_write ? "write" : "read", size, ptr,
+ watchpoint_slot((unsigned long)ptr),
+ encode_watchpoint((unsigned long)ptr, size, is_write));
+ kcsan_enable_current();
+ }
+
+ /*
+ * Delay this thread, to increase probability of observing a racy
+ * conflicting access.
+ */
+ udelay(get_delay());
+
+ /*
+ * Re-read value, and check if it is as expected; if not, we infer a
+ * racy access.
+ */
+ access_mask = get_ctx()->access_mask;
+ switch (size) {
+ case 1:
+ expect_value._1 ^= READ_ONCE(*(const u8 *)ptr);
+ if (access_mask)
+ expect_value._1 &= (u8)access_mask;
+ break;
+ case 2:
+ expect_value._2 ^= READ_ONCE(*(const u16 *)ptr);
+ if (access_mask)
+ expect_value._2 &= (u16)access_mask;
+ break;
+ case 4:
+ expect_value._4 ^= READ_ONCE(*(const u32 *)ptr);
+ if (access_mask)
+ expect_value._4 &= (u32)access_mask;
+ break;
+ case 8:
+ expect_value._8 ^= READ_ONCE(*(const u64 *)ptr);
+ if (access_mask)
+ expect_value._8 &= (u64)access_mask;
+ break;
+ default:
+ break; /* ignore; we do not diff the values */
+ }
+
+ /* Were we able to observe a value-change? */
+ if (expect_value._8 != 0)
+ value_change = KCSAN_VALUE_CHANGE_TRUE;
+
+ /* Check if this access raced with another. */
+ if (!remove_watchpoint(watchpoint)) {
+ /*
+ * Depending on the access type, map a value_change of MAYBE to
+ * TRUE (always report) or FALSE (never report).
+ */
+ if (value_change == KCSAN_VALUE_CHANGE_MAYBE) {
+ if (access_mask != 0) {
+ /*
+ * For access with access_mask, we require a
+ * value-change, as it is likely that races on
+ * ~access_mask bits are expected.
+ */
+ value_change = KCSAN_VALUE_CHANGE_FALSE;
+ } else if (size > 8 || is_assert) {
+ /* Always assume a value-change. */
+ value_change = KCSAN_VALUE_CHANGE_TRUE;
+ }
+ }
+
+ /*
+ * No need to increment 'data_races' counter, as the racing
+ * thread already did.
+ *
+ * Count 'assert_failures' for each failed ASSERT access,
+ * therefore both this thread and the racing thread may
+ * increment this counter.
+ */
+ if (is_assert && value_change == KCSAN_VALUE_CHANGE_TRUE)
+ kcsan_counter_inc(KCSAN_COUNTER_ASSERT_FAILURES);
+
+ kcsan_report(ptr, size, type, value_change, smp_processor_id(),
+ KCSAN_REPORT_RACE_SIGNAL);
+ } else if (value_change == KCSAN_VALUE_CHANGE_TRUE) {
+ /* Inferring a race, since the value should not have changed. */
+
+ kcsan_counter_inc(KCSAN_COUNTER_RACES_UNKNOWN_ORIGIN);
+ if (is_assert)
+ kcsan_counter_inc(KCSAN_COUNTER_ASSERT_FAILURES);
+
+ if (IS_ENABLED(CONFIG_KCSAN_REPORT_RACE_UNKNOWN_ORIGIN) || is_assert)
+ kcsan_report(ptr, size, type, KCSAN_VALUE_CHANGE_TRUE,
+ smp_processor_id(),
+ KCSAN_REPORT_RACE_UNKNOWN_ORIGIN);
+ }
+
+ kcsan_counter_dec(KCSAN_COUNTER_USED_WATCHPOINTS);
+out_unlock:
+ raw_local_irq_restore(irq_flags);
+out:
+ user_access_restore(ua_flags);
+}
+
+static __always_inline void check_access(const volatile void *ptr, size_t size,
+ int type)
+{
+ const bool is_write = (type & KCSAN_ACCESS_WRITE) != 0;
+ atomic_long_t *watchpoint;
+ long encoded_watchpoint;
+
+ /*
+ * Do nothing for 0 sized check; this comparison will be optimized out
+ * for constant sized instrumentation (__tsan_{read,write}N).
+ */
+ if (unlikely(size == 0))
+ return;
+
+ /*
+ * Avoid user_access_save in fast-path: find_watchpoint is safe without
+ * user_access_save, as the address that ptr points to is only used to
+ * check if a watchpoint exists; ptr is never dereferenced.
+ */
+ watchpoint = find_watchpoint((unsigned long)ptr, size, !is_write,
+ &encoded_watchpoint);
+ /*
+ * It is safe to check kcsan_is_enabled() after find_watchpoint in the
+ * slow-path, as long as no state changes that cause a race to be
+ * detected and reported have occurred until kcsan_is_enabled() is
+ * checked.
+ */
+
+ if (unlikely(watchpoint != NULL))
+ kcsan_found_watchpoint(ptr, size, type, watchpoint,
+ encoded_watchpoint);
+ else if (unlikely(should_watch(ptr, size, type)))
+ kcsan_setup_watchpoint(ptr, size, type);
+}
+
+/* === Public interface ===================================================== */
+
+void __init kcsan_init(void)
+{
+ BUG_ON(!in_task());
+
+ kcsan_debugfs_init();
+
+ /*
+ * We are in the init task, and no other tasks should be running;
+ * WRITE_ONCE without memory barrier is sufficient.
+ */
+ if (kcsan_early_enable)
+ WRITE_ONCE(kcsan_enabled, true);
+}
+
+/* === Exported interface =================================================== */
+
+void kcsan_disable_current(void)
+{
+ ++get_ctx()->disable_count;
+}
+EXPORT_SYMBOL(kcsan_disable_current);
+
+void kcsan_enable_current(void)
+{
+ if (get_ctx()->disable_count-- == 0) {
+ /*
+ * Warn if kcsan_enable_current() calls are unbalanced with
+ * kcsan_disable_current() calls, which causes disable_count to
+ * become negative and should not happen.
+ */
+ kcsan_disable_current(); /* restore to 0, KCSAN still enabled */
+ kcsan_disable_current(); /* disable to generate warning */
+ WARN(1, "Unbalanced %s()", __func__);
+ kcsan_enable_current();
+ }
+}
+EXPORT_SYMBOL(kcsan_enable_current);
+
+void kcsan_nestable_atomic_begin(void)
+{
+ /*
+ * Do *not* check and warn if we are in a flat atomic region: nestable
+	 * and flat atomic regions are independent of each other.
+	 * See the struct kcsan_ctx comments in include/linux/kcsan.h for more
+	 * details.
+ */
+
+ ++get_ctx()->atomic_nest_count;
+}
+EXPORT_SYMBOL(kcsan_nestable_atomic_begin);
+
+void kcsan_nestable_atomic_end(void)
+{
+ if (get_ctx()->atomic_nest_count-- == 0) {
+ /*
+ * Warn if kcsan_nestable_atomic_end() calls are unbalanced with
+ * kcsan_nestable_atomic_begin() calls, which causes
+ * atomic_nest_count to become negative and should not happen.
+ */
+ kcsan_nestable_atomic_begin(); /* restore to 0 */
+ kcsan_disable_current(); /* disable to generate warning */
+ WARN(1, "Unbalanced %s()", __func__);
+ kcsan_enable_current();
+ }
+}
+EXPORT_SYMBOL(kcsan_nestable_atomic_end);
+
+void kcsan_flat_atomic_begin(void)
+{
+ get_ctx()->in_flat_atomic = true;
+}
+EXPORT_SYMBOL(kcsan_flat_atomic_begin);
+
+void kcsan_flat_atomic_end(void)
+{
+ get_ctx()->in_flat_atomic = false;
+}
+EXPORT_SYMBOL(kcsan_flat_atomic_end);
+
+void kcsan_atomic_next(int n)
+{
+ get_ctx()->atomic_next = n;
+}
+EXPORT_SYMBOL(kcsan_atomic_next);
+
+void kcsan_set_access_mask(unsigned long mask)
+{
+ get_ctx()->access_mask = mask;
+}
+EXPORT_SYMBOL(kcsan_set_access_mask);
+
+void __kcsan_check_access(const volatile void *ptr, size_t size, int type)
+{
+ check_access(ptr, size, type);
+}
+EXPORT_SYMBOL(__kcsan_check_access);
+
+/*
+ * KCSAN uses the same instrumentation that is emitted by supported compilers
+ * for ThreadSanitizer (TSAN).
+ *
+ * When enabled, the compiler emits instrumentation calls (the functions
+ * prefixed with "__tsan" below) for all loads and stores that it generated;
+ * inline asm is not instrumented.
+ *
+ * Note that not all supported compiler versions distinguish aligned/unaligned
+ * accesses, but e.g. recent versions of Clang do. We simply alias the unaligned
+ * version to the generic version, which can handle both.
+ */
+
+#define DEFINE_TSAN_READ_WRITE(size) \
+ void __tsan_read##size(void *ptr) \
+ { \
+ check_access(ptr, size, 0); \
+ } \
+ EXPORT_SYMBOL(__tsan_read##size); \
+ void __tsan_unaligned_read##size(void *ptr) \
+ __alias(__tsan_read##size); \
+ EXPORT_SYMBOL(__tsan_unaligned_read##size); \
+ void __tsan_write##size(void *ptr) \
+ { \
+ check_access(ptr, size, KCSAN_ACCESS_WRITE); \
+ } \
+ EXPORT_SYMBOL(__tsan_write##size); \
+ void __tsan_unaligned_write##size(void *ptr) \
+ __alias(__tsan_write##size); \
+ EXPORT_SYMBOL(__tsan_unaligned_write##size)
+
+DEFINE_TSAN_READ_WRITE(1);
+DEFINE_TSAN_READ_WRITE(2);
+DEFINE_TSAN_READ_WRITE(4);
+DEFINE_TSAN_READ_WRITE(8);
+DEFINE_TSAN_READ_WRITE(16);
+
+void __tsan_read_range(void *ptr, size_t size)
+{
+ check_access(ptr, size, 0);
+}
+EXPORT_SYMBOL(__tsan_read_range);
+
+void __tsan_write_range(void *ptr, size_t size)
+{
+ check_access(ptr, size, KCSAN_ACCESS_WRITE);
+}
+EXPORT_SYMBOL(__tsan_write_range);
+
+/*
+ * The below are not required by KCSAN, but can still be emitted by the
+ * compiler.
+ */
+void __tsan_func_entry(void *call_pc)
+{
+}
+EXPORT_SYMBOL(__tsan_func_entry);
+void __tsan_func_exit(void)
+{
+}
+EXPORT_SYMBOL(__tsan_func_exit);
+void __tsan_init(void)
+{
+}
+EXPORT_SYMBOL(__tsan_init);
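At the heart of kcsan_setup_watchpoint() above is the XOR-based value-change check: the value read before the delay is XORed with the value read afterwards and, if an access_mask is set, only the masked bits count as a change. The following standalone user-space snippet (ordinary C, not kernel code) illustrates exactly that arithmetic:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t before = 0xff00ff00ff00ff00ULL;
	uint64_t after  = 0xff00ff00ff00ff01ULL;	/* bit 0 flipped concurrently */
	uint64_t access_mask = 0xffULL;			/* only watch the low byte */
	uint64_t diff = before ^ after;			/* changed bits are non-zero */

	if (access_mask)
		diff &= access_mask;			/* restrict to the masked bits */

	printf("value-change observed: %s\n", diff ? "yes" : "no");
	return 0;
}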
diff --git a/kernel/kcsan/debugfs.c b/kernel/kcsan/debugfs.c
new file mode 100644
index 000000000000..2ff196123977
--- /dev/null
+++ b/kernel/kcsan/debugfs.c
@@ -0,0 +1,326 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/atomic.h>
+#include <linux/bsearch.h>
+#include <linux/bug.h>
+#include <linux/debugfs.h>
+#include <linux/init.h>
+#include <linux/kallsyms.h>
+#include <linux/sched.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/sort.h>
+#include <linux/string.h>
+#include <linux/uaccess.h>
+
+#include "kcsan.h"
+
+/*
+ * Statistics counters.
+ */
+static atomic_long_t counters[KCSAN_COUNTER_COUNT];
+
+/*
+ * Addresses for filtering functions from reporting. This list can be used as a
+ * whitelist or blacklist.
+ */
+static struct {
+ unsigned long *addrs; /* array of addresses */
+ size_t size; /* current size */
+ int used; /* number of elements used */
+ bool sorted; /* if elements are sorted */
+	bool		whitelist;	/* whitelist if true, blacklist otherwise */
+} report_filterlist = {
+ .addrs = NULL,
+ .size = 8, /* small initial size */
+ .used = 0,
+ .sorted = false,
+ .whitelist = false, /* default is blacklist */
+};
+static DEFINE_SPINLOCK(report_filterlist_lock);
+
+static const char *counter_to_name(enum kcsan_counter_id id)
+{
+ switch (id) {
+ case KCSAN_COUNTER_USED_WATCHPOINTS: return "used_watchpoints";
+ case KCSAN_COUNTER_SETUP_WATCHPOINTS: return "setup_watchpoints";
+ case KCSAN_COUNTER_DATA_RACES: return "data_races";
+ case KCSAN_COUNTER_ASSERT_FAILURES: return "assert_failures";
+ case KCSAN_COUNTER_NO_CAPACITY: return "no_capacity";
+ case KCSAN_COUNTER_REPORT_RACES: return "report_races";
+ case KCSAN_COUNTER_RACES_UNKNOWN_ORIGIN: return "races_unknown_origin";
+ case KCSAN_COUNTER_UNENCODABLE_ACCESSES: return "unencodable_accesses";
+ case KCSAN_COUNTER_ENCODING_FALSE_POSITIVES: return "encoding_false_positives";
+ case KCSAN_COUNTER_COUNT:
+ BUG();
+ }
+ return NULL;
+}
+
+void kcsan_counter_inc(enum kcsan_counter_id id)
+{
+ atomic_long_inc(&counters[id]);
+}
+
+void kcsan_counter_dec(enum kcsan_counter_id id)
+{
+ atomic_long_dec(&counters[id]);
+}
+
+/*
+ * The microbenchmark allows benchmarking KCSAN core runtime only. To run
+ * multiple threads, pipe 'microbench=<iters>' from multiple tasks into the
+ * debugfs file. This will not generate any conflicts, and tests fast-path only.
+ */
+static noinline void microbenchmark(unsigned long iters)
+{
+ cycles_t cycles;
+
+ pr_info("KCSAN: %s begin | iters: %lu\n", __func__, iters);
+
+ cycles = get_cycles();
+ while (iters--) {
+ /*
+ * We can run this benchmark from multiple tasks; this address
+		 * calculation increases the likelihood of some accesses
+ * overlapping. Make the access type an atomic read, to never
+ * set up watchpoints and test the fast-path only.
+ */
+ unsigned long addr =
+ iters % (CONFIG_KCSAN_NUM_WATCHPOINTS * PAGE_SIZE);
+ __kcsan_check_access((void *)addr, sizeof(long), KCSAN_ACCESS_ATOMIC);
+ }
+ cycles = get_cycles() - cycles;
+
+ pr_info("KCSAN: %s end | cycles: %llu\n", __func__, cycles);
+}
+
+/*
+ * Simple test to create conflicting accesses. Write 'test=<iters>' to KCSAN's
+ * debugfs file from multiple tasks to generate real conflicts and show reports.
+ */
+static long test_dummy;
+static long test_flags;
+static noinline void test_thread(unsigned long iters)
+{
+ const long CHANGE_BITS = 0xff00ff00ff00ff00L;
+ const struct kcsan_ctx ctx_save = current->kcsan_ctx;
+ cycles_t cycles;
+
+ /* We may have been called from an atomic region; reset context. */
+ memset(&current->kcsan_ctx, 0, sizeof(current->kcsan_ctx));
+
+ pr_info("KCSAN: %s begin | iters: %lu\n", __func__, iters);
+ pr_info("test_dummy@%px, test_flags@%px\n", &test_dummy, &test_flags);
+
+ cycles = get_cycles();
+ while (iters--) {
+ /* These all should generate reports. */
+ __kcsan_check_read(&test_dummy, sizeof(test_dummy));
+ ASSERT_EXCLUSIVE_WRITER(test_dummy);
+ ASSERT_EXCLUSIVE_ACCESS(test_dummy);
+
+ ASSERT_EXCLUSIVE_BITS(test_flags, ~CHANGE_BITS); /* no report */
+ __kcsan_check_read(&test_flags, sizeof(test_flags)); /* no report */
+
+ ASSERT_EXCLUSIVE_BITS(test_flags, CHANGE_BITS); /* report */
+ __kcsan_check_read(&test_flags, sizeof(test_flags)); /* no report */
+
+ /* not actually instrumented */
+ WRITE_ONCE(test_dummy, iters); /* to observe value-change */
+ __kcsan_check_write(&test_dummy, sizeof(test_dummy));
+
+ test_flags ^= CHANGE_BITS; /* generate value-change */
+ __kcsan_check_write(&test_flags, sizeof(test_flags));
+ }
+ cycles = get_cycles() - cycles;
+
+ pr_info("KCSAN: %s end | cycles: %llu\n", __func__, cycles);
+
+ /* restore context */
+ current->kcsan_ctx = ctx_save;
+}
+
+static int cmp_filterlist_addrs(const void *rhs, const void *lhs)
+{
+ const unsigned long a = *(const unsigned long *)rhs;
+ const unsigned long b = *(const unsigned long *)lhs;
+
+ return a < b ? -1 : a == b ? 0 : 1;
+}
+
+bool kcsan_skip_report_debugfs(unsigned long func_addr)
+{
+ unsigned long symbolsize, offset;
+ unsigned long flags;
+ bool ret = false;
+
+ if (!kallsyms_lookup_size_offset(func_addr, &symbolsize, &offset))
+ return false;
+ func_addr -= offset; /* Get function start */
+
+ spin_lock_irqsave(&report_filterlist_lock, flags);
+ if (report_filterlist.used == 0)
+ goto out;
+
+ /* Sort array if it is unsorted, and then do a binary search. */
+ if (!report_filterlist.sorted) {
+ sort(report_filterlist.addrs, report_filterlist.used,
+ sizeof(unsigned long), cmp_filterlist_addrs, NULL);
+ report_filterlist.sorted = true;
+ }
+ ret = !!bsearch(&func_addr, report_filterlist.addrs,
+ report_filterlist.used, sizeof(unsigned long),
+ cmp_filterlist_addrs);
+ if (report_filterlist.whitelist)
+ ret = !ret;
+
+out:
+ spin_unlock_irqrestore(&report_filterlist_lock, flags);
+ return ret;
+}
+
+static void set_report_filterlist_whitelist(bool whitelist)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&report_filterlist_lock, flags);
+ report_filterlist.whitelist = whitelist;
+ spin_unlock_irqrestore(&report_filterlist_lock, flags);
+}
+
+/* Returns 0 on success, error-code otherwise. */
+static ssize_t insert_report_filterlist(const char *func)
+{
+ unsigned long flags;
+ unsigned long addr = kallsyms_lookup_name(func);
+ ssize_t ret = 0;
+
+ if (!addr) {
+ pr_err("KCSAN: could not find function: '%s'\n", func);
+ return -ENOENT;
+ }
+
+ spin_lock_irqsave(&report_filterlist_lock, flags);
+
+ if (report_filterlist.addrs == NULL) {
+ /* initial allocation */
+ report_filterlist.addrs =
+ kmalloc_array(report_filterlist.size,
+ sizeof(unsigned long), GFP_KERNEL);
+ if (report_filterlist.addrs == NULL) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ } else if (report_filterlist.used == report_filterlist.size) {
+ /* resize filterlist */
+ size_t new_size = report_filterlist.size * 2;
+ unsigned long *new_addrs =
+ krealloc(report_filterlist.addrs,
+ new_size * sizeof(unsigned long), GFP_KERNEL);
+
+ if (new_addrs == NULL) {
+ /* leave filterlist itself untouched */
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ report_filterlist.size = new_size;
+ report_filterlist.addrs = new_addrs;
+ }
+
+ /* Note: deduplicating should be done in userspace. */
+ report_filterlist.addrs[report_filterlist.used++] =
+ kallsyms_lookup_name(func);
+ report_filterlist.sorted = false;
+
+out:
+ spin_unlock_irqrestore(&report_filterlist_lock, flags);
+
+ return ret;
+}
+
+static int show_info(struct seq_file *file, void *v)
+{
+ int i;
+ unsigned long flags;
+
+ /* show stats */
+ seq_printf(file, "enabled: %i\n", READ_ONCE(kcsan_enabled));
+ for (i = 0; i < KCSAN_COUNTER_COUNT; ++i)
+ seq_printf(file, "%s: %ld\n", counter_to_name(i),
+ atomic_long_read(&counters[i]));
+
+ /* show filter functions, and filter type */
+ spin_lock_irqsave(&report_filterlist_lock, flags);
+ seq_printf(file, "\n%s functions: %s\n",
+ report_filterlist.whitelist ? "whitelisted" : "blacklisted",
+ report_filterlist.used == 0 ? "none" : "");
+ for (i = 0; i < report_filterlist.used; ++i)
+ seq_printf(file, " %ps\n", (void *)report_filterlist.addrs[i]);
+ spin_unlock_irqrestore(&report_filterlist_lock, flags);
+
+ return 0;
+}
+
+static int debugfs_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, show_info, NULL);
+}
+
+static ssize_t
+debugfs_write(struct file *file, const char __user *buf, size_t count, loff_t *off)
+{
+ char kbuf[KSYM_NAME_LEN];
+ char *arg;
+ int read_len = count < (sizeof(kbuf) - 1) ? count : (sizeof(kbuf) - 1);
+
+ if (copy_from_user(kbuf, buf, read_len))
+ return -EFAULT;
+ kbuf[read_len] = '\0';
+ arg = strstrip(kbuf);
+
+ if (!strcmp(arg, "on")) {
+ WRITE_ONCE(kcsan_enabled, true);
+ } else if (!strcmp(arg, "off")) {
+ WRITE_ONCE(kcsan_enabled, false);
+ } else if (!strncmp(arg, "microbench=", sizeof("microbench=") - 1)) {
+ unsigned long iters;
+
+ if (kstrtoul(&arg[sizeof("microbench=") - 1], 0, &iters))
+ return -EINVAL;
+ microbenchmark(iters);
+ } else if (!strncmp(arg, "test=", sizeof("test=") - 1)) {
+ unsigned long iters;
+
+ if (kstrtoul(&arg[sizeof("test=") - 1], 0, &iters))
+ return -EINVAL;
+ test_thread(iters);
+ } else if (!strcmp(arg, "whitelist")) {
+ set_report_filterlist_whitelist(true);
+ } else if (!strcmp(arg, "blacklist")) {
+ set_report_filterlist_whitelist(false);
+ } else if (arg[0] == '!') {
+ ssize_t ret = insert_report_filterlist(&arg[1]);
+
+ if (ret < 0)
+ return ret;
+ } else {
+ return -EINVAL;
+ }
+
+ return count;
+}
+
+static const struct file_operations debugfs_ops =
+{
+ .read = seq_read,
+ .open = debugfs_open,
+ .write = debugfs_write,
+ .release = single_release
+};
+
+void __init kcsan_debugfs_init(void)
+{
+ debugfs_create_file("kcsan", 0644, NULL, NULL, &debugfs_ops);
+}
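For reference, debugfs_write() above accepts the following commands written to the "kcsan" file (created at the debugfs root, typically /sys/kernel/debug/kcsan when debugfs is mounted there): "on" and "off" toggle the runtime, "microbench=<iters>" runs the fast-path microbenchmark, "test=<iters>" runs the conflicting-access test, "whitelist" and "blacklist" select the filter mode, and "!<function>" adds a function to the filter list; reading the file prints the counters and the current filter list.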
diff --git a/kernel/kcsan/encoding.h b/kernel/kcsan/encoding.h
new file mode 100644
index 000000000000..f03562aaf2eb
--- /dev/null
+++ b/kernel/kcsan/encoding.h
@@ -0,0 +1,95 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _KERNEL_KCSAN_ENCODING_H
+#define _KERNEL_KCSAN_ENCODING_H
+
+#include <linux/bits.h>
+#include <linux/log2.h>
+#include <linux/mm.h>
+
+#include "kcsan.h"
+
+#define SLOT_RANGE PAGE_SIZE
+
+#define INVALID_WATCHPOINT 0
+#define CONSUMED_WATCHPOINT 1
+
+/*
+ * The maximum useful size of accesses for which we set up watchpoints is the
+ * max range of slots we check on an access.
+ */
+#define MAX_ENCODABLE_SIZE (SLOT_RANGE * (1 + KCSAN_CHECK_ADJACENT))
+
+/*
+ * Number of bits we use to store size info.
+ */
+#define WATCHPOINT_SIZE_BITS bits_per(MAX_ENCODABLE_SIZE)
+/*
+ * This encoding for addresses discards the upper bits (1 is-write bit plus the
+ * size bits); however, most 64-bit architectures do not use the full 64-bit
+ * address space. Also, in order for a false positive to be observable, 2 things
+ * need to happen:
+ *
+ * 1. different addresses with the same encoded address race;
+ * 2. both map onto the same watchpoint slots.
+ *
+ * Both are assumed to be very unlikely. However, in case it still happens, the
+ * report logic will filter out the false positive (see report.c).
+ */
+#define WATCHPOINT_ADDR_BITS (BITS_PER_LONG-1 - WATCHPOINT_SIZE_BITS)
+
+/*
+ * Masks to set/retrieve the encoded data.
+ */
+#define WATCHPOINT_WRITE_MASK BIT(BITS_PER_LONG-1)
+#define WATCHPOINT_SIZE_MASK \
+ GENMASK(BITS_PER_LONG-2, BITS_PER_LONG-2 - WATCHPOINT_SIZE_BITS)
+#define WATCHPOINT_ADDR_MASK \
+ GENMASK(BITS_PER_LONG-3 - WATCHPOINT_SIZE_BITS, 0)
+
+static inline bool check_encodable(unsigned long addr, size_t size)
+{
+ return size <= MAX_ENCODABLE_SIZE;
+}
+
+static inline long
+encode_watchpoint(unsigned long addr, size_t size, bool is_write)
+{
+ return (long)((is_write ? WATCHPOINT_WRITE_MASK : 0) |
+ (size << WATCHPOINT_ADDR_BITS) |
+ (addr & WATCHPOINT_ADDR_MASK));
+}
+
+static __always_inline bool decode_watchpoint(long watchpoint,
+ unsigned long *addr_masked,
+ size_t *size,
+ bool *is_write)
+{
+ if (watchpoint == INVALID_WATCHPOINT ||
+ watchpoint == CONSUMED_WATCHPOINT)
+ return false;
+
+ *addr_masked = (unsigned long)watchpoint & WATCHPOINT_ADDR_MASK;
+ *size = ((unsigned long)watchpoint & WATCHPOINT_SIZE_MASK) >> WATCHPOINT_ADDR_BITS;
+ *is_write = !!((unsigned long)watchpoint & WATCHPOINT_WRITE_MASK);
+
+ return true;
+}
+
+/*
+ * Return watchpoint slot for an address.
+ */
+static __always_inline int watchpoint_slot(unsigned long addr)
+{
+ return (addr / PAGE_SIZE) % CONFIG_KCSAN_NUM_WATCHPOINTS;
+}
+
+static __always_inline bool matching_access(unsigned long addr1, size_t size1,
+ unsigned long addr2, size_t size2)
+{
+ unsigned long end_range1 = addr1 + size1 - 1;
+ unsigned long end_range2 = addr2 + size2 - 1;
+
+ return addr1 <= end_range2 && addr2 <= end_range1;
+}
+
+#endif /* _KERNEL_KCSAN_ENCODING_H */
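The layout above packs the is-write bit at the top, the size next, and the masked address in the low bits. The following standalone sketch mirrors that round trip with illustrative constants for a 64-bit long; it does not reproduce the kernel's exact GENMASK boundaries:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#define SIZE_BITS	14			/* stands in for WATCHPOINT_SIZE_BITS */
#define ADDR_BITS	(64 - 1 - SIZE_BITS)
#define WRITE_MASK	(1ULL << 63)
#define ADDR_MASK	((1ULL << ADDR_BITS) - 1)

static uint64_t encode(uint64_t addr, uint64_t size, bool is_write)
{
	return (is_write ? WRITE_MASK : 0) | (size << ADDR_BITS) | (addr & ADDR_MASK);
}

static void decode(uint64_t wp, uint64_t *addr, uint64_t *size, bool *is_write)
{
	*addr = wp & ADDR_MASK;
	*size = (wp & ~(WRITE_MASK | ADDR_MASK)) >> ADDR_BITS;
	*is_write = wp & WRITE_MASK;
}

int main(void)
{
	uint64_t a, s;
	bool w;

	decode(encode(0xffffffff12345678ULL, 8, true), &a, &s, &w);
	assert(w && s == 8 && a == (0xffffffff12345678ULL & ADDR_MASK));
	return 0;
}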
diff --git a/kernel/kcsan/kcsan.h b/kernel/kcsan/kcsan.h
new file mode 100644
index 000000000000..892de5120c1b
--- /dev/null
+++ b/kernel/kcsan/kcsan.h
@@ -0,0 +1,138 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/*
+ * The Kernel Concurrency Sanitizer (KCSAN) infrastructure. For more info please
+ * see Documentation/dev-tools/kcsan.rst.
+ */
+
+#ifndef _KERNEL_KCSAN_KCSAN_H
+#define _KERNEL_KCSAN_KCSAN_H
+
+#include <linux/kcsan.h>
+
+/* The number of adjacent watchpoints to check. */
+#define KCSAN_CHECK_ADJACENT 1
+
+/*
+ * Globally enable and disable KCSAN.
+ */
+extern bool kcsan_enabled;
+
+/*
+ * Initialize debugfs file.
+ */
+void kcsan_debugfs_init(void);
+
+enum kcsan_counter_id {
+ /*
+ * Number of watchpoints currently in use.
+ */
+ KCSAN_COUNTER_USED_WATCHPOINTS,
+
+ /*
+ * Total number of watchpoints set up.
+ */
+ KCSAN_COUNTER_SETUP_WATCHPOINTS,
+
+ /*
+ * Total number of data races.
+ */
+ KCSAN_COUNTER_DATA_RACES,
+
+ /*
+ * Total number of ASSERT failures due to races. If the observed race is
+ * due to two conflicting ASSERT type accesses, then both will be
+ * counted.
+ */
+ KCSAN_COUNTER_ASSERT_FAILURES,
+
+ /*
+ * Number of times no watchpoints were available.
+ */
+ KCSAN_COUNTER_NO_CAPACITY,
+
+ /*
+ * A thread checking a watchpoint raced with another checking thread;
+ * only one will be reported.
+ */
+ KCSAN_COUNTER_REPORT_RACES,
+
+ /*
+ * Observed data value change, but writer thread unknown.
+ */
+ KCSAN_COUNTER_RACES_UNKNOWN_ORIGIN,
+
+ /*
+ * The access cannot be encoded to a valid watchpoint.
+ */
+ KCSAN_COUNTER_UNENCODABLE_ACCESSES,
+
+ /*
+ * Watchpoint encoding caused a watchpoint to fire on mismatching
+ * accesses.
+ */
+ KCSAN_COUNTER_ENCODING_FALSE_POSITIVES,
+
+ KCSAN_COUNTER_COUNT, /* number of counters */
+};
+
+/*
+ * Increment/decrement counter with given id; avoid calling these in fast-path.
+ */
+extern void kcsan_counter_inc(enum kcsan_counter_id id);
+extern void kcsan_counter_dec(enum kcsan_counter_id id);
+
+/*
+ * Returns true if data races in the function symbol that maps to func_addr
+ * (offsets are ignored) should *not* be reported.
+ */
+extern bool kcsan_skip_report_debugfs(unsigned long func_addr);
+
+/*
+ * Value-change states.
+ */
+enum kcsan_value_change {
+ /*
+ * Did not observe a value-change, however, it is valid to report the
+ * race, depending on preferences.
+ */
+ KCSAN_VALUE_CHANGE_MAYBE,
+
+ /*
+ * Did not observe a value-change, and it is invalid to report the race.
+ */
+ KCSAN_VALUE_CHANGE_FALSE,
+
+ /*
+ * The value was observed to change, and the race should be reported.
+ */
+ KCSAN_VALUE_CHANGE_TRUE,
+};
+
+enum kcsan_report_type {
+ /*
+ * The thread that set up the watchpoint and briefly stalled was
+ * signalled that another thread triggered the watchpoint.
+ */
+ KCSAN_REPORT_RACE_SIGNAL,
+
+ /*
+ * A thread found and consumed a matching watchpoint.
+ */
+ KCSAN_REPORT_CONSUMED_WATCHPOINT,
+
+ /*
+ * No other thread was observed to race with the access, but the data
+ * value before and after the stall differs.
+ */
+ KCSAN_REPORT_RACE_UNKNOWN_ORIGIN,
+};
+
+/*
+ * Print a race report from thread that encountered the race.
+ */
+extern void kcsan_report(const volatile void *ptr, size_t size, int access_type,
+ enum kcsan_value_change value_change, int cpu_id,
+ enum kcsan_report_type type);
+
+#endif /* _KERNEL_KCSAN_KCSAN_H */
diff --git a/kernel/kcsan/report.c b/kernel/kcsan/report.c
new file mode 100644
index 000000000000..11c791b886f3
--- /dev/null
+++ b/kernel/kcsan/report.c
@@ -0,0 +1,515 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/jiffies.h>
+#include <linux/kernel.h>
+#include <linux/lockdep.h>
+#include <linux/preempt.h>
+#include <linux/printk.h>
+#include <linux/sched.h>
+#include <linux/spinlock.h>
+#include <linux/stacktrace.h>
+
+#include "kcsan.h"
+#include "encoding.h"
+
+/*
+ * Max. number of stack entries to show in the report.
+ */
+#define NUM_STACK_ENTRIES 64
+
+/*
+ * Other thread info: communicated from the other racing thread to the thread
+ * that set up the watchpoint, which then prints the complete report atomically.
+ * Only one struct is needed, as all threads are serialized to print the reports
+ * anyway, with reporting being in the slow-path.
+ */
+static struct {
+ const volatile void *ptr;
+ size_t size;
+ int access_type;
+ int task_pid;
+ int cpu_id;
+ unsigned long stack_entries[NUM_STACK_ENTRIES];
+ int num_stack_entries;
+} other_info = { .ptr = NULL };
+
+/*
+ * Information about reported races; used to rate limit reporting.
+ */
+struct report_time {
+ /*
+ * The last time the race was reported.
+ */
+ unsigned long time;
+
+ /*
+ * The frames of the 2 threads; if only 1 thread is known, one frame
+ * will be 0.
+ */
+ unsigned long frame1;
+ unsigned long frame2;
+};
+
+/*
+ * Since we also want to be able to debug allocators with KCSAN, to avoid
+ * deadlock, report_times cannot be dynamically resized with krealloc in
+ * rate_limit_report.
+ *
+ * Therefore, we use a fixed-size array, which at most will occupy a page. This
+ * still adequately rate limits reports, assuming that a) the number of unique
+ * data races is not excessive, and b) the occurrence of unique races within
+ * the same time window is limited.
+ */
+#define REPORT_TIMES_MAX (PAGE_SIZE / sizeof(struct report_time))
+#define REPORT_TIMES_SIZE \
+ (CONFIG_KCSAN_REPORT_ONCE_IN_MS > REPORT_TIMES_MAX ? \
+ REPORT_TIMES_MAX : \
+ CONFIG_KCSAN_REPORT_ONCE_IN_MS)
+static struct report_time report_times[REPORT_TIMES_SIZE];
+
+/*
+ * This spinlock protects reporting and other_info, since other_info is usually
+ * required when reporting.
+ */
+static DEFINE_SPINLOCK(report_lock);
+
+/*
+ * Checks if the race identified by thread frames frame1 and frame2 has
+ * been reported since (now - KCSAN_REPORT_ONCE_IN_MS).
+ */
+static bool rate_limit_report(unsigned long frame1, unsigned long frame2)
+{
+ struct report_time *use_entry = &report_times[0];
+ unsigned long invalid_before;
+ int i;
+
+ BUILD_BUG_ON(CONFIG_KCSAN_REPORT_ONCE_IN_MS != 0 && REPORT_TIMES_SIZE == 0);
+
+ if (CONFIG_KCSAN_REPORT_ONCE_IN_MS == 0)
+ return false;
+
+ invalid_before = jiffies - msecs_to_jiffies(CONFIG_KCSAN_REPORT_ONCE_IN_MS);
+
+ /* Check if a matching race report exists. */
+ for (i = 0; i < REPORT_TIMES_SIZE; ++i) {
+ struct report_time *rt = &report_times[i];
+
+ /*
+ * Must always select an entry for use to store info as we
+ * cannot resize report_times; at the end of the scan, use_entry
+		 * will be the oldest entry, which ideally was last used more
+		 * than KCSAN_REPORT_ONCE_IN_MS ago.
+ */
+ if (time_before(rt->time, use_entry->time))
+ use_entry = rt;
+
+ /*
+		 * Initially, there is no need to check any further, as this
+		 * entry and all following entries have never been used.
+ */
+ if (rt->time == 0)
+ break;
+
+ /* Check if entry expired. */
+ if (time_before(rt->time, invalid_before))
+ continue; /* before KCSAN_REPORT_ONCE_IN_MS ago */
+
+ /* Reported recently, check if race matches. */
+ if ((rt->frame1 == frame1 && rt->frame2 == frame2) ||
+ (rt->frame1 == frame2 && rt->frame2 == frame1))
+ return true;
+ }
+
+ use_entry->time = jiffies;
+ use_entry->frame1 = frame1;
+ use_entry->frame2 = frame2;
+ return false;
+}
+
+/*
+ * Special rules to skip reporting.
+ */
+static bool
+skip_report(enum kcsan_value_change value_change, unsigned long top_frame)
+{
+ /* Should never get here if value_change==FALSE. */
+ WARN_ON_ONCE(value_change == KCSAN_VALUE_CHANGE_FALSE);
+
+ /*
+	 * The first call to skip_report always has value_change==TRUE, since we
+	 * cannot know the value written by an instrumented access. For the 2nd
+ * call there are 6 cases with CONFIG_KCSAN_REPORT_VALUE_CHANGE_ONLY:
+ *
+ * 1. read watchpoint, conflicting write (value_change==TRUE): report;
+ * 2. read watchpoint, conflicting write (value_change==MAYBE): skip;
+ * 3. write watchpoint, conflicting write (value_change==TRUE): report;
+ * 4. write watchpoint, conflicting write (value_change==MAYBE): skip;
+ * 5. write watchpoint, conflicting read (value_change==MAYBE): skip;
+ * 6. write watchpoint, conflicting read (value_change==TRUE): report;
+ *
+ * Cases 1-4 are intuitive and expected; case 5 ensures we do not report
+ * data races where the write may have rewritten the same value; case 6
+ * is possible either if the size is larger than what we check value
+ * changes for or the access type is KCSAN_ACCESS_ASSERT.
+ */
+ if (IS_ENABLED(CONFIG_KCSAN_REPORT_VALUE_CHANGE_ONLY) &&
+ value_change == KCSAN_VALUE_CHANGE_MAYBE) {
+ /*
+ * The access is a write, but the data value did not change.
+ *
+		 * We opt out of this filter for certain functions at the
+		 * request of maintainers.
+ */
+ char buf[64];
+
+ snprintf(buf, sizeof(buf), "%ps", (void *)top_frame);
+ if (!strnstr(buf, "rcu_", sizeof(buf)) &&
+ !strnstr(buf, "_rcu", sizeof(buf)) &&
+ !strnstr(buf, "_srcu", sizeof(buf)))
+ return true;
+ }
+
+ return kcsan_skip_report_debugfs(top_frame);
+}
+
+static const char *get_access_type(int type)
+{
+ switch (type) {
+ case 0:
+ return "read";
+ case KCSAN_ACCESS_ATOMIC:
+ return "read (marked)";
+ case KCSAN_ACCESS_WRITE:
+ return "write";
+ case KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ATOMIC:
+ return "write (marked)";
+
+ /*
+ * ASSERT variants:
+ */
+ case KCSAN_ACCESS_ASSERT:
+ case KCSAN_ACCESS_ASSERT | KCSAN_ACCESS_ATOMIC:
+ return "assert no writes";
+ case KCSAN_ACCESS_ASSERT | KCSAN_ACCESS_WRITE:
+ case KCSAN_ACCESS_ASSERT | KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ATOMIC:
+ return "assert no accesses";
+
+ default:
+ BUG();
+ }
+}
+
+static const char *get_bug_type(int type)
+{
+ return (type & KCSAN_ACCESS_ASSERT) != 0 ? "assert: race" : "data-race";
+}
+
+/* Return thread description: in task or interrupt. */
+static const char *get_thread_desc(int task_id)
+{
+ if (task_id != -1) {
+ static char buf[32]; /* safe: protected by report_lock */
+
+ snprintf(buf, sizeof(buf), "task %i", task_id);
+ return buf;
+ }
+ return "interrupt";
+}
+
+/* Helper to skip KCSAN-related functions in stack-trace. */
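+/*
+ * For example (illustrative): frames such as kcsan_found_watchpoint() or
+ * __tsan_read4() at the top of the raw trace are skipped, so the printed
+ * report begins at the instrumented access itself.
+ */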
+static int get_stack_skipnr(unsigned long stack_entries[], int num_entries)
+{
+ char buf[64];
+ int skip = 0;
+
+ for (; skip < num_entries; ++skip) {
+ snprintf(buf, sizeof(buf), "%ps", (void *)stack_entries[skip]);
+ if (!strnstr(buf, "csan_", sizeof(buf)) &&
+ !strnstr(buf, "tsan_", sizeof(buf)) &&
+ !strnstr(buf, "_once_size", sizeof(buf))) {
+ break;
+ }
+ }
+ return skip;
+}
+
+/* Compares symbolized strings of addr1 and addr2. */
+static int sym_strcmp(void *addr1, void *addr2)
+{
+ char buf1[64];
+ char buf2[64];
+
+ snprintf(buf1, sizeof(buf1), "%pS", addr1);
+ snprintf(buf2, sizeof(buf2), "%pS", addr2);
+
+ return strncmp(buf1, buf2, sizeof(buf1));
+}
+
+/*
+ * Returns true if a report was generated, false otherwise.
+ */
+static bool print_report(const volatile void *ptr, size_t size, int access_type,
+ enum kcsan_value_change value_change, int cpu_id,
+ enum kcsan_report_type type)
+{
+ unsigned long stack_entries[NUM_STACK_ENTRIES] = { 0 };
+ int num_stack_entries = stack_trace_save(stack_entries, NUM_STACK_ENTRIES, 1);
+ int skipnr = get_stack_skipnr(stack_entries, num_stack_entries);
+ unsigned long this_frame = stack_entries[skipnr];
+ unsigned long other_frame = 0;
+ int other_skipnr = 0; /* silence uninit warnings */
+
+ /*
+ * Must check report filter rules before starting to print.
+ */
+ if (skip_report(KCSAN_VALUE_CHANGE_TRUE, stack_entries[skipnr]))
+ return false;
+
+ if (type == KCSAN_REPORT_RACE_SIGNAL) {
+ other_skipnr = get_stack_skipnr(other_info.stack_entries,
+ other_info.num_stack_entries);
+ other_frame = other_info.stack_entries[other_skipnr];
+
+ /* @value_change is only known for the other thread */
+ if (skip_report(value_change, other_frame))
+ return false;
+ }
+
+ if (rate_limit_report(this_frame, other_frame))
+ return false;
+
+ /* Print report header. */
+ pr_err("==================================================================\n");
+ switch (type) {
+ case KCSAN_REPORT_RACE_SIGNAL: {
+ int cmp;
+
+ /*
+		 * Order functions lexicographically for consistent bug titles.
+ * Do not print offset of functions to keep title short.
+ */
+ cmp = sym_strcmp((void *)other_frame, (void *)this_frame);
+ pr_err("BUG: KCSAN: %s in %ps / %ps\n",
+ get_bug_type(access_type | other_info.access_type),
+ (void *)(cmp < 0 ? other_frame : this_frame),
+ (void *)(cmp < 0 ? this_frame : other_frame));
+ } break;
+
+ case KCSAN_REPORT_RACE_UNKNOWN_ORIGIN:
+ pr_err("BUG: KCSAN: %s in %pS\n", get_bug_type(access_type),
+ (void *)this_frame);
+ break;
+
+ default:
+ BUG();
+ }
+
+ pr_err("\n");
+
+ /* Print information about the racing accesses. */
+ switch (type) {
+ case KCSAN_REPORT_RACE_SIGNAL:
+ pr_err("%s to 0x%px of %zu bytes by %s on cpu %i:\n",
+ get_access_type(other_info.access_type), other_info.ptr,
+ other_info.size, get_thread_desc(other_info.task_pid),
+ other_info.cpu_id);
+
+ /* Print the other thread's stack trace. */
+ stack_trace_print(other_info.stack_entries + other_skipnr,
+ other_info.num_stack_entries - other_skipnr,
+ 0);
+
+ pr_err("\n");
+ pr_err("%s to 0x%px of %zu bytes by %s on cpu %i:\n",
+ get_access_type(access_type), ptr, size,
+ get_thread_desc(in_task() ? task_pid_nr(current) : -1),
+ cpu_id);
+ break;
+
+ case KCSAN_REPORT_RACE_UNKNOWN_ORIGIN:
+ pr_err("race at unknown origin, with %s to 0x%px of %zu bytes by %s on cpu %i:\n",
+ get_access_type(access_type), ptr, size,
+ get_thread_desc(in_task() ? task_pid_nr(current) : -1),
+ cpu_id);
+ break;
+
+ default:
+ BUG();
+ }
+ /* Print stack trace of this thread. */
+ stack_trace_print(stack_entries + skipnr, num_stack_entries - skipnr,
+ 0);
+
+ /* Print report footer. */
+ pr_err("\n");
+ pr_err("Reported by Kernel Concurrency Sanitizer on:\n");
+ dump_stack_print_info(KERN_DEFAULT);
+ pr_err("==================================================================\n");
+
+ return true;
+}
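+/*
+ * For KCSAN_REPORT_RACE_SIGNAL, the output of print_report() above has
+ * roughly the following shape (illustrative only; function names, sizes,
+ * addresses and IDs are placeholders):
+ *
+ *	==================================================================
+ *	BUG: KCSAN: data-race in funcA / funcB
+ *
+ *	write to 0x... of 8 bytes by task 42 on cpu 1:
+ *	 <stack trace of the other thread>
+ *
+ *	read to 0x... of 8 bytes by task 43 on cpu 0:
+ *	 <stack trace of this thread>
+ *
+ *	Reported by Kernel Concurrency Sanitizer on:
+ *	<dump_stack_print_info() output>
+ *	==================================================================
+ */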
+
+static void release_report(unsigned long *flags, enum kcsan_report_type type)
+{
+ if (type == KCSAN_REPORT_RACE_SIGNAL)
+ other_info.ptr = NULL; /* mark for reuse */
+
+ spin_unlock_irqrestore(&report_lock, *flags);
+}
+
+/*
+ * Depending on the report type either sets other_info and returns false, or
+ * acquires the matching other_info and returns true. If other_info is not
+ * required for the report type, simply acquires report_lock and returns true.
+ */
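+/*
+ * For example (illustrative): the thread that finds and consumes a watchpoint
+ * reports with KCSAN_REPORT_CONSUMED_WATCHPOINT, filling in other_info and
+ * returning false; the thread that set up the watchpoint then reports with
+ * KCSAN_REPORT_RACE_SIGNAL, acquires the matching other_info, and prints the
+ * combined report.
+ */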
+static bool prepare_report(unsigned long *flags, const volatile void *ptr,
+ size_t size, int access_type, int cpu_id,
+ enum kcsan_report_type type)
+{
+ if (type != KCSAN_REPORT_CONSUMED_WATCHPOINT &&
+ type != KCSAN_REPORT_RACE_SIGNAL) {
+ /* other_info not required; just acquire report_lock */
+ spin_lock_irqsave(&report_lock, *flags);
+ return true;
+ }
+
+retry:
+ spin_lock_irqsave(&report_lock, *flags);
+
+ switch (type) {
+ case KCSAN_REPORT_CONSUMED_WATCHPOINT:
+ if (other_info.ptr != NULL)
+ break; /* still in use, retry */
+
+ other_info.ptr = ptr;
+ other_info.size = size;
+ other_info.access_type = access_type;
+ other_info.task_pid = in_task() ? task_pid_nr(current) : -1;
+ other_info.cpu_id = cpu_id;
+ other_info.num_stack_entries = stack_trace_save(other_info.stack_entries, NUM_STACK_ENTRIES, 1);
+
+ spin_unlock_irqrestore(&report_lock, *flags);
+
+ /*
+ * The other thread will print the summary; other_info may now
+ * be consumed.
+ */
+ return false;
+
+ case KCSAN_REPORT_RACE_SIGNAL:
+ if (other_info.ptr == NULL)
+ break; /* no data available yet, retry */
+
+ /*
+		 * First check if this is the other_info we are expecting, i.e.
+		 * whether it matches based on how the watchpoint was encoded.
+ */
+ if (!matching_access((unsigned long)other_info.ptr &
+ WATCHPOINT_ADDR_MASK,
+ other_info.size,
+ (unsigned long)ptr & WATCHPOINT_ADDR_MASK,
+ size))
+ break; /* mismatching watchpoint, retry */
+
+ if (!matching_access((unsigned long)other_info.ptr,
+ other_info.size, (unsigned long)ptr,
+ size)) {
+ /*
+			 * If the actual accesses do not match, this was a false
+ * positive due to watchpoint encoding.
+ */
+ kcsan_counter_inc(
+ KCSAN_COUNTER_ENCODING_FALSE_POSITIVES);
+
+ /* discard this other_info */
+ release_report(flags, KCSAN_REPORT_RACE_SIGNAL);
+ return false;
+ }
+
+ access_type |= other_info.access_type;
+ if ((access_type & KCSAN_ACCESS_WRITE) == 0) {
+ /*
+ * While the address matches, this is not the other_info
+ * from the thread that consumed our watchpoint, since
+ * neither this nor the access in other_info is a write.
+ * It is invalid to continue with the report, since we
+ * only have information about reads.
+ *
+ * This can happen due to concurrent races on the same
+ * address, with at least 4 threads. To avoid locking up
+ * other_info and all other threads, we have to consume
+ * it regardless.
+ *
+ * A concrete case to illustrate why we might lock up if
+ * we do not consume other_info:
+ *
+ * We have 4 threads, all accessing the same address
+ * (or matching address ranges). Assume the following
+ * watcher and watchpoint consumer pairs:
+ * write1-read1, read2-write2. The first to populate
+ * other_info is write2, however, write1 consumes it,
+ * resulting in a report of write1-write2. This report
+ * is valid, however, now read1 populates other_info;
+ * read2-read1 is an invalid conflict, yet, no other
+ * conflicting access is left. Therefore, we must
+ * consume read1's other_info.
+ *
+ * Since this case is assumed to be rare, it is
+ * reasonable to omit this report: one of the other
+ * reports includes information about the same shared
+ * data, and at this point the likelihood that we
+ * re-report the same race again is high.
+ */
+ release_report(flags, KCSAN_REPORT_RACE_SIGNAL);
+ return false;
+ }
+
+ /*
+		 * Matching & usable access in other_info: keep report_lock
+ * locked, as this thread consumes it to print the full report;
+ * unlocked in release_report.
+ */
+ return true;
+
+ default:
+ BUG();
+ }
+
+ spin_unlock_irqrestore(&report_lock, *flags);
+
+ goto retry;
+}
+
+void kcsan_report(const volatile void *ptr, size_t size, int access_type,
+ enum kcsan_value_change value_change, int cpu_id,
+ enum kcsan_report_type type)
+{
+ unsigned long flags = 0;
+
+ /*
+ * With TRACE_IRQFLAGS, lockdep's IRQ trace state becomes corrupted if
+ * we do not turn off lockdep here; this could happen due to recursion
+ * into lockdep via KCSAN if we detect a race in utilities used by
+ * lockdep.
+ */
+ lockdep_off();
+
+ kcsan_disable_current();
+ if (prepare_report(&flags, ptr, size, access_type, cpu_id, type)) {
+ /*
+		 * Never report if value_change is FALSE, only if it is either
+		 * TRUE or MAYBE. In case of MAYBE, further filtering may
+ * be done once we know the full stack trace in print_report().
+ */
+ bool reported = value_change != KCSAN_VALUE_CHANGE_FALSE &&
+ print_report(ptr, size, access_type, value_change, cpu_id, type);
+
+ if (reported && panic_on_warn)
+ panic("panic_on_warn set ...\n");
+
+ release_report(&flags, type);
+ }
+ kcsan_enable_current();
+
+ lockdep_on();
+}
diff --git a/kernel/kcsan/test.c b/kernel/kcsan/test.c
new file mode 100644
index 000000000000..d26a052d3383
--- /dev/null
+++ b/kernel/kcsan/test.c
@@ -0,0 +1,131 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/printk.h>
+#include <linux/random.h>
+#include <linux/types.h>
+
+#include "encoding.h"
+
+#define ITERS_PER_TEST 2000
+
+/* Test requirements. */
+static bool test_requires(void)
+{
+ /* random should be initialized for the below tests */
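+	/*
+	 * With a seeded PRNG, the two draws sum to zero (mod 2^32) only with
+	 * probability ~2^-32, so this check effectively only fails if the
+	 * PRNG has not been initialized yet.
+	 */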
+ return prandom_u32() + prandom_u32() != 0;
+}
+
+/*
+ * Test watchpoint encode and decode: check that encoding some access's info,
+ * and then decoding it, preserves the access's info.
+ */
+static bool test_encode_decode(void)
+{
+ int i;
+
+ for (i = 0; i < ITERS_PER_TEST; ++i) {
+ size_t size = prandom_u32_max(MAX_ENCODABLE_SIZE) + 1;
+ bool is_write = !!prandom_u32_max(2);
+ unsigned long addr;
+
+ prandom_bytes(&addr, sizeof(addr));
+ if (WARN_ON(!check_encodable(addr, size)))
+ return false;
+
+ /* Encode and decode */
+ {
+ const long encoded_watchpoint =
+ encode_watchpoint(addr, size, is_write);
+ unsigned long verif_masked_addr;
+ size_t verif_size;
+ bool verif_is_write;
+
+ /* Check special watchpoints */
+ if (WARN_ON(decode_watchpoint(
+ INVALID_WATCHPOINT, &verif_masked_addr,
+ &verif_size, &verif_is_write)))
+ return false;
+ if (WARN_ON(decode_watchpoint(
+ CONSUMED_WATCHPOINT, &verif_masked_addr,
+ &verif_size, &verif_is_write)))
+ return false;
+
+ /* Check decoding watchpoint returns same data */
+ if (WARN_ON(!decode_watchpoint(
+ encoded_watchpoint, &verif_masked_addr,
+ &verif_size, &verif_is_write)))
+ return false;
+ if (WARN_ON(verif_masked_addr !=
+ (addr & WATCHPOINT_ADDR_MASK)))
+ goto fail;
+ if (WARN_ON(verif_size != size))
+ goto fail;
+ if (WARN_ON(is_write != verif_is_write))
+ goto fail;
+
+ continue;
+fail:
+ pr_err("%s fail: %s %zu bytes @ %lx -> encoded: %lx -> %s %zu bytes @ %lx\n",
+ __func__, is_write ? "write" : "read", size,
+ addr, encoded_watchpoint,
+ verif_is_write ? "write" : "read", verif_size,
+ verif_masked_addr);
+ return false;
+ }
+ }
+
+ return true;
+}
+
+/* Test access matching function. */
+static bool test_matching_access(void)
+{
+ if (WARN_ON(!matching_access(10, 1, 10, 1)))
+ return false;
+ if (WARN_ON(!matching_access(10, 2, 11, 1)))
+ return false;
+ if (WARN_ON(!matching_access(10, 1, 9, 2)))
+ return false;
+ if (WARN_ON(matching_access(10, 1, 11, 1)))
+ return false;
+ if (WARN_ON(matching_access(9, 1, 10, 1)))
+ return false;
+
+ /*
+ * An access of size 0 could match another access, as demonstrated here.
+ * Rather than add more comparisons to 'matching_access()', which would
+ * end up in the fast-path for *all* checks, check_access() simply
+ * returns for all accesses of size 0.
+ */
+ if (WARN_ON(!matching_access(8, 8, 12, 0)))
+ return false;
+
+ return true;
+}
+
+static int __init kcsan_selftest(void)
+{
+ int passed = 0;
+ int total = 0;
+
+#define RUN_TEST(do_test) \
+ do { \
+ ++total; \
+ if (do_test()) \
+ ++passed; \
+ else \
+			pr_err("KCSAN selftest: " #do_test " failed\n");	\
+ } while (0)
+
+ RUN_TEST(test_requires);
+ RUN_TEST(test_encode_decode);
+ RUN_TEST(test_matching_access);
+
+ pr_info("KCSAN selftest: %d/%d tests passed\n", passed, total);
+ if (passed != total)
+ panic("KCSAN selftests failed");
+ return 0;
+}
+postcore_initcall(kcsan_selftest);
diff --git a/kernel/locking/Makefile b/kernel/locking/Makefile
index 45452facff3b..6d11cfb9b41f 100644
--- a/kernel/locking/Makefile
+++ b/kernel/locking/Makefile
@@ -5,6 +5,9 @@ KCOV_INSTRUMENT := n
obj-y += mutex.o semaphore.o rwsem.o percpu-rwsem.o
+# Avoid recursion lockdep -> KCSAN -> ... -> lockdep.
+KCSAN_SANITIZE_lockdep.o := n
+
ifdef CONFIG_FUNCTION_TRACER
CFLAGS_REMOVE_lockdep.o = $(CC_FLAGS_FTRACE)
CFLAGS_REMOVE_lockdep_proc.o = $(CC_FLAGS_FTRACE)
diff --git a/kernel/sched/Makefile b/kernel/sched/Makefile
index 21fb5a5662b5..5fc9c9b70862 100644
--- a/kernel/sched/Makefile
+++ b/kernel/sched/Makefile
@@ -7,6 +7,12 @@ endif
# that is not a function of syscall inputs. E.g. involuntary context switches.
KCOV_INSTRUMENT := n
+# There are numerous data races here, however, most of them are due to plain accesses.
+# Instrumenting this code would make it even harder for syzbot to find reproducers,
+# because these bugs trigger without specific input. Disabled by default, but should
+# be re-enabled eventually.
+KCSAN_SANITIZE := n
+
ifneq ($(CONFIG_SCHED_OMIT_FRAME_POINTER),y)
# According to Alan Modra <alan@linuxcare.com.au>, the -fno-omit-frame-pointer is
# needed for x86 only. Why this used to be enabled for all architectures is beyond
diff --git a/kernel/trace/Makefile b/kernel/trace/Makefile
index f9dcd19165fa..6b601d88bf71 100644
--- a/kernel/trace/Makefile
+++ b/kernel/trace/Makefile
@@ -6,6 +6,9 @@ ifdef CONFIG_FUNCTION_TRACER
ORIG_CFLAGS := $(KBUILD_CFLAGS)
KBUILD_CFLAGS = $(subst $(CC_FLAGS_FTRACE),,$(ORIG_CFLAGS))
+# Avoid recursion due to instrumentation.
+KCSAN_SANITIZE := n
+
ifdef CONFIG_FTRACE_SELFTEST
# selftest needs instrumentation
CFLAGS_trace_selftest_dynamic.o = $(CC_FLAGS_FTRACE)
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 977b5a190297..3263b43c9118 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -1519,6 +1519,8 @@ config PROVIDE_OHCI1394_DMA_INIT
source "samples/Kconfig"
+source "lib/Kconfig.kcsan"
+
config ARCH_HAS_DEVMEM_IS_ALLOWED
bool
diff --git a/lib/Kconfig.kcsan b/lib/Kconfig.kcsan
new file mode 100644
index 000000000000..f0b791143c6a
--- /dev/null
+++ b/lib/Kconfig.kcsan
@@ -0,0 +1,156 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+config HAVE_ARCH_KCSAN
+ bool
+
+menuconfig KCSAN
+ bool "KCSAN: dynamic race detector"
+ depends on HAVE_ARCH_KCSAN && DEBUG_KERNEL && !KASAN
+ select STACKTRACE
+ help
+ The Kernel Concurrency Sanitizer (KCSAN) is a dynamic race detector,
+ which relies on compile-time instrumentation, and uses a
+ watchpoint-based sampling approach to detect races.
+
+	  KCSAN's primary purpose is to detect data races. With the help of the
+	  provided assertions, KCSAN can also be used to check properties of
+	  concurrent code where bugs do not manifest as data races.
+
+ See <file:Documentation/dev-tools/kcsan.rst> for more details.
+
+if KCSAN
+
+config KCSAN_DEBUG
+ bool "Debugging of KCSAN internals"
+
+config KCSAN_SELFTEST
+ bool "Perform short selftests on boot"
+ default y
+ help
+	  Run KCSAN selftests on boot. On test failure, the kernel panics.
+
+config KCSAN_EARLY_ENABLE
+ bool "Early enable during boot"
+ default y
+ help
+	  Whether KCSAN should be enabled globally as soon as possible during
+	  boot. KCSAN can later be enabled/disabled via debugfs.
+
+config KCSAN_NUM_WATCHPOINTS
+ int "Number of available watchpoints"
+ default 64
+ help
+ Total number of available watchpoints. An address range maps into a
+ specific watchpoint slot as specified in kernel/kcsan/encoding.h.
+	  Although a larger number of watchpoints may not be usable due to the
+	  limited number of CPUs, a larger value helps to improve performance by
+	  reducing cache-line contention. The chosen default is a
+ conservative value; we should almost never observe "no_capacity"
+ events (see /sys/kernel/debug/kcsan).
+
+config KCSAN_UDELAY_TASK
+ int "Delay in microseconds (for tasks)"
+ default 80
+ help
+ For tasks, the microsecond delay after setting up a watchpoint.
+
+config KCSAN_UDELAY_INTERRUPT
+ int "Delay in microseconds (for interrupts)"
+ default 20
+ help
+ For interrupts, the microsecond delay after setting up a watchpoint.
+ Interrupts have tighter latency requirements, and their delay should
+ be lower than for tasks.
+
+config KCSAN_DELAY_RANDOMIZE
+ bool "Randomize above delays"
+ default y
+ help
+	  Whether the delays should be randomized, with the maximum being the
+	  KCSAN_UDELAY_* values above. If disabled, the delays are always
+	  exactly the KCSAN_UDELAY_* values.
+
+config KCSAN_SKIP_WATCH
+ int "Skip instructions before setting up watchpoint"
+ default 4000
+ help
+	  The number of per-CPU memory operations to skip before another
+	  watchpoint is set up, i.e. only one in KCSAN_SKIP_WATCH per-CPU
+	  memory operations is used to set up a watchpoint. A smaller value
+ results in more aggressive race detection, whereas a larger value
+ improves system performance at the cost of missing some races.
+
+config KCSAN_SKIP_WATCH_RANDOMIZE
+ bool "Randomize watchpoint instruction skip count"
+ default y
+ help
+	  Whether the instruction skip count should be randomized, with the
+	  maximum being KCSAN_SKIP_WATCH. If disabled, the chosen value is
+	  always KCSAN_SKIP_WATCH.
+
+config KCSAN_REPORT_ONCE_IN_MS
+ int "Duration in milliseconds, in which any given race is only reported once"
+ default 3000
+ help
+ Any given race is only reported once in the defined time window.
+ Different races may still generate reports within a duration that is
+	  smaller than the duration defined here. This rate-limits reporting to
+	  avoid flooding the console with reports. Setting this to 0 disables
+	  rate limiting.
+
+# The main purpose of the below options is to control reported data races (e.g.
+# in fuzzer configs); they are not expected to be switched frequently by other
+# users. We could turn some of them into boot parameters, but given they should
+# not be switched normally, let's keep them here to simplify configuration.
+#
+# The defaults below are chosen to be very conservative, and may miss certain
+# bugs.
+
+config KCSAN_REPORT_RACE_UNKNOWN_ORIGIN
+ bool "Report races of unknown origin"
+ default y
+ help
+	  Whether KCSAN should report races where only one access is known, and
+	  the conflicting access is of unknown origin. This type of race is
+ reported if it was only possible to infer a race due to a data value
+ change while an access is being delayed on a watchpoint.
+
+config KCSAN_REPORT_VALUE_CHANGE_ONLY
+ bool "Only report races where watcher observed a data value change"
+ default y
+ help
+ If enabled and a conflicting write is observed via a watchpoint, but
+ the data value of the memory location was observed to remain
+ unchanged, do not report the data race.
+
+config KCSAN_ASSUME_PLAIN_WRITES_ATOMIC
+ bool "Assume that plain aligned writes up to word size are atomic"
+ default y
+ help
+ Assume that plain aligned writes up to word size are atomic by
+ default, and also not subject to other unsafe compiler optimizations
+ resulting in data races. This will cause KCSAN to not report data
+ races due to conflicts where the only plain accesses are aligned
+ writes up to word size: conflicts between marked reads and plain
+ aligned writes up to word size will not be reported as data races;
+ notice that data races between two conflicting plain aligned writes
+ will also not be reported.
+
+config KCSAN_IGNORE_ATOMICS
+ bool "Do not instrument marked atomic accesses"
+ help
+	  Never instrument marked atomic accesses. This option can be used for
+	  additional filtering. Conflicting marked atomic reads and plain
+	  writes will never be reported as a data race; however, this will
+	  cause conflicting plain reads and marked writes to result in
+	  "unknown origin" reports.
+ If combined with CONFIG_KCSAN_REPORT_RACE_UNKNOWN_ORIGIN=n, data
+ races where at least one access is marked atomic will never be
+ reported.
+
+	  Similar to KCSAN_ASSUME_PLAIN_WRITES_ATOMIC, but also covering
+	  unaligned accesses: conflicting marked atomic reads and plain writes
+	  will not be reported as data races; however, unlike that option, data
+	  races due to two conflicting plain writes will still be reported
+	  (aligned and unaligned, if CONFIG_KCSAN_ASSUME_PLAIN_WRITES_ATOMIC=n).
+
+endif # KCSAN
diff --git a/lib/Makefile b/lib/Makefile
index 685aee60de1d..ab68a8674360 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -25,6 +25,9 @@ KASAN_SANITIZE_string.o := n
CFLAGS_string.o := $(call cc-option, -fno-stack-protector)
endif
+# Used by KCSAN while enabled; avoid recursion.
+KCSAN_SANITIZE_random32.o := n
+
lib-y := ctype.o string.o vsprintf.o cmdline.o \
rbtree.o radix-tree.o timerqueue.o xarray.o \
idr.o extable.o sha1.o irq_regs.o argv_split.o \
@@ -292,6 +295,7 @@ endif
UBSAN_SANITIZE_ubsan.o := n
KASAN_SANITIZE_ubsan.o := n
+KCSAN_SANITIZE_ubsan.o := n
CFLAGS_ubsan.o := $(call cc-option, -fno-stack-protector) $(DISABLE_STACKLEAK_PLUGIN)
obj-$(CONFIG_SBITMAP) += sbitmap.o
diff --git a/lib/iov_iter.c b/lib/iov_iter.c
index 51595bf3af85..bf538c2bec77 100644
--- a/lib/iov_iter.c
+++ b/lib/iov_iter.c
@@ -8,6 +8,7 @@
#include <linux/splice.h>
#include <net/checksum.h>
#include <linux/scatterlist.h>
+#include <linux/instrumented.h>
#define PIPE_PARANOIA /* for now */
@@ -138,7 +139,7 @@
static int copyout(void __user *to, const void *from, size_t n)
{
if (access_ok(to, n)) {
- kasan_check_read(from, n);
+ instrument_copy_to_user(to, from, n);
n = raw_copy_to_user(to, from, n);
}
return n;
@@ -147,7 +148,7 @@ static int copyout(void __user *to, const void *from, size_t n)
static int copyin(void *to, const void __user *from, size_t n)
{
if (access_ok(from, n)) {
- kasan_check_write(to, n);
+ instrument_copy_from_user(to, from, n);
n = raw_copy_from_user(to, from, n);
}
return n;
@@ -639,7 +640,7 @@ EXPORT_SYMBOL(_copy_to_iter);
static int copyout_mcsafe(void __user *to, const void *from, size_t n)
{
if (access_ok(to, n)) {
- kasan_check_read(from, n);
+ instrument_copy_to_user(to, from, n);
n = copy_to_user_mcsafe((__force void *) to, from, n);
}
return n;
diff --git a/lib/usercopy.c b/lib/usercopy.c
index cbb4d9ec00f2..4bb1c5e7a3eb 100644
--- a/lib/usercopy.c
+++ b/lib/usercopy.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
-#include <linux/uaccess.h>
#include <linux/bitops.h>
+#include <linux/instrumented.h>
+#include <linux/uaccess.h>
/* out-of-line parts */
@@ -10,7 +11,7 @@ unsigned long _copy_from_user(void *to, const void __user *from, unsigned long n
unsigned long res = n;
might_fault();
if (likely(access_ok(from, n))) {
- kasan_check_write(to, n);
+ instrument_copy_from_user(to, from, n);
res = raw_copy_from_user(to, from, n);
}
if (unlikely(res))
@@ -25,7 +26,7 @@ unsigned long _copy_to_user(void __user *to, const void *from, unsigned long n)
{
might_fault();
if (likely(access_ok(to, n))) {
- kasan_check_read(from, n);
+ instrument_copy_to_user(to, from, n);
n = raw_copy_to_user(to, from, n);
}
return n;
diff --git a/mm/Makefile b/mm/Makefile
index fccd3756b25f..7881b8ede627 100644
--- a/mm/Makefile
+++ b/mm/Makefile
@@ -8,6 +8,14 @@ KASAN_SANITIZE_slab.o := n
KASAN_SANITIZE_slub.o := n
KCSAN_SANITIZE_kmemleak.o := n
+# These produce frequent data race reports: most of them are due to accesses
+# to different bits of the same word. Re-enable KCSAN for these when we have
+# more consensus on what to do about them.
+KCSAN_SANITIZE_slab_common.o := n
+KCSAN_SANITIZE_slab.o := n
+KCSAN_SANITIZE_slub.o := n
+KCSAN_SANITIZE_page_alloc.o := n
+
# These files are disabled because they produce non-interesting and/or
# flaky coverage that is not a function of syscall inputs. E.g. slab is out of
# free pages, or a task is migrated between nodes.
diff --git a/scripts/Makefile.kcsan b/scripts/Makefile.kcsan
new file mode 100644
index 000000000000..caf1111a28ae
--- /dev/null
+++ b/scripts/Makefile.kcsan
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0
+ifdef CONFIG_KCSAN
+
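+# -fsanitize=thread causes the compiler to emit calls into the KCSAN runtime
+# (the __tsan_*() functions) for instrumented memory accesses.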
+CFLAGS_KCSAN := -fsanitize=thread
+
+endif # CONFIG_KCSAN
diff --git a/scripts/Makefile.lib b/scripts/Makefile.lib
index 4b799737722c..37d7d27b85da 100644
--- a/scripts/Makefile.lib
+++ b/scripts/Makefile.lib
@@ -149,6 +149,16 @@ _c_flags += $(if $(patsubst n%,, \
$(CFLAGS_KCOV))
endif
+#
+# Enable KCSAN flags except for files or directories we don't want to check
+# (depends on variables KCSAN_SANITIZE_obj.o, KCSAN_SANITIZE)
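+# For example, a subsystem Makefile may set "KCSAN_SANITIZE := n" to exempt a
+# whole directory, or "KCSAN_SANITIZE_foo.o := n" (foo.o is a placeholder) to
+# exempt a single object file.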
+#
+ifeq ($(CONFIG_KCSAN),y)
+_c_flags += $(if $(patsubst n%,, \
+ $(KCSAN_SANITIZE_$(basetarget).o)$(KCSAN_SANITIZE)y), \
+ $(CFLAGS_KCSAN))
+endif
+
# $(srctree)/$(src) for including checkin headers from generated source files
# $(objtree)/$(obj) for including generated headers from checkin source files
ifeq ($(KBUILD_EXTMOD),)
diff --git a/scripts/atomic/fallbacks/acquire b/scripts/atomic/fallbacks/acquire
index e38871e64db6..ea489acc285e 100755
--- a/scripts/atomic/fallbacks/acquire
+++ b/scripts/atomic/fallbacks/acquire
@@ -1,5 +1,5 @@
cat <<EOF
-static inline ${ret}
+static __always_inline ${ret}
${atomic}_${pfx}${name}${sfx}_acquire(${params})
{
${ret} ret = ${atomic}_${pfx}${name}${sfx}_relaxed(${args});
diff --git a/scripts/atomic/fallbacks/add_negative b/scripts/atomic/fallbacks/add_negative
index e6f4815637de..03cc2e07fac5 100755
--- a/scripts/atomic/fallbacks/add_negative
+++ b/scripts/atomic/fallbacks/add_negative
@@ -8,7 +8,7 @@ cat <<EOF
* if the result is negative, or false when
* result is greater than or equal to zero.
*/
-static inline bool
+static __always_inline bool
${atomic}_add_negative(${int} i, ${atomic}_t *v)
{
return ${atomic}_add_return(i, v) < 0;
diff --git a/scripts/atomic/fallbacks/add_unless b/scripts/atomic/fallbacks/add_unless
index 792533885fbf..daf87a04c850 100755
--- a/scripts/atomic/fallbacks/add_unless
+++ b/scripts/atomic/fallbacks/add_unless
@@ -8,7 +8,7 @@ cat << EOF
* Atomically adds @a to @v, if @v was not already @u.
* Returns true if the addition was done.
*/
-static inline bool
+static __always_inline bool
${atomic}_add_unless(${atomic}_t *v, ${int} a, ${int} u)
{
return ${atomic}_fetch_add_unless(v, a, u) != u;
diff --git a/scripts/atomic/fallbacks/andnot b/scripts/atomic/fallbacks/andnot
index 9f3a3216b5e3..14efce01225a 100755
--- a/scripts/atomic/fallbacks/andnot
+++ b/scripts/atomic/fallbacks/andnot
@@ -1,5 +1,5 @@
cat <<EOF
-static inline ${ret}
+static __always_inline ${ret}
${atomic}_${pfx}andnot${sfx}${order}(${int} i, ${atomic}_t *v)
{
${retstmt}${atomic}_${pfx}and${sfx}${order}(~i, v);
diff --git a/scripts/atomic/fallbacks/dec b/scripts/atomic/fallbacks/dec
index 10bbc82be31d..118282f3a5a3 100755
--- a/scripts/atomic/fallbacks/dec
+++ b/scripts/atomic/fallbacks/dec
@@ -1,5 +1,5 @@
cat <<EOF
-static inline ${ret}
+static __always_inline ${ret}
${atomic}_${pfx}dec${sfx}${order}(${atomic}_t *v)
{
${retstmt}${atomic}_${pfx}sub${sfx}${order}(1, v);
diff --git a/scripts/atomic/fallbacks/dec_and_test b/scripts/atomic/fallbacks/dec_and_test
index 0ce7103b3df2..f8967a891117 100755
--- a/scripts/atomic/fallbacks/dec_and_test
+++ b/scripts/atomic/fallbacks/dec_and_test
@@ -7,7 +7,7 @@ cat <<EOF
* returns true if the result is 0, or false for all other
* cases.
*/
-static inline bool
+static __always_inline bool
${atomic}_dec_and_test(${atomic}_t *v)
{
return ${atomic}_dec_return(v) == 0;
diff --git a/scripts/atomic/fallbacks/dec_if_positive b/scripts/atomic/fallbacks/dec_if_positive
index c52eacec43c8..cfb380bd2da6 100755
--- a/scripts/atomic/fallbacks/dec_if_positive
+++ b/scripts/atomic/fallbacks/dec_if_positive
@@ -1,5 +1,5 @@
cat <<EOF
-static inline ${ret}
+static __always_inline ${ret}
${atomic}_dec_if_positive(${atomic}_t *v)
{
${int} dec, c = ${atomic}_read(v);
diff --git a/scripts/atomic/fallbacks/dec_unless_positive b/scripts/atomic/fallbacks/dec_unless_positive
index 8a2578f14268..69cb7aa01f9c 100755
--- a/scripts/atomic/fallbacks/dec_unless_positive
+++ b/scripts/atomic/fallbacks/dec_unless_positive
@@ -1,5 +1,5 @@
cat <<EOF
-static inline bool
+static __always_inline bool
${atomic}_dec_unless_positive(${atomic}_t *v)
{
${int} c = ${atomic}_read(v);
diff --git a/scripts/atomic/fallbacks/fence b/scripts/atomic/fallbacks/fence
index 82f68fa6931a..92a3a4691bab 100755
--- a/scripts/atomic/fallbacks/fence
+++ b/scripts/atomic/fallbacks/fence
@@ -1,5 +1,5 @@
cat <<EOF
-static inline ${ret}
+static __always_inline ${ret}
${atomic}_${pfx}${name}${sfx}(${params})
{
${ret} ret;
diff --git a/scripts/atomic/fallbacks/fetch_add_unless b/scripts/atomic/fallbacks/fetch_add_unless
index d2c091db7eae..fffbc0d16fdf 100755
--- a/scripts/atomic/fallbacks/fetch_add_unless
+++ b/scripts/atomic/fallbacks/fetch_add_unless
@@ -8,7 +8,7 @@ cat << EOF
* Atomically adds @a to @v, so long as @v was not already @u.
* Returns original value of @v
*/
-static inline ${int}
+static __always_inline ${int}
${atomic}_fetch_add_unless(${atomic}_t *v, ${int} a, ${int} u)
{
${int} c = ${atomic}_read(v);
diff --git a/scripts/atomic/fallbacks/inc b/scripts/atomic/fallbacks/inc
index f866b3ad2353..10751cd62829 100755
--- a/scripts/atomic/fallbacks/inc
+++ b/scripts/atomic/fallbacks/inc
@@ -1,5 +1,5 @@
cat <<EOF
-static inline ${ret}
+static __always_inline ${ret}
${atomic}_${pfx}inc${sfx}${order}(${atomic}_t *v)
{
${retstmt}${atomic}_${pfx}add${sfx}${order}(1, v);
diff --git a/scripts/atomic/fallbacks/inc_and_test b/scripts/atomic/fallbacks/inc_and_test
index 4e2068869f7e..4acea9c93604 100755
--- a/scripts/atomic/fallbacks/inc_and_test
+++ b/scripts/atomic/fallbacks/inc_and_test
@@ -7,7 +7,7 @@ cat <<EOF
* and returns true if the result is zero, or false for all
* other cases.
*/
-static inline bool
+static __always_inline bool
${atomic}_inc_and_test(${atomic}_t *v)
{
return ${atomic}_inc_return(v) == 0;
diff --git a/scripts/atomic/fallbacks/inc_not_zero b/scripts/atomic/fallbacks/inc_not_zero
index a7c45c8d107c..d9f7b97aab42 100755
--- a/scripts/atomic/fallbacks/inc_not_zero
+++ b/scripts/atomic/fallbacks/inc_not_zero
@@ -6,7 +6,7 @@ cat <<EOF
* Atomically increments @v by 1, if @v is non-zero.
* Returns true if the increment was done.
*/
-static inline bool
+static __always_inline bool
${atomic}_inc_not_zero(${atomic}_t *v)
{
return ${atomic}_add_unless(v, 1, 0);
diff --git a/scripts/atomic/fallbacks/inc_unless_negative b/scripts/atomic/fallbacks/inc_unless_negative
index 0c266e71dbd4..177a7cb51eda 100755
--- a/scripts/atomic/fallbacks/inc_unless_negative
+++ b/scripts/atomic/fallbacks/inc_unless_negative
@@ -1,5 +1,5 @@
cat <<EOF
-static inline bool
+static __always_inline bool
${atomic}_inc_unless_negative(${atomic}_t *v)
{
${int} c = ${atomic}_read(v);
diff --git a/scripts/atomic/fallbacks/read_acquire b/scripts/atomic/fallbacks/read_acquire
index 75863b5203f7..12fa83cb3a6d 100755
--- a/scripts/atomic/fallbacks/read_acquire
+++ b/scripts/atomic/fallbacks/read_acquire
@@ -1,5 +1,5 @@
cat <<EOF
-static inline ${ret}
+static __always_inline ${ret}
${atomic}_read_acquire(const ${atomic}_t *v)
{
return smp_load_acquire(&(v)->counter);
diff --git a/scripts/atomic/fallbacks/release b/scripts/atomic/fallbacks/release
index 3f628a3802d9..730d2a6d3e07 100755
--- a/scripts/atomic/fallbacks/release
+++ b/scripts/atomic/fallbacks/release
@@ -1,5 +1,5 @@
cat <<EOF
-static inline ${ret}
+static __always_inline ${ret}
${atomic}_${pfx}${name}${sfx}_release(${params})
{
__atomic_release_fence();
diff --git a/scripts/atomic/fallbacks/set_release b/scripts/atomic/fallbacks/set_release
index 45bb5e0cfc08..e5d72c717434 100755
--- a/scripts/atomic/fallbacks/set_release
+++ b/scripts/atomic/fallbacks/set_release
@@ -1,5 +1,5 @@
cat <<EOF
-static inline void
+static __always_inline void
${atomic}_set_release(${atomic}_t *v, ${int} i)
{
smp_store_release(&(v)->counter, i);
diff --git a/scripts/atomic/fallbacks/sub_and_test b/scripts/atomic/fallbacks/sub_and_test
index 289ef17a2d7a..6cfe4ed49746 100755
--- a/scripts/atomic/fallbacks/sub_and_test
+++ b/scripts/atomic/fallbacks/sub_and_test
@@ -8,7 +8,7 @@ cat <<EOF
* true if the result is zero, or false for all
* other cases.
*/
-static inline bool
+static __always_inline bool
${atomic}_sub_and_test(${int} i, ${atomic}_t *v)
{
return ${atomic}_sub_return(i, v) == 0;
diff --git a/scripts/atomic/fallbacks/try_cmpxchg b/scripts/atomic/fallbacks/try_cmpxchg
index 4ed85e2f5378..c7a26213b978 100755
--- a/scripts/atomic/fallbacks/try_cmpxchg
+++ b/scripts/atomic/fallbacks/try_cmpxchg
@@ -1,5 +1,5 @@
cat <<EOF
-static inline bool
+static __always_inline bool
${atomic}_try_cmpxchg${order}(${atomic}_t *v, ${int} *old, ${int} new)
{
${int} r, o = *old;
diff --git a/scripts/atomic/gen-atomic-fallback.sh b/scripts/atomic/gen-atomic-fallback.sh
index 1bd7c1707633..b6c6f5d306a7 100755
--- a/scripts/atomic/gen-atomic-fallback.sh
+++ b/scripts/atomic/gen-atomic-fallback.sh
@@ -149,6 +149,8 @@ cat << EOF
#ifndef _LINUX_ATOMIC_FALLBACK_H
#define _LINUX_ATOMIC_FALLBACK_H
+#include <linux/compiler.h>
+
EOF
for xchg in "xchg" "cmpxchg" "cmpxchg64"; do
diff --git a/scripts/atomic/gen-atomic-instrumented.sh b/scripts/atomic/gen-atomic-instrumented.sh
index e09812372b17..6afadf73da17 100755
--- a/scripts/atomic/gen-atomic-instrumented.sh
+++ b/scripts/atomic/gen-atomic-instrumented.sh
@@ -20,7 +20,7 @@ gen_param_check()
# We don't write to constant parameters
[ ${type#c} != ${type} ] && rw="read"
- printf "\tkasan_check_${rw}(${name}, sizeof(*${name}));\n"
+ printf "\tinstrument_atomic_${rw}(${name}, sizeof(*${name}));\n"
}
#gen_param_check(arg...)
@@ -84,7 +84,7 @@ gen_proto_order_variant()
[ ! -z "${guard}" ] && printf "#if ${guard}\n"
cat <<EOF
-static inline ${ret}
+static __always_inline ${ret}
${atomicname}(${params})
{
${checks}
@@ -107,7 +107,7 @@ cat <<EOF
#define ${xchg}(ptr, ...) \\
({ \\
typeof(ptr) __ai_ptr = (ptr); \\
- kasan_check_write(__ai_ptr, ${mult}sizeof(*__ai_ptr)); \\
+ instrument_atomic_write(__ai_ptr, ${mult}sizeof(*__ai_ptr)); \\
arch_${xchg}(__ai_ptr, __VA_ARGS__); \\
})
EOF
@@ -147,7 +147,8 @@ cat << EOF
#define _ASM_GENERIC_ATOMIC_INSTRUMENTED_H
#include <linux/build_bug.h>
-#include <linux/kasan-checks.h>
+#include <linux/compiler.h>
+#include <linux/instrumented.h>
EOF
diff --git a/scripts/atomic/gen-atomic-long.sh b/scripts/atomic/gen-atomic-long.sh
index c240a7231b2e..e318d3f92e53 100755
--- a/scripts/atomic/gen-atomic-long.sh
+++ b/scripts/atomic/gen-atomic-long.sh
@@ -46,7 +46,7 @@ gen_proto_order_variant()
local retstmt="$(gen_ret_stmt "${meta}")"
cat <<EOF
-static inline ${ret}
+static __always_inline ${ret}
atomic_long_${name}(${params})
{
${retstmt}${atomic}_${name}(${argscast});
@@ -64,6 +64,7 @@ cat << EOF
#ifndef _ASM_GENERIC_ATOMIC_LONG_H
#define _ASM_GENERIC_ATOMIC_LONG_H
+#include <linux/compiler.h>
#include <asm/types.h>
#ifdef CONFIG_64BIT
diff --git a/tools/objtool/check.c b/tools/objtool/check.c
index 32dea5f3feed..dbbd19d73b90 100644
--- a/tools/objtool/check.c
+++ b/tools/objtool/check.c
@@ -496,6 +496,24 @@ static const char *uaccess_safe_builtin[] = {
"__asan_report_store4_noabort",
"__asan_report_store8_noabort",
"__asan_report_store16_noabort",
+ /* KCSAN */
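+	/*
+	 * KCSAN instrumentation and runtime functions may be called from
+	 * uaccess-enabled regions, so objtool must consider them safe.
+	 */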
+ "kcsan_found_watchpoint",
+ "kcsan_setup_watchpoint",
+ /* KCSAN/TSAN */
+ "__tsan_func_entry",
+ "__tsan_func_exit",
+ "__tsan_read_range",
+ "__tsan_write_range",
+ "__tsan_read1",
+ "__tsan_read2",
+ "__tsan_read4",
+ "__tsan_read8",
+ "__tsan_read16",
+ "__tsan_write1",
+ "__tsan_write2",
+ "__tsan_write4",
+ "__tsan_write8",
+ "__tsan_write16",
/* KCOV */
"write_comp_data",
"__sanitizer_cov_trace_pc",