author     Huacai Chen <chenhuacai@loongson.cn>    2025-05-30 21:45:48 +0800
committer  Huacai Chen <chenhuacai@loongson.cn>    2025-05-30 21:45:48 +0800
commit     52c22661c79a7b6af7fad9f77200738fc6c51878
tree       5a6b5dbfc9160b0aa3b920b2c42df190a03b85b8
parent     a24f2fb70cb62180486ad4d74f809ff35ddd1cf9
LoongArch: Avoid using $r0/$r1 as "mask" for csrxchg
When building the kernel with LLVM, errors such as the following occasionally occur:
In file included from ./include/linux/spinlock.h:59:
In file included from ./include/linux/irqflags.h:17:
arch/loongarch/include/asm/irqflags.h:38:3: error: must not be $r0 or $r1
38 | "csrxchg %[val], %[mask], %[reg]\n\t"
| ^
<inline asm>:1:16: note: instantiated into assembly here
1 | csrxchg $a1, $ra, 0
| ^
To prevent the compiler from allocating $r0 or $r1 for the "mask" operand of the
csrxchg instruction, the 'q' constraint is needed, but Clang < 21 does not
support it. So force the mask into $t0 in the inline asm instead, which avoids
$r0/$r1 while keeping backward compatibility.
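As an illustration of the approach (not part of the patch itself): the sketch
below uses a GNU C explicit register variable to pin the "mask" operand to $t0,
so the register allocator can never hand csrxchg $r0 or $r1. The helper name
csr_xchg_crmd_ie is made up for this example; u32, CSR_CRMD_IE and
LOONGARCH_CSR_CRMD are the kernel definitions from <linux/types.h> and
<asm/loongarch.h>, and the snippet only builds with a LoongArch toolchain.

/* Sketch only: same technique as the patch, wrapped in a made-up helper. */
static inline u32 csr_xchg_crmd_ie(u32 val)
{
        /*
         * Explicit register variable: force the mask into $t0 so neither
         * $r0 ($zero) nor $r1 ($ra) can be chosen for csrxchg's rj operand.
         */
        register u32 mask asm("t0") = CSR_CRMD_IE;

        __asm__ __volatile__(
                "csrxchg %[val], %[mask], %[reg]\n\t"
                : [val] "+r" (val)
                : [mask] "r" (mask), [reg] "i" (LOONGARCH_CSR_CRMD)
                : "memory");

        return val;     /* csrxchg leaves the previous CRMD value in val */
}

Compilers that already support the LoongArch 'q' constraint could express the
same restriction directly in the operand constraint; pinning $t0 simply keeps
Clang < 21 working as well.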
Cc: stable@vger.kernel.org
Link: https://github.com/llvm/llvm-project/pull/141037
Reviewed-by: Yanteng Si <si.yanteng@linux.dev>
Suggested-by: WANG Rui <wangrui@loongson.cn>
Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
-rw-r--r--   arch/loongarch/include/asm/irqflags.h   16
1 file changed, 12 insertions(+), 4 deletions(-)
diff --git a/arch/loongarch/include/asm/irqflags.h b/arch/loongarch/include/asm/irqflags.h
index 319a8c616f1f..003172b8406b 100644
--- a/arch/loongarch/include/asm/irqflags.h
+++ b/arch/loongarch/include/asm/irqflags.h
@@ -14,40 +14,48 @@
 static inline void arch_local_irq_enable(void)
 {
         u32 flags = CSR_CRMD_IE;
+        register u32 mask asm("t0") = CSR_CRMD_IE;
+
         __asm__ __volatile__(
                 "csrxchg %[val], %[mask], %[reg]\n\t"
                 : [val] "+r" (flags)
-                : [mask] "r" (CSR_CRMD_IE), [reg] "i" (LOONGARCH_CSR_CRMD)
+                : [mask] "r" (mask), [reg] "i" (LOONGARCH_CSR_CRMD)
                 : "memory");
 }
 
 static inline void arch_local_irq_disable(void)
 {
         u32 flags = 0;
+        register u32 mask asm("t0") = CSR_CRMD_IE;
+
         __asm__ __volatile__(
                 "csrxchg %[val], %[mask], %[reg]\n\t"
                 : [val] "+r" (flags)
-                : [mask] "r" (CSR_CRMD_IE), [reg] "i" (LOONGARCH_CSR_CRMD)
+                : [mask] "r" (mask), [reg] "i" (LOONGARCH_CSR_CRMD)
                 : "memory");
 }
 
 static inline unsigned long arch_local_irq_save(void)
 {
         u32 flags = 0;
+        register u32 mask asm("t0") = CSR_CRMD_IE;
+
         __asm__ __volatile__(
                 "csrxchg %[val], %[mask], %[reg]\n\t"
                 : [val] "+r" (flags)
-                : [mask] "r" (CSR_CRMD_IE), [reg] "i" (LOONGARCH_CSR_CRMD)
+                : [mask] "r" (mask), [reg] "i" (LOONGARCH_CSR_CRMD)
                 : "memory");
         return flags;
 }
 
 static inline void arch_local_irq_restore(unsigned long flags)
 {
+        register u32 mask asm("t0") = CSR_CRMD_IE;
+
         __asm__ __volatile__(
                 "csrxchg %[val], %[mask], %[reg]\n\t"
                 : [val] "+r" (flags)
-                : [mask] "r" (CSR_CRMD_IE), [reg] "i" (LOONGARCH_CSR_CRMD)
+                : [mask] "r" (mask), [reg] "i" (LOONGARCH_CSR_CRMD)
                 : "memory");
 }
 