author    Linus Torvalds <torvalds@linux-foundation.org>    2024-05-17 08:53:47 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>    2024-05-17 08:53:47 -0700
commit    4853f1f6ace32c68a04287353e428c4cfc3fa8ed (patch)
tree      15bf600ce002c8aca583d9525dfd8241447987e0
parent    ea5f6ad9ad9645733b72ab53a98e719b460d36a6 (diff)
parent    f698d314eec2e16b980128b503d96bd73df77e90 (diff)
Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rmk/linux
Pull ARM updates from Russell King:

 - Updates to AMBA bus subsystem to drop .owner struct device_driver
   initialisations, moving that to code instead.

 - Add LPAE privileged-access-never support

 - Add support for Clang CFI

 - clkdev: report over-sized device or connection strings

* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rmk/linux: (36 commits)
  ARM: 9398/1: Fix userspace enter on LPAE with CC_OPTIMIZE_FOR_SIZE=y
  clkdev: report over-sized strings when creating clkdev entries
  ARM: 9393/1: mm: Use conditionals for CFI branches
  ARM: 9392/2: Support CLANG CFI
  ARM: 9391/2: hw_breakpoint: Handle CFI breakpoints
  ARM: 9390/2: lib: Annotate loop delay instructions for CFI
  ARM: 9389/2: mm: Define prototypes for all per-processor calls
  ARM: 9388/2: mm: Type-annotate all per-processor assembly routines
  ARM: 9387/2: mm: Rewrite cacheflush vtables in CFI safe C
  ARM: 9386/2: mm: Use symbol alias for cache functions
  ARM: 9385/2: mm: Type-annotate all cache assembly routines
  ARM: 9384/2: mm: Make tlbflush routines CFI safe
  ARM: 9382/1: ftrace: Define ftrace_stub_graph
  ARM: 9358/2: Implement PAN for LPAE by TTBR0 page table walks disablement
  ARM: 9357/2: Reduce the number of #ifdef CONFIG_CPU_SW_DOMAIN_PAN
  ARM: 9356/2: Move asm statements accessing TTBCR into C functions
  ARM: 9355/2: Add TTBCR_* definitions to pgtable-3level-hwdef.h
  ARM: 9379/1: coresight: tpda: drop owner assignment
  ARM: 9378/1: coresight: etm4x: drop owner assignment
  ARM: 9377/1: hwrng: nomadik: drop owner assignment
  ...
-rw-r--r--  arch/arm/Kconfig | 23
-rw-r--r--  arch/arm/include/asm/assembler.h | 1
-rw-r--r--  arch/arm/include/asm/glue-cache.h | 28
-rw-r--r--  arch/arm/include/asm/hw_breakpoint.h | 1
-rw-r--r--  arch/arm/include/asm/pgtable-3level-hwdef.h | 26
-rw-r--r--  arch/arm/include/asm/proc-fns.h | 12
-rw-r--r--  arch/arm/include/asm/ptrace.h | 1
-rw-r--r--  arch/arm/include/asm/uaccess-asm.h | 58
-rw-r--r--  arch/arm/include/asm/uaccess.h | 45
-rw-r--r--  arch/arm/kernel/asm-offsets.c | 1
-rw-r--r--  arch/arm/kernel/entry-ftrace.S | 4
-rw-r--r--  arch/arm/kernel/hw_breakpoint.c | 35
-rw-r--r--  arch/arm/kernel/suspend.c | 8
-rw-r--r--  arch/arm/lib/csumpartialcopyuser.S | 20
-rw-r--r--  arch/arm/lib/delay-loop.S | 16
-rw-r--r--  arch/arm/mm/Makefile | 3
-rw-r--r--  arch/arm/mm/cache-b15-rac.c | 1
-rw-r--r--  arch/arm/mm/cache-fa.S | 47
-rw-r--r--  arch/arm/mm/cache-nop.S | 61
-rw-r--r--  arch/arm/mm/cache-v4.S | 55
-rw-r--r--  arch/arm/mm/cache-v4wb.S | 49
-rw-r--r--  arch/arm/mm/cache-v4wt.S | 55
-rw-r--r--  arch/arm/mm/cache-v6.S | 51
-rw-r--r--  arch/arm/mm/cache-v7.S | 76
-rw-r--r--  arch/arm/mm/cache-v7m.S | 55
-rw-r--r--  arch/arm/mm/cache.c | 663
-rw-r--r--  arch/arm/mm/fault.c | 29
-rw-r--r--  arch/arm/mm/mmu.c | 7
-rw-r--r--  arch/arm/mm/proc-arm1020.S | 69
-rw-r--r--  arch/arm/mm/proc-arm1020e.S | 70
-rw-r--r--  arch/arm/mm/proc-arm1022.S | 69
-rw-r--r--  arch/arm/mm/proc-arm1026.S | 70
-rw-r--r--  arch/arm/mm/proc-arm720.S | 25
-rw-r--r--  arch/arm/mm/proc-arm740.S | 26
-rw-r--r--  arch/arm/mm/proc-arm7tdmi.S | 34
-rw-r--r--  arch/arm/mm/proc-arm920.S | 76
-rw-r--r--  arch/arm/mm/proc-arm922.S | 69
-rw-r--r--  arch/arm/mm/proc-arm925.S | 66
-rw-r--r--  arch/arm/mm/proc-arm926.S | 75
-rw-r--r--  arch/arm/mm/proc-arm940.S | 69
-rw-r--r--  arch/arm/mm/proc-arm946.S | 65
-rw-r--r--  arch/arm/mm/proc-arm9tdmi.S | 26
-rw-r--r--  arch/arm/mm/proc-fa526.S | 24
-rw-r--r--  arch/arm/mm/proc-feroceon.S | 105
-rw-r--r--  arch/arm/mm/proc-macros.S | 33
-rw-r--r--  arch/arm/mm/proc-mohawk.S | 74
-rw-r--r--  arch/arm/mm/proc-sa110.S | 23
-rw-r--r--  arch/arm/mm/proc-sa1100.S | 31
-rw-r--r--  arch/arm/mm/proc-v6.S | 31
-rw-r--r--  arch/arm/mm/proc-v7-2level.S | 8
-rw-r--r--  arch/arm/mm/proc-v7-3level.S | 8
-rw-r--r--  arch/arm/mm/proc-v7.S | 66
-rw-r--r--  arch/arm/mm/proc-v7m.S | 41
-rw-r--r--  arch/arm/mm/proc-xsc3.S | 75
-rw-r--r--  arch/arm/mm/proc-xscale.S | 125
-rw-r--r--  arch/arm/mm/proc.c | 500
-rw-r--r--  arch/arm/mm/tlb-fa.S | 12
-rw-r--r--  arch/arm/mm/tlb-v4.S | 15
-rw-r--r--  arch/arm/mm/tlb-v4wb.S | 12
-rw-r--r--  arch/arm/mm/tlb-v4wbi.S | 12
-rw-r--r--  arch/arm/mm/tlb-v6.S | 12
-rw-r--r--  arch/arm/mm/tlb-v7.S | 14
-rw-r--r--  arch/arm/mm/tlb.c | 84
-rw-r--r--  drivers/amba/bus.c | 11
-rw-r--r--  drivers/char/hw_random/nomadik-rng.c | 1
-rw-r--r--  drivers/clk/clkdev.c | 35
-rw-r--r--  drivers/dma/pl330.c | 1
-rw-r--r--  drivers/hwtracing/coresight/coresight-catu.c | 1
-rw-r--r--  drivers/hwtracing/coresight/coresight-cti-core.c | 1
-rw-r--r--  drivers/hwtracing/coresight/coresight-etb10.c | 1
-rw-r--r--  drivers/hwtracing/coresight/coresight-etm3x-core.c | 1
-rw-r--r--  drivers/hwtracing/coresight/coresight-etm4x-core.c | 1
-rw-r--r--  drivers/hwtracing/coresight/coresight-funnel.c | 1
-rw-r--r--  drivers/hwtracing/coresight/coresight-replicator.c | 1
-rw-r--r--  drivers/hwtracing/coresight/coresight-stm.c | 1
-rw-r--r--  drivers/hwtracing/coresight/coresight-tmc-core.c | 1
-rw-r--r--  drivers/hwtracing/coresight/coresight-tpda.c | 1
-rw-r--r--  drivers/hwtracing/coresight/coresight-tpdm.c | 1
-rw-r--r--  drivers/hwtracing/coresight/coresight-tpiu.c | 1
-rw-r--r--  drivers/i2c/busses/i2c-nomadik.c | 1
-rw-r--r--  drivers/input/serio/ambakmi.c | 1
-rw-r--r--  drivers/memory/pl353-smc.c | 1
-rw-r--r--  drivers/vfio/platform/vfio_amba.c | 1
-rw-r--r--  include/linux/amba/bus.h | 11
84 files changed, 2607 insertions, 1013 deletions
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index b14aed3a17ab..749d6e3788b4 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -35,6 +35,7 @@ config ARM
select ARCH_OPTIONAL_KERNEL_RWX if ARCH_HAS_STRICT_KERNEL_RWX
select ARCH_OPTIONAL_KERNEL_RWX_DEFAULT if CPU_V7
select ARCH_SUPPORTS_ATOMIC_RMW
+ select ARCH_SUPPORTS_CFI_CLANG
select ARCH_SUPPORTS_HUGETLBFS if ARM_LPAE
select ARCH_SUPPORTS_PER_VMA_LOCK
select ARCH_USE_BUILTIN_BSWAP
@@ -1233,9 +1234,9 @@ config HIGHPTE
consumed by page tables. Setting this option will allow
user-space 2nd level page tables to reside in high memory.
-config CPU_SW_DOMAIN_PAN
- bool "Enable use of CPU domains to implement privileged no-access"
- depends on MMU && !ARM_LPAE
+config ARM_PAN
+ bool "Enable privileged no-access"
+ depends on MMU
default y
help
Increase kernel security by ensuring that normal kernel accesses
@@ -1244,10 +1245,26 @@ config CPU_SW_DOMAIN_PAN
by ensuring that magic values (such as LIST_POISON) will always
fault when dereferenced.
+ The implementation uses CPU domains when !CONFIG_ARM_LPAE and
+ disabling of TTBR0 page table walks with CONFIG_ARM_LPAE.
+
+config CPU_SW_DOMAIN_PAN
+ def_bool y
+ depends on ARM_PAN && !ARM_LPAE
+ help
+ Enable use of CPU domains to implement privileged no-access.
+
CPUs with low-vector mappings use a best-efforts implementation.
Their lower 1MB needs to remain accessible for the vectors, but
the remainder of userspace will become appropriately inaccessible.
+config CPU_TTBR0_PAN
+ def_bool y
+ depends on ARM_PAN && ARM_LPAE
+ help
+ Enable privileged no-access by disabling TTBR0 page table walks when
+ running in kernel mode.
+
config HW_PERF_EVENTS
def_bool y
depends on ARM_PMU
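The two backends above are mutually exclusive: both are def_bool y gated on ARM_PAN, so the LPAE setting alone decides which one is built. A minimal sketch of the resulting selection, using IS_ENABLED() on the symbols from this hunk (the helper function itself is hypothetical):

    #include <linux/kconfig.h>

    /* Hypothetical helper: report which PAN backend the Kconfig logic
     * above selects. Exactly one branch is taken when ARM_PAN=y. */
    static const char *arm_pan_backend(void)
    {
            if (IS_ENABLED(CONFIG_CPU_SW_DOMAIN_PAN))
                    return "CPU domains (classic MMU, !LPAE)";
            if (IS_ENABLED(CONFIG_CPU_TTBR0_PAN))
                    return "TTBR0 table walk disable (LPAE)";
            return "PAN disabled";
    }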
diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h
index aebe2c8f6a68..d33c1e24e00b 100644
--- a/arch/arm/include/asm/assembler.h
+++ b/arch/arm/include/asm/assembler.h
@@ -21,6 +21,7 @@
#include <asm/opcodes-virt.h>
#include <asm/asm-offsets.h>
#include <asm/page.h>
+#include <asm/pgtable.h>
#include <asm/thread_info.h>
#include <asm/uaccess-asm.h>
diff --git a/arch/arm/include/asm/glue-cache.h b/arch/arm/include/asm/glue-cache.h
index 724f8dac1e5b..4186fbf7341f 100644
--- a/arch/arm/include/asm/glue-cache.h
+++ b/arch/arm/include/asm/glue-cache.h
@@ -118,6 +118,10 @@
# define MULTI_CACHE 1
#endif
+#ifdef CONFIG_CPU_CACHE_NOP
+# define MULTI_CACHE 1
+#endif
+
#if defined(CONFIG_CPU_V7M)
# define MULTI_CACHE 1
#endif
@@ -126,29 +130,15 @@
#error Unknown cache maintenance model
#endif
-#ifndef __ASSEMBLER__
-static inline void nop_flush_icache_all(void) { }
-static inline void nop_flush_kern_cache_all(void) { }
-static inline void nop_flush_kern_cache_louis(void) { }
-static inline void nop_flush_user_cache_all(void) { }
-static inline void nop_flush_user_cache_range(unsigned long a,
- unsigned long b, unsigned int c) { }
-
-static inline void nop_coherent_kern_range(unsigned long a, unsigned long b) { }
-static inline int nop_coherent_user_range(unsigned long a,
- unsigned long b) { return 0; }
-static inline void nop_flush_kern_dcache_area(void *a, size_t s) { }
-
-static inline void nop_dma_flush_range(const void *a, const void *b) { }
-
-static inline void nop_dma_map_area(const void *s, size_t l, int f) { }
-static inline void nop_dma_unmap_area(const void *s, size_t l, int f) { }
-#endif
-
#ifndef MULTI_CACHE
#define __cpuc_flush_icache_all __glue(_CACHE,_flush_icache_all)
#define __cpuc_flush_kern_all __glue(_CACHE,_flush_kern_cache_all)
+/* This function only has a dedicated assembly callback on the v7 cache */
+#ifdef CONFIG_CPU_CACHE_V7
#define __cpuc_flush_kern_louis __glue(_CACHE,_flush_kern_cache_louis)
+#else
+#define __cpuc_flush_kern_louis __glue(_CACHE,_flush_kern_cache_all)
+#endif
#define __cpuc_flush_user_all __glue(_CACHE,_flush_user_cache_all)
#define __cpuc_flush_user_range __glue(_CACHE,_flush_user_cache_range)
#define __cpuc_coherent_kern_range __glue(_CACHE,_coherent_kern_range)
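The louis fallback above means that on every cache model except v7, flushing to the Level of Unification Inner Shareable silently becomes a full flush. A sketch of the macro plumbing, assuming a single-cache build for the FA model (the two-level __glue indirection is from <asm/glue.h>):

    #define ____glue(name, fn)      name##fn
    #define __glue(name, fn)        ____glue(name, fn)  /* expand args first */
    #define _CACHE fa           /* assumption: a CONFIG_CPU_CACHE_FA build */

    /* Without CONFIG_CPU_CACHE_V7 both of these now resolve to
     * fa_flush_kern_cache_all: */
    #define __cpuc_flush_kern_all   __glue(_CACHE, _flush_kern_cache_all)
    #define __cpuc_flush_kern_louis __glue(_CACHE, _flush_kern_cache_all)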
diff --git a/arch/arm/include/asm/hw_breakpoint.h b/arch/arm/include/asm/hw_breakpoint.h
index 62358d3ca0a8..e7f9961c53b2 100644
--- a/arch/arm/include/asm/hw_breakpoint.h
+++ b/arch/arm/include/asm/hw_breakpoint.h
@@ -84,6 +84,7 @@ static inline void decode_ctrl_reg(u32 reg,
#define ARM_DSCR_MOE(x) ((x >> 2) & 0xf)
#define ARM_ENTRY_BREAKPOINT 0x1
#define ARM_ENTRY_ASYNC_WATCHPOINT 0x2
+#define ARM_ENTRY_CFI_BREAKPOINT 0x3
#define ARM_ENTRY_SYNC_WATCHPOINT 0xa
/* DSCR monitor/halting bits. */
diff --git a/arch/arm/include/asm/pgtable-3level-hwdef.h b/arch/arm/include/asm/pgtable-3level-hwdef.h
index 2f35b4eddaa8..323ad811732e 100644
--- a/arch/arm/include/asm/pgtable-3level-hwdef.h
+++ b/arch/arm/include/asm/pgtable-3level-hwdef.h
@@ -74,6 +74,7 @@
#define PHYS_MASK_SHIFT (40)
#define PHYS_MASK ((1ULL << PHYS_MASK_SHIFT) - 1)
+#ifndef CONFIG_CPU_TTBR0_PAN
/*
* TTBR0/TTBR1 split (PAGE_OFFSET):
* 0x40000000: T0SZ = 2, T1SZ = 0 (not used)
@@ -93,5 +94,30 @@
#endif
#define TTBR1_SIZE (((PAGE_OFFSET >> 30) - 1) << 16)
+#else
+/*
+ * With CONFIG_CPU_TTBR0_PAN enabled, TTBR1 is only used during uaccess
+ * disabled regions when TTBR0 is disabled.
+ */
+#define TTBR1_OFFSET 0 /* pointing to swapper_pg_dir */
+#define TTBR1_SIZE 0 /* TTBR1 size controlled via TTBCR.T0SZ */
+#endif
+
+/*
+ * TTBCR register bits.
+ */
+#define TTBCR_EAE (1 << 31)
+#define TTBCR_IMP (1 << 30)
+#define TTBCR_SH1_MASK (3 << 28)
+#define TTBCR_ORGN1_MASK (3 << 26)
+#define TTBCR_IRGN1_MASK (3 << 24)
+#define TTBCR_EPD1 (1 << 23)
+#define TTBCR_A1 (1 << 22)
+#define TTBCR_T1SZ_MASK (7 << 16)
+#define TTBCR_SH0_MASK (3 << 12)
+#define TTBCR_ORGN0_MASK (3 << 10)
+#define TTBCR_IRGN0_MASK (3 << 8)
+#define TTBCR_EPD0 (1 << 7)
+#define TTBCR_T0SZ_MASK (7 << 0)
#endif
diff --git a/arch/arm/include/asm/proc-fns.h b/arch/arm/include/asm/proc-fns.h
index 280396483f5d..b4986a23d852 100644
--- a/arch/arm/include/asm/proc-fns.h
+++ b/arch/arm/include/asm/proc-fns.h
@@ -178,6 +178,18 @@ extern void cpu_resume(void);
})
#endif
+static inline unsigned int cpu_get_ttbcr(void)
+{
+ unsigned int ttbcr;
+ asm("mrc p15, 0, %0, c2, c0, 2" : "=r" (ttbcr));
+ return ttbcr;
+}
+
+static inline void cpu_set_ttbcr(unsigned int ttbcr)
+{
+ asm volatile("mcr p15, 0, %0, c2, c0, 2" : : "r" (ttbcr) : "memory");
+}
+
#else /*!CONFIG_MMU */
#define cpu_switch_mm(pgd,mm) { }
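cpu_get_ttbcr() and cpu_set_ttbcr() give C code the same TTBCR access the assembly macros get through mrc/mcr. A minimal usage sketch, assuming an LPAE build and the TTBCR_* masks added earlier in this series:

    /* Sketch: temporarily re-enable TTBR0 walks, then restore the
     * previous PAN state -- the same sequence uaccess_save_and_enable()
     * and uaccess_restore() perform below. */
    unsigned int saved = cpu_get_ttbcr();

    cpu_set_ttbcr(saved & ~(TTBCR_A1 | TTBCR_EPD0 | TTBCR_T0SZ_MASK));
    isb();
    /* ... user-space access happens here ... */
    cpu_set_ttbcr(saved);
    isb();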
diff --git a/arch/arm/include/asm/ptrace.h b/arch/arm/include/asm/ptrace.h
index 14a38cc67e0b..6eb311fb2da0 100644
--- a/arch/arm/include/asm/ptrace.h
+++ b/arch/arm/include/asm/ptrace.h
@@ -20,6 +20,7 @@ struct pt_regs {
struct svc_pt_regs {
struct pt_regs regs;
u32 dacr;
+ u32 ttbcr;
};
#define to_svc_pt_regs(r) container_of(r, struct svc_pt_regs, regs)
diff --git a/arch/arm/include/asm/uaccess-asm.h b/arch/arm/include/asm/uaccess-asm.h
index 65da32e1f1c1..4bccd895d954 100644
--- a/arch/arm/include/asm/uaccess-asm.h
+++ b/arch/arm/include/asm/uaccess-asm.h
@@ -39,8 +39,9 @@
#endif
.endm
+#if defined(CONFIG_CPU_SW_DOMAIN_PAN)
+
.macro uaccess_disable, tmp, isb=1
-#ifdef CONFIG_CPU_SW_DOMAIN_PAN
/*
* Whenever we re-enter userspace, the domains should always be
* set appropriately.
@@ -50,11 +51,9 @@
.if \isb
instr_sync
.endif
-#endif
.endm
.macro uaccess_enable, tmp, isb=1
-#ifdef CONFIG_CPU_SW_DOMAIN_PAN
/*
* Whenever we re-enter userspace, the domains should always be
* set appropriately.
@@ -64,15 +63,61 @@
.if \isb
instr_sync
.endif
-#endif
.endm
+#elif defined(CONFIG_CPU_TTBR0_PAN)
+
+ .macro uaccess_disable, tmp, isb=1
+ /*
+ * Disable TTBR0 page table walks (EPD0 = 1), use the reserved ASID
+ * from TTBR1 (A1 = 1) and enable TTBR1 page table walks for kernel
+ * addresses by reducing TTBR0 range to 32MB (T0SZ = 7).
+ */
+ mrc p15, 0, \tmp, c2, c0, 2 @ read TTBCR
+ orr \tmp, \tmp, #TTBCR_EPD0 | TTBCR_T0SZ_MASK
+ orr \tmp, \tmp, #TTBCR_A1
+ mcr p15, 0, \tmp, c2, c0, 2 @ write TTBCR
+ .if \isb
+ instr_sync
+ .endif
+ .endm
+
+ .macro uaccess_enable, tmp, isb=1
+ /*
+ * Enable TTBR0 page table walks (T0SZ = 0, EPD0 = 0) and ASID from
+ * TTBR0 (A1 = 0).
+ */
+ mrc p15, 0, \tmp, c2, c0, 2 @ read TTBCR
+ bic \tmp, \tmp, #TTBCR_EPD0 | TTBCR_T0SZ_MASK
+ bic \tmp, \tmp, #TTBCR_A1
+ mcr p15, 0, \tmp, c2, c0, 2 @ write TTBCR
+ .if \isb
+ instr_sync
+ .endif
+ .endm
+
+#else
+
+ .macro uaccess_disable, tmp, isb=1
+ .endm
+
+ .macro uaccess_enable, tmp, isb=1
+ .endm
+
+#endif
+
#if defined(CONFIG_CPU_SW_DOMAIN_PAN) || defined(CONFIG_CPU_USE_DOMAINS)
#define DACR(x...) x
#else
#define DACR(x...)
#endif
+#ifdef CONFIG_CPU_TTBR0_PAN
+#define PAN(x...) x
+#else
+#define PAN(x...)
+#endif
+
/*
* Save the address limit on entry to a privileged exception.
*
@@ -86,6 +131,8 @@
.macro uaccess_entry, tsk, tmp0, tmp1, tmp2, disable
DACR( mrc p15, 0, \tmp0, c3, c0, 0)
DACR( str \tmp0, [sp, #SVC_DACR])
+ PAN( mrc p15, 0, \tmp0, c2, c0, 2)
+ PAN( str \tmp0, [sp, #SVC_TTBCR])
.if \disable && IS_ENABLED(CONFIG_CPU_SW_DOMAIN_PAN)
/* kernel=client, user=no access */
mov \tmp2, #DACR_UACCESS_DISABLE
@@ -104,8 +151,11 @@
.macro uaccess_exit, tsk, tmp0, tmp1
DACR( ldr \tmp0, [sp, #SVC_DACR])
DACR( mcr p15, 0, \tmp0, c3, c0, 0)
+ PAN( ldr \tmp0, [sp, #SVC_TTBCR])
+ PAN( mcr p15, 0, \tmp0, c2, c0, 2)
.endm
#undef DACR
+#undef PAN
#endif /* __ASM_UACCESS_ASM_H__ */
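The T0SZ = 7 used by uaccess_disable is not arbitrary: under LPAE, TTBR0 translates an input range of 2^(32 - T0SZ) bytes, so shrinking the (already walk-disabled) window lets TTBR1 translate kernel addresses. Checking the arithmetic:

    2^(32 - T0SZ) bytes covered by TTBR0:
      T0SZ = 0  ->  2^32 = 4 GB   (normal operation, whole low range)
      T0SZ = 7  ->  2^25 = 32 MB  (uaccess disabled; EPD0 = 1 makes any
                                   walk in that window fault regardless)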
diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
index 9556d04387f7..6c9c16d767cf 100644
--- a/arch/arm/include/asm/uaccess.h
+++ b/arch/arm/include/asm/uaccess.h
@@ -14,6 +14,8 @@
#include <asm/domain.h>
#include <asm/unaligned.h>
#include <asm/unified.h>
+#include <asm/pgtable.h>
+#include <asm/proc-fns.h>
#include <asm/compiler.h>
#include <asm/extable.h>
@@ -24,9 +26,10 @@
* perform such accesses (eg, via list poison values) which could then
* be exploited for privilege escalation.
*/
+#if defined(CONFIG_CPU_SW_DOMAIN_PAN)
+
static __always_inline unsigned int uaccess_save_and_enable(void)
{
-#ifdef CONFIG_CPU_SW_DOMAIN_PAN
unsigned int old_domain = get_domain();
/* Set the current domain access to permit user accesses */
@@ -34,19 +37,49 @@ static __always_inline unsigned int uaccess_save_and_enable(void)
domain_val(DOMAIN_USER, DOMAIN_CLIENT));
return old_domain;
-#else
- return 0;
-#endif
}
static __always_inline void uaccess_restore(unsigned int flags)
{
-#ifdef CONFIG_CPU_SW_DOMAIN_PAN
/* Restore the user access mask */
set_domain(flags);
-#endif
}
+#elif defined(CONFIG_CPU_TTBR0_PAN)
+
+static __always_inline unsigned int uaccess_save_and_enable(void)
+{
+ unsigned int old_ttbcr = cpu_get_ttbcr();
+
+ /*
+ * Enable TTBR0 page table walks (T0SZ = 0, EPD0 = 0) and ASID from
+ * TTBR0 (A1 = 0).
+ */
+ cpu_set_ttbcr(old_ttbcr & ~(TTBCR_A1 | TTBCR_EPD0 | TTBCR_T0SZ_MASK));
+ isb();
+
+ return old_ttbcr;
+}
+
+static inline void uaccess_restore(unsigned int flags)
+{
+ cpu_set_ttbcr(flags);
+ isb();
+}
+
+#else
+
+static inline unsigned int uaccess_save_and_enable(void)
+{
+ return 0;
+}
+
+static inline void uaccess_restore(unsigned int flags)
+{
+}
+
+#endif
+
/*
* These two are intentionally not defined anywhere - if the kernel
* code generates any references to them, that's a bug.
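Every user access is expected to be bracketed by this pair, whichever backend is compiled in. A minimal sketch modelled on the existing ARM uaccess wrappers (the outer function name is ours; arm_copy_from_user() is the real low-level helper):

    static inline unsigned long
    sketch_copy_from_user(void *to, const void __user *from, unsigned long n)
    {
            unsigned int __ua_flags = uaccess_save_and_enable();

            /* PAN is open only for the duration of the copy */
            n = arm_copy_from_user(to, from, n);
            uaccess_restore(__ua_flags);
            return n;
    }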
diff --git a/arch/arm/kernel/asm-offsets.c b/arch/arm/kernel/asm-offsets.c
index 4915662842ff..4853875740d0 100644
--- a/arch/arm/kernel/asm-offsets.c
+++ b/arch/arm/kernel/asm-offsets.c
@@ -85,6 +85,7 @@ int main(void)
DEFINE(S_OLD_R0, offsetof(struct pt_regs, ARM_ORIG_r0));
DEFINE(PT_REGS_SIZE, sizeof(struct pt_regs));
DEFINE(SVC_DACR, offsetof(struct svc_pt_regs, dacr));
+ DEFINE(SVC_TTBCR, offsetof(struct svc_pt_regs, ttbcr));
DEFINE(SVC_REGS_SIZE, sizeof(struct svc_pt_regs));
BLANK();
DEFINE(SIGFRAME_RC3_OFFSET, offsetof(struct sigframe, retcode[3]));
diff --git a/arch/arm/kernel/entry-ftrace.S b/arch/arm/kernel/entry-ftrace.S
index 3e7bcaca5e07..bc598e3d8dd2 100644
--- a/arch/arm/kernel/entry-ftrace.S
+++ b/arch/arm/kernel/entry-ftrace.S
@@ -271,6 +271,10 @@ ENTRY(ftrace_stub)
ret lr
ENDPROC(ftrace_stub)
+ENTRY(ftrace_stub_graph)
+ ret lr
+ENDPROC(ftrace_stub_graph)
+
#ifdef CONFIG_DYNAMIC_FTRACE
__INIT
diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c
index 054e9199f30d..a12efd0f43e8 100644
--- a/arch/arm/kernel/hw_breakpoint.c
+++ b/arch/arm/kernel/hw_breakpoint.c
@@ -17,6 +17,7 @@
#include <linux/perf_event.h>
#include <linux/hw_breakpoint.h>
#include <linux/smp.h>
+#include <linux/cfi.h>
#include <linux/cpu_pm.h>
#include <linux/coresight.h>
@@ -903,6 +904,37 @@ unlock:
watchpoint_single_step_handler(addr);
}
+#ifdef CONFIG_CFI_CLANG
+static void hw_breakpoint_cfi_handler(struct pt_regs *regs)
+{
+ /*
+ * TODO: implementing target and type to pass to CFI using the more
+ * elaborate report_cfi_failure() requires compiler work. To be able
+ * to properly extract target information the compiler needs to
+ * emit a stable instructions sequence for the CFI checks so we can
+ * decode the instructions preceding the trap and figure out which
+ * registers were used.
+ */
+
+ switch (report_cfi_failure_noaddr(regs, instruction_pointer(regs))) {
+ case BUG_TRAP_TYPE_BUG:
+ die("Oops - CFI", regs, 0);
+ break;
+ case BUG_TRAP_TYPE_WARN:
+ /* Skip the breaking instruction */
+ instruction_pointer(regs) += 4;
+ break;
+ default:
+ die("Unknown CFI error", regs, 0);
+ break;
+ }
+}
+#else
+static void hw_breakpoint_cfi_handler(struct pt_regs *regs)
+{
+}
+#endif
+
/*
* Called from either the Data Abort Handler [watchpoint] or the
* Prefetch Abort Handler [breakpoint] with interrupts disabled.
@@ -932,6 +964,9 @@ static int hw_breakpoint_pending(unsigned long addr, unsigned int fsr,
case ARM_ENTRY_SYNC_WATCHPOINT:
watchpoint_handler(addr, fsr, regs);
break;
+ case ARM_ENTRY_CFI_BREAKPOINT:
+ hw_breakpoint_cfi_handler(regs);
+ break;
default:
ret = 1; /* Unhandled fault. */
}
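For reference, the kind of bug that lands in the new ARM_ENTRY_CFI_BREAKPOINT path is an indirect call through a pointer whose prototype does not match the callee, so the compiler-emitted KCFI check fails and traps. A contrived sketch (assuming a CONFIG_CFI_CLANG build; all symbols here are ours):

    static int add_one(int x)
    {
            return x + 1;
    }

    int cfi_demo(void)
    {
            /* Mismatched prototype: the KCFI hash check before the
             * indirect call fails and the resulting trap is routed to
             * hw_breakpoint_cfi_handler() above. */
            long (*fp)(long) = (long (*)(long))add_one;

            return fp(1);
    }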
diff --git a/arch/arm/kernel/suspend.c b/arch/arm/kernel/suspend.c
index c3ec3861dd07..58a6441b58c4 100644
--- a/arch/arm/kernel/suspend.c
+++ b/arch/arm/kernel/suspend.c
@@ -12,6 +12,7 @@
#include <asm/smp_plat.h>
#include <asm/suspend.h>
#include <asm/tlbflush.h>
+#include <asm/uaccess.h>
extern int __cpu_suspend(unsigned long, int (*)(unsigned long), u32 cpuid);
extern void cpu_resume_mmu(void);
@@ -27,6 +28,13 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
return -EINVAL;
/*
* Needed for the MMU disabling/enabling code to be able to run from
+ * TTBR0 addresses.
+ */
+ if (IS_ENABLED(CONFIG_CPU_TTBR0_PAN))
+ uaccess_save_and_enable();
+
+ /*
* Function graph tracer state gets inconsistent when the kernel
* calls functions that never return (aka suspend finishers) hence
* disable graph tracing during their execution.
diff --git a/arch/arm/lib/csumpartialcopyuser.S b/arch/arm/lib/csumpartialcopyuser.S
index 6928781e6bee..c289bde04743 100644
--- a/arch/arm/lib/csumpartialcopyuser.S
+++ b/arch/arm/lib/csumpartialcopyuser.S
@@ -13,7 +13,8 @@
.text
-#ifdef CONFIG_CPU_SW_DOMAIN_PAN
+#if defined(CONFIG_CPU_SW_DOMAIN_PAN)
+
.macro save_regs
mrc p15, 0, ip, c3, c0, 0
stmfd sp!, {r1, r2, r4 - r8, ip, lr}
@@ -25,7 +26,23 @@
mcr p15, 0, ip, c3, c0, 0
ret lr
.endm
+
+#elif defined(CONFIG_CPU_TTBR0_PAN)
+
+ .macro save_regs
+ mrc p15, 0, ip, c2, c0, 2 @ read TTBCR
+ stmfd sp!, {r1, r2, r4 - r8, ip, lr}
+ uaccess_enable ip
+ .endm
+
+ .macro load_regs
+ ldmfd sp!, {r1, r2, r4 - r8, ip, lr}
+ mcr p15, 0, ip, c2, c0, 2 @ restore TTBCR
+ ret lr
+ .endm
+
#else
+
.macro save_regs
stmfd sp!, {r1, r2, r4 - r8, lr}
.endm
@@ -33,6 +50,7 @@
.macro load_regs
ldmfd sp!, {r1, r2, r4 - r8, pc}
.endm
+
#endif
.macro load1b, reg1
diff --git a/arch/arm/lib/delay-loop.S b/arch/arm/lib/delay-loop.S
index 3ac05177d097..33b08ca1c242 100644
--- a/arch/arm/lib/delay-loop.S
+++ b/arch/arm/lib/delay-loop.S
@@ -5,6 +5,7 @@
* Copyright (C) 1995, 1996 Russell King
*/
#include <linux/linkage.h>
+#include <linux/cfi_types.h>
#include <asm/assembler.h>
#include <asm/delay.h>
@@ -24,21 +25,26 @@
* HZ <= 1000
*/
-ENTRY(__loop_udelay)
+SYM_TYPED_FUNC_START(__loop_udelay)
ldr r2, .LC1
mul r0, r2, r0 @ r0 = delay_us * UDELAY_MULT
-ENTRY(__loop_const_udelay) @ 0 <= r0 <= 0xfffffaf0
+ b __loop_const_udelay
+SYM_FUNC_END(__loop_udelay)
+
+SYM_TYPED_FUNC_START(__loop_const_udelay) @ 0 <= r0 <= 0xfffffaf0
ldr r2, .LC0
ldr r2, [r2]
umull r1, r0, r2, r0 @ r0-r1 = r0 * loops_per_jiffy
adds r1, r1, #0xffffffff @ rounding up ...
adcs r0, r0, r0 @ and right shift by 31
reteq lr
+ b __loop_delay
+SYM_FUNC_END(__loop_const_udelay)
.align 3
@ Delay routine
-ENTRY(__loop_delay)
+SYM_TYPED_FUNC_START(__loop_delay)
subs r0, r0, #1
#if 0
retls lr
@@ -58,6 +64,4 @@ ENTRY(__loop_delay)
#endif
bhi __loop_delay
ret lr
-ENDPROC(__loop_udelay)
-ENDPROC(__loop_const_udelay)
-ENDPROC(__loop_delay)
+SYM_FUNC_END(__loop_delay)
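The explicit branches replacing the old fallthroughs are required because SYM_TYPED_FUNC_START places a KCFI type hash immediately before each entry point; execution falling through from the previous routine would hit that hash word as if it were an instruction. Roughly (illustrative layout only):

    /*
     *     .word  <KCFI type hash>      <- emitted by SYM_TYPED_FUNC_START
     *     __loop_const_udelay:
     *         ...                      reached via "b __loop_const_udelay"
     *                                  or a checked indirect call, never by
     *                                  falling off the end of __loop_udelay
     */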
diff --git a/arch/arm/mm/Makefile b/arch/arm/mm/Makefile
index 71b858c9b10c..f1f231f20ff9 100644
--- a/arch/arm/mm/Makefile
+++ b/arch/arm/mm/Makefile
@@ -45,6 +45,7 @@ obj-$(CONFIG_CPU_CACHE_V7) += cache-v7.o
obj-$(CONFIG_CPU_CACHE_FA) += cache-fa.o
obj-$(CONFIG_CPU_CACHE_NOP) += cache-nop.o
obj-$(CONFIG_CPU_CACHE_V7M) += cache-v7m.o
+obj-y += cache.o
obj-$(CONFIG_CPU_COPY_V4WT) += copypage-v4wt.o
obj-$(CONFIG_CPU_COPY_V4WB) += copypage-v4wb.o
@@ -62,6 +63,7 @@ obj-$(CONFIG_CPU_TLB_FEROCEON) += tlb-v4wbi.o # reuse v4wbi TLB functions
obj-$(CONFIG_CPU_TLB_V6) += tlb-v6.o
obj-$(CONFIG_CPU_TLB_V7) += tlb-v7.o
obj-$(CONFIG_CPU_TLB_FA) += tlb-fa.o
+obj-y += tlb.o
obj-$(CONFIG_CPU_ARM7TDMI) += proc-arm7tdmi.o
obj-$(CONFIG_CPU_ARM720T) += proc-arm720.o
@@ -88,6 +90,7 @@ obj-$(CONFIG_CPU_V6) += proc-v6.o
obj-$(CONFIG_CPU_V6K) += proc-v6.o
obj-$(CONFIG_CPU_V7) += proc-v7.o proc-v7-bugs.o
obj-$(CONFIG_CPU_V7M) += proc-v7m.o
+obj-$(CONFIG_CFI_CLANG) += proc.o
obj-$(CONFIG_OUTER_CACHE) += l2c-common.o
obj-$(CONFIG_CACHE_B15_RAC) += cache-b15-rac.o
diff --git a/arch/arm/mm/cache-b15-rac.c b/arch/arm/mm/cache-b15-rac.c
index 9c1172f26885..6f63b90f9e1a 100644
--- a/arch/arm/mm/cache-b15-rac.c
+++ b/arch/arm/mm/cache-b15-rac.c
@@ -5,6 +5,7 @@
* Copyright (C) 2015-2016 Broadcom
*/
+#include <linux/cfi_types.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/io.h>
diff --git a/arch/arm/mm/cache-fa.S b/arch/arm/mm/cache-fa.S
index 71c64e92dead..4a3668b52a2d 100644
--- a/arch/arm/mm/cache-fa.S
+++ b/arch/arm/mm/cache-fa.S
@@ -12,6 +12,7 @@
*/
#include <linux/linkage.h>
#include <linux/init.h>
+#include <linux/cfi_types.h>
#include <asm/assembler.h>
#include <asm/page.h>
@@ -39,11 +40,11 @@
*
* Unconditionally clean and invalidate the entire icache.
*/
-ENTRY(fa_flush_icache_all)
+SYM_TYPED_FUNC_START(fa_flush_icache_all)
mov r0, #0
mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
ret lr
-ENDPROC(fa_flush_icache_all)
+SYM_FUNC_END(fa_flush_icache_all)
/*
* flush_user_cache_all()
@@ -51,14 +52,14 @@ ENDPROC(fa_flush_icache_all)
* Clean and invalidate all cache entries in a particular address
* space.
*/
-ENTRY(fa_flush_user_cache_all)
- /* FALLTHROUGH */
+SYM_FUNC_ALIAS(fa_flush_user_cache_all, fa_flush_kern_cache_all)
+
/*
* flush_kern_cache_all()
*
* Clean and invalidate the entire cache.
*/
-ENTRY(fa_flush_kern_cache_all)
+SYM_TYPED_FUNC_START(fa_flush_kern_cache_all)
mov ip, #0
mov r2, #VM_EXEC
__flush_whole_cache:
@@ -69,6 +70,7 @@ __flush_whole_cache:
mcrne p15, 0, ip, c7, c10, 4 @ drain write buffer
mcrne p15, 0, ip, c7, c5, 4 @ prefetch flush
ret lr
+SYM_FUNC_END(fa_flush_kern_cache_all)
/*
* flush_user_cache_range(start, end, flags)
@@ -80,7 +82,7 @@ __flush_whole_cache:
* - end - end address (exclusive, page aligned)
* - flags - vma_area_struct flags describing address space
*/
-ENTRY(fa_flush_user_cache_range)
+SYM_TYPED_FUNC_START(fa_flush_user_cache_range)
mov ip, #0
sub r3, r1, r0 @ calculate total size
cmp r3, #CACHE_DLIMIT @ total size >= limit?
@@ -97,6 +99,7 @@ ENTRY(fa_flush_user_cache_range)
mcrne p15, 0, ip, c7, c10, 4 @ data write barrier
mcrne p15, 0, ip, c7, c5, 4 @ prefetch flush
ret lr
+SYM_FUNC_END(fa_flush_user_cache_range)
/*
* coherent_kern_range(start, end)
@@ -108,8 +111,11 @@ ENTRY(fa_flush_user_cache_range)
* - start - virtual start address
* - end - virtual end address
*/
-ENTRY(fa_coherent_kern_range)
- /* fall through */
+SYM_TYPED_FUNC_START(fa_coherent_kern_range)
+#ifdef CONFIG_CFI_CLANG /* Fallthrough if !CFI */
+ b fa_coherent_user_range
+#endif
+SYM_FUNC_END(fa_coherent_kern_range)
/*
* coherent_user_range(start, end)
@@ -121,7 +127,7 @@ ENTRY(fa_coherent_kern_range)
* - start - virtual start address
* - end - virtual end address
*/
-ENTRY(fa_coherent_user_range)
+SYM_TYPED_FUNC_START(fa_coherent_user_range)
bic r0, r0, #CACHE_DLINESIZE - 1
1: mcr p15, 0, r0, c7, c14, 1 @ clean and invalidate D entry
mcr p15, 0, r0, c7, c5, 1 @ invalidate I entry
@@ -133,6 +139,7 @@ ENTRY(fa_coherent_user_range)
mcr p15, 0, r0, c7, c10, 4 @ drain write buffer
mcr p15, 0, r0, c7, c5, 4 @ prefetch flush
ret lr
+SYM_FUNC_END(fa_coherent_user_range)
/*
* flush_kern_dcache_area(void *addr, size_t size)
@@ -143,7 +150,7 @@ ENTRY(fa_coherent_user_range)
* - addr - kernel address
* - size - size of region
*/
-ENTRY(fa_flush_kern_dcache_area)
+SYM_TYPED_FUNC_START(fa_flush_kern_dcache_area)
add r1, r0, r1
1: mcr p15, 0, r0, c7, c14, 1 @ clean & invalidate D line
add r0, r0, #CACHE_DLINESIZE
@@ -153,6 +160,7 @@ ENTRY(fa_flush_kern_dcache_area)
mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
mcr p15, 0, r0, c7, c10, 4 @ drain write buffer
ret lr
+SYM_FUNC_END(fa_flush_kern_dcache_area)
/*
* dma_inv_range(start, end)
@@ -203,7 +211,7 @@ fa_dma_clean_range:
* - start - virtual start address of region
* - end - virtual end address of region
*/
-ENTRY(fa_dma_flush_range)
+SYM_TYPED_FUNC_START(fa_dma_flush_range)
bic r0, r0, #CACHE_DLINESIZE - 1
1: mcr p15, 0, r0, c7, c14, 1 @ clean & invalidate D entry
add r0, r0, #CACHE_DLINESIZE
@@ -212,6 +220,7 @@ ENTRY(fa_dma_flush_range)
mov r0, #0
mcr p15, 0, r0, c7, c10, 4 @ drain write buffer
ret lr
+SYM_FUNC_END(fa_dma_flush_range)
/*
* dma_map_area(start, size, dir)
@@ -219,13 +228,13 @@ ENTRY(fa_dma_flush_range)
* - size - size of region
* - dir - DMA direction
*/
-ENTRY(fa_dma_map_area)
+SYM_TYPED_FUNC_START(fa_dma_map_area)
add r1, r1, r0
cmp r2, #DMA_TO_DEVICE
beq fa_dma_clean_range
bcs fa_dma_inv_range
b fa_dma_flush_range
-ENDPROC(fa_dma_map_area)
+SYM_FUNC_END(fa_dma_map_area)
/*
* dma_unmap_area(start, size, dir)
@@ -233,14 +242,6 @@ ENDPROC(fa_dma_map_area)
* - size - size of region
* - dir - DMA direction
*/
-ENTRY(fa_dma_unmap_area)
+SYM_TYPED_FUNC_START(fa_dma_unmap_area)
ret lr
-ENDPROC(fa_dma_unmap_area)
-
- .globl fa_flush_kern_cache_louis
- .equ fa_flush_kern_cache_louis, fa_flush_kern_cache_all
-
- __INITDATA
-
- @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
- define_cache_functions fa
+SYM_FUNC_END(fa_dma_unmap_area)
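The same constraint motivates SYM_FUNC_ALIAS above: where one ENTRY used to fall through into the next, both symbols must now name a single typed entry point carrying one hash. Sketched with the names from this file (illustrative only):

    /*
     * Before (fallthrough, not CFI-safe):
     *     ENTRY(fa_flush_user_cache_all)
     *         @ FALLTHROUGH
     *     ENTRY(fa_flush_kern_cache_all)
     *
     * After: fa_flush_user_cache_all is just another name for the same
     * address, so an indirect call through either symbol sees the one
     * KCFI hash in front of fa_flush_kern_cache_all.
     */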
diff --git a/arch/arm/mm/cache-nop.S b/arch/arm/mm/cache-nop.S
index 72d939ef8798..f68dde2014ee 100644
--- a/arch/arm/mm/cache-nop.S
+++ b/arch/arm/mm/cache-nop.S
@@ -1,47 +1,52 @@
/* SPDX-License-Identifier: GPL-2.0-only */
#include <linux/linkage.h>
#include <linux/init.h>
+#include <linux/cfi_types.h>
#include <asm/assembler.h>
#include "proc-macros.S"
-ENTRY(nop_flush_icache_all)
+/*
+ * These are all open-coded instead of aliased, to make clear
+ * what is going on here: all functions are stubbed out.
+ */
+SYM_TYPED_FUNC_START(nop_flush_icache_all)
ret lr
-ENDPROC(nop_flush_icache_all)
+SYM_FUNC_END(nop_flush_icache_all)
- .globl nop_flush_kern_cache_all
- .equ nop_flush_kern_cache_all, nop_flush_icache_all
-
- .globl nop_flush_kern_cache_louis
- .equ nop_flush_kern_cache_louis, nop_flush_icache_all
+SYM_TYPED_FUNC_START(nop_flush_kern_cache_all)
+ ret lr
+SYM_FUNC_END(nop_flush_kern_cache_all)
- .globl nop_flush_user_cache_all
- .equ nop_flush_user_cache_all, nop_flush_icache_all
+SYM_TYPED_FUNC_START(nop_flush_user_cache_all)
+ ret lr
+SYM_FUNC_END(nop_flush_user_cache_all)
- .globl nop_flush_user_cache_range
- .equ nop_flush_user_cache_range, nop_flush_icache_all
+SYM_TYPED_FUNC_START(nop_flush_user_cache_range)
+ ret lr
+SYM_FUNC_END(nop_flush_user_cache_range)
- .globl nop_coherent_kern_range
- .equ nop_coherent_kern_range, nop_flush_icache_all
+SYM_TYPED_FUNC_START(nop_coherent_kern_range)
+ ret lr
+SYM_FUNC_END(nop_coherent_kern_range)
-ENTRY(nop_coherent_user_range)
+SYM_TYPED_FUNC_START(nop_coherent_user_range)
mov r0, 0
ret lr
-ENDPROC(nop_coherent_user_range)
-
- .globl nop_flush_kern_dcache_area
- .equ nop_flush_kern_dcache_area, nop_flush_icache_all
+SYM_FUNC_END(nop_coherent_user_range)
- .globl nop_dma_flush_range
- .equ nop_dma_flush_range, nop_flush_icache_all
-
- .globl nop_dma_map_area
- .equ nop_dma_map_area, nop_flush_icache_all
+SYM_TYPED_FUNC_START(nop_flush_kern_dcache_area)
+ ret lr
+SYM_FUNC_END(nop_flush_kern_dcache_area)
- .globl nop_dma_unmap_area
- .equ nop_dma_unmap_area, nop_flush_icache_all
+SYM_TYPED_FUNC_START(nop_dma_flush_range)
+ ret lr
+SYM_FUNC_END(nop_dma_flush_range)
- __INITDATA
+SYM_TYPED_FUNC_START(nop_dma_map_area)
+ ret lr
+SYM_FUNC_END(nop_dma_map_area)
- @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
- define_cache_functions nop
+SYM_TYPED_FUNC_START(nop_dma_unmap_area)
+ ret lr
+SYM_FUNC_END(nop_dma_unmap_area)
diff --git a/arch/arm/mm/cache-v4.S b/arch/arm/mm/cache-v4.S
index 7787057e4990..0e94e5193dbd 100644
--- a/arch/arm/mm/cache-v4.S
+++ b/arch/arm/mm/cache-v4.S
@@ -6,6 +6,7 @@
*/
#include <linux/linkage.h>
#include <linux/init.h>
+#include <linux/cfi_types.h>
#include <asm/assembler.h>
#include <asm/page.h>
#include "proc-macros.S"
@@ -15,9 +16,9 @@
*
* Unconditionally clean and invalidate the entire icache.
*/
-ENTRY(v4_flush_icache_all)
+SYM_TYPED_FUNC_START(v4_flush_icache_all)
ret lr
-ENDPROC(v4_flush_icache_all)
+SYM_FUNC_END(v4_flush_icache_all)
/*
* flush_user_cache_all()
@@ -27,21 +28,22 @@ ENDPROC(v4_flush_icache_all)
*
* - mm - mm_struct describing address space
*/
-ENTRY(v4_flush_user_cache_all)
- /* FALLTHROUGH */
+SYM_FUNC_ALIAS(v4_flush_user_cache_all, v4_flush_kern_cache_all)
+
/*
* flush_kern_cache_all()
*
* Clean and invalidate the entire cache.
*/
-ENTRY(v4_flush_kern_cache_all)
+SYM_TYPED_FUNC_START(v4_flush_kern_cache_all)
#ifdef CONFIG_CPU_CP15
mov r0, #0
mcr p15, 0, r0, c7, c7, 0 @ flush ID cache
ret lr
#else
- /* FALLTHROUGH */
+ ret lr
#endif
+SYM_FUNC_END(v4_flush_kern_cache_all)
/*
* flush_user_cache_range(start, end, flags)
@@ -53,14 +55,15 @@ ENTRY(v4_flush_kern_cache_all)
* - end - end address (exclusive, may not be aligned)
* - flags - vma_area_struct flags describing address space
*/
-ENTRY(v4_flush_user_cache_range)
+SYM_TYPED_FUNC_START(v4_flush_user_cache_range)
#ifdef CONFIG_CPU_CP15
mov ip, #0
mcr p15, 0, ip, c7, c7, 0 @ flush ID cache
ret lr
#else
- /* FALLTHROUGH */
+ ret lr
#endif
+SYM_FUNC_END(v4_flush_user_cache_range)
/*
* coherent_kern_range(start, end)
@@ -72,8 +75,9 @@ ENTRY(v4_flush_user_cache_range)
* - start - virtual start address
* - end - virtual end address
*/
-ENTRY(v4_coherent_kern_range)
- /* FALLTHROUGH */
+SYM_TYPED_FUNC_START(v4_coherent_kern_range)
+ ret lr
+SYM_FUNC_END(v4_coherent_kern_range)
/*
* coherent_user_range(start, end)
@@ -85,9 +89,10 @@ ENTRY(v4_coherent_kern_range)
* - start - virtual start address
* - end - virtual end address
*/
-ENTRY(v4_coherent_user_range)
+SYM_TYPED_FUNC_START(v4_coherent_user_range)
mov r0, #0
ret lr
+SYM_FUNC_END(v4_coherent_user_range)
/*
* flush_kern_dcache_area(void *addr, size_t size)
@@ -98,8 +103,11 @@ ENTRY(v4_coherent_user_range)
* - addr - kernel address
* - size - region size
*/
-ENTRY(v4_flush_kern_dcache_area)
- /* FALLTHROUGH */
+SYM_TYPED_FUNC_START(v4_flush_kern_dcache_area)
+#ifdef CONFIG_CFI_CLANG /* Fallthrough if !CFI */
+ b v4_dma_flush_range
+#endif
+SYM_FUNC_END(v4_flush_kern_dcache_area)
/*
* dma_flush_range(start, end)
@@ -109,12 +117,13 @@ ENTRY(v4_flush_kern_dcache_area)
* - start - virtual start address
* - end - virtual end address
*/
-ENTRY(v4_dma_flush_range)
+SYM_TYPED_FUNC_START(v4_dma_flush_range)
#ifdef CONFIG_CPU_CP15
mov r0, #0
mcr p15, 0, r0, c7, c7, 0 @ flush ID cache
#endif
ret lr
+SYM_FUNC_END(v4_dma_flush_range)
/*
* dma_unmap_area(start, size, dir)
@@ -122,10 +131,11 @@ ENTRY(v4_dma_flush_range)
* - size - size of region
* - dir - DMA direction
*/
-ENTRY(v4_dma_unmap_area)
+SYM_TYPED_FUNC_START(v4_dma_unmap_area)
teq r2, #DMA_TO_DEVICE
bne v4_dma_flush_range
- /* FALLTHROUGH */
+ ret lr
+SYM_FUNC_END(v4_dma_unmap_area)
/*
* dma_map_area(start, size, dir)
@@ -133,15 +143,6 @@ ENTRY(v4_dma_unmap_area)
* - size - size of region
* - dir - DMA direction
*/
-ENTRY(v4_dma_map_area)
+SYM_TYPED_FUNC_START(v4_dma_map_area)
ret lr
-ENDPROC(v4_dma_unmap_area)
-ENDPROC(v4_dma_map_area)
-
- .globl v4_flush_kern_cache_louis
- .equ v4_flush_kern_cache_louis, v4_flush_kern_cache_all
-
- __INITDATA
-
- @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
- define_cache_functions v4
+SYM_FUNC_END(v4_dma_map_area)
diff --git a/arch/arm/mm/cache-v4wb.S b/arch/arm/mm/cache-v4wb.S
index ad382cee0fdb..ce55a2eef5da 100644
--- a/arch/arm/mm/cache-v4wb.S
+++ b/arch/arm/mm/cache-v4wb.S
@@ -6,6 +6,7 @@
*/
#include <linux/linkage.h>
#include <linux/init.h>
+#include <linux/cfi_types.h>
#include <asm/assembler.h>
#include <asm/page.h>
#include "proc-macros.S"
@@ -53,11 +54,11 @@ flush_base:
*
* Unconditionally clean and invalidate the entire icache.
*/
-ENTRY(v4wb_flush_icache_all)
+SYM_TYPED_FUNC_START(v4wb_flush_icache_all)
mov r0, #0
mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
ret lr
-ENDPROC(v4wb_flush_icache_all)
+SYM_FUNC_END(v4wb_flush_icache_all)
/*
* flush_user_cache_all()
@@ -65,14 +66,14 @@ ENDPROC(v4wb_flush_icache_all)
* Clean and invalidate all cache entries in a particular address
* space.
*/
-ENTRY(v4wb_flush_user_cache_all)
- /* FALLTHROUGH */
+SYM_FUNC_ALIAS(v4wb_flush_user_cache_all, v4wb_flush_kern_cache_all)
+
/*
* flush_kern_cache_all()
*
* Clean and invalidate the entire cache.
*/
-ENTRY(v4wb_flush_kern_cache_all)
+SYM_TYPED_FUNC_START(v4wb_flush_kern_cache_all)
mov ip, #0
mcr p15, 0, ip, c7, c5, 0 @ invalidate I cache
__flush_whole_cache:
@@ -93,6 +94,7 @@ __flush_whole_cache:
#endif
mcr p15, 0, ip, c7, c10, 4 @ drain write buffer
ret lr
+SYM_FUNC_END(v4wb_flush_kern_cache_all)
/*
* flush_user_cache_range(start, end, flags)
@@ -104,7 +106,7 @@ __flush_whole_cache:
* - end - end address (exclusive, page aligned)
* - flags - vma_area_struct flags describing address space
*/
-ENTRY(v4wb_flush_user_cache_range)
+SYM_TYPED_FUNC_START(v4wb_flush_user_cache_range)
mov ip, #0
sub r3, r1, r0 @ calculate total size
tst r2, #VM_EXEC @ executable region?
@@ -121,6 +123,7 @@ ENTRY(v4wb_flush_user_cache_range)
tst r2, #VM_EXEC
mcrne p15, 0, ip, c7, c10, 4 @ drain write buffer
ret lr
+SYM_FUNC_END(v4wb_flush_user_cache_range)
/*
* flush_kern_dcache_area(void *addr, size_t size)
@@ -131,9 +134,12 @@ ENTRY(v4wb_flush_user_cache_range)
* - addr - kernel address
* - size - region size
*/
-ENTRY(v4wb_flush_kern_dcache_area)
+SYM_TYPED_FUNC_START(v4wb_flush_kern_dcache_area)
add r1, r0, r1
- /* fall through */
+#ifdef CONFIG_CFI_CLANG /* Fallthrough if !CFI */
+ b v4wb_coherent_user_range
+#endif
+SYM_FUNC_END(v4wb_flush_kern_dcache_area)
/*
* coherent_kern_range(start, end)
@@ -145,8 +151,11 @@ ENTRY(v4wb_flush_kern_dcache_area)
* - start - virtual start address
* - end - virtual end address
*/
-ENTRY(v4wb_coherent_kern_range)
- /* fall through */
+SYM_TYPED_FUNC_START(v4wb_coherent_kern_range)
+#ifdef CONFIG_CFI_CLANG /* Fallthrough if !CFI */
+ b v4wb_coherent_user_range
+#endif
+SYM_FUNC_END(v4wb_coherent_kern_range)
/*
* coherent_user_range(start, end)
@@ -158,7 +167,7 @@ ENTRY(v4wb_coherent_kern_range)
* - start - virtual start address
* - end - virtual end address
*/
-ENTRY(v4wb_coherent_user_range)
+SYM_TYPED_FUNC_START(v4wb_coherent_user_range)
bic r0, r0, #CACHE_DLINESIZE - 1
1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
mcr p15, 0, r0, c7, c6, 1 @ invalidate D entry
@@ -169,7 +178,7 @@ ENTRY(v4wb_coherent_user_range)
mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
mcr p15, 0, r0, c7, c10, 4 @ drain WB
ret lr
-
+SYM_FUNC_END(v4wb_coherent_user_range)
/*
* dma_inv_range(start, end)
@@ -231,13 +240,13 @@ v4wb_dma_clean_range:
* - size - size of region
* - dir - DMA direction
*/
-ENTRY(v4wb_dma_map_area)
+SYM_TYPED_FUNC_START(v4wb_dma_map_area)
add r1, r1, r0
cmp r2, #DMA_TO_DEVICE
beq v4wb_dma_clean_range
bcs v4wb_dma_inv_range
b v4wb_dma_flush_range
-ENDPROC(v4wb_dma_map_area)
+SYM_FUNC_END(v4wb_dma_map_area)
/*
* dma_unmap_area(start, size, dir)
@@ -245,14 +254,6 @@ ENDPROC(v4wb_dma_map_area)
* - size - size of region
* - dir - DMA direction
*/
-ENTRY(v4wb_dma_unmap_area)
+SYM_TYPED_FUNC_START(v4wb_dma_unmap_area)
ret lr
-ENDPROC(v4wb_dma_unmap_area)
-
- .globl v4wb_flush_kern_cache_louis
- .equ v4wb_flush_kern_cache_louis, v4wb_flush_kern_cache_all
-
- __INITDATA
-
- @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
- define_cache_functions v4wb
+SYM_FUNC_END(v4wb_dma_unmap_area)
diff --git a/arch/arm/mm/cache-v4wt.S b/arch/arm/mm/cache-v4wt.S
index 0b290c25a99d..a97dc267b3b0 100644
--- a/arch/arm/mm/cache-v4wt.S
+++ b/arch/arm/mm/cache-v4wt.S
@@ -10,6 +10,7 @@
*/
#include <linux/linkage.h>
#include <linux/init.h>
+#include <linux/cfi_types.h>
#include <asm/assembler.h>
#include <asm/page.h>
#include "proc-macros.S"
@@ -43,11 +44,11 @@
*
* Unconditionally clean and invalidate the entire icache.
*/
-ENTRY(v4wt_flush_icache_all)
+SYM_TYPED_FUNC_START(v4wt_flush_icache_all)
mov r0, #0
mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
ret lr
-ENDPROC(v4wt_flush_icache_all)
+SYM_FUNC_END(v4wt_flush_icache_all)
/*
* flush_user_cache_all()
@@ -55,14 +56,14 @@ ENDPROC(v4wt_flush_icache_all)
* Invalidate all cache entries in a particular address
* space.
*/
-ENTRY(v4wt_flush_user_cache_all)
- /* FALLTHROUGH */
+SYM_FUNC_ALIAS(v4wt_flush_user_cache_all, v4wt_flush_kern_cache_all)
+
/*
* flush_kern_cache_all()
*
* Clean and invalidate the entire cache.
*/
-ENTRY(v4wt_flush_kern_cache_all)
+SYM_TYPED_FUNC_START(v4wt_flush_kern_cache_all)
mov r2, #VM_EXEC
mov ip, #0
__flush_whole_cache:
@@ -70,6 +71,7 @@ __flush_whole_cache:
mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache
mcr p15, 0, ip, c7, c6, 0 @ invalidate D cache
ret lr
+SYM_FUNC_END(v4wt_flush_kern_cache_all)
/*
* flush_user_cache_range(start, end, flags)
@@ -81,7 +83,7 @@ __flush_whole_cache:
* - end - end address (exclusive, page aligned)
* - flags - vma_area_struct flags describing address space
*/
-ENTRY(v4wt_flush_user_cache_range)
+SYM_TYPED_FUNC_START(v4wt_flush_user_cache_range)
sub r3, r1, r0 @ calculate total size
cmp r3, #CACHE_DLIMIT
bhs __flush_whole_cache
@@ -93,6 +95,7 @@ ENTRY(v4wt_flush_user_cache_range)
cmp r0, r1
blo 1b
ret lr
+SYM_FUNC_END(v4wt_flush_user_cache_range)
/*
* coherent_kern_range(start, end)
@@ -104,8 +107,11 @@ ENTRY(v4wt_flush_user_cache_range)
* - start - virtual start address
* - end - virtual end address
*/
-ENTRY(v4wt_coherent_kern_range)
- /* FALLTRHOUGH */
+SYM_TYPED_FUNC_START(v4wt_coherent_kern_range)
+#ifdef CONFIG_CFI_CLANG /* Fallthrough if !CFI */
+ b v4wt_coherent_user_range
+#endif
+SYM_FUNC_END(v4wt_coherent_kern_range)
/*
* coherent_user_range(start, end)
@@ -117,7 +123,7 @@ ENTRY(v4wt_coherent_kern_range)
* - start - virtual start address
* - end - virtual end address
*/
-ENTRY(v4wt_coherent_user_range)
+SYM_TYPED_FUNC_START(v4wt_coherent_user_range)
bic r0, r0, #CACHE_DLINESIZE - 1
1: mcr p15, 0, r0, c7, c5, 1 @ invalidate I entry
add r0, r0, #CACHE_DLINESIZE
@@ -125,6 +131,7 @@ ENTRY(v4wt_coherent_user_range)
blo 1b
mov r0, #0
ret lr
+SYM_FUNC_END(v4wt_coherent_user_range)
/*
* flush_kern_dcache_area(void *addr, size_t size)
@@ -135,11 +142,12 @@ ENTRY(v4wt_coherent_user_range)
* - addr - kernel address
* - size - region size
*/
-ENTRY(v4wt_flush_kern_dcache_area)
+SYM_TYPED_FUNC_START(v4wt_flush_kern_dcache_area)
mov r2, #0
mcr p15, 0, r2, c7, c5, 0 @ invalidate I cache
add r1, r0, r1
- /* fallthrough */
+ b v4wt_dma_inv_range
+SYM_FUNC_END(v4wt_flush_kern_dcache_area)
/*
* dma_inv_range(start, end)
@@ -167,9 +175,10 @@ v4wt_dma_inv_range:
*
* - start - virtual start address
* - end - virtual end address
- */
- .globl v4wt_dma_flush_range
- .equ v4wt_dma_flush_range, v4wt_dma_inv_range
+*/
+SYM_TYPED_FUNC_START(v4wt_dma_flush_range)
+ b v4wt_dma_inv_range
+SYM_FUNC_END(v4wt_dma_flush_range)
/*
* dma_unmap_area(start, size, dir)
@@ -177,11 +186,12 @@ v4wt_dma_inv_range:
* - size - size of region
* - dir - DMA direction
*/
-ENTRY(v4wt_dma_unmap_area)
+SYM_TYPED_FUNC_START(v4wt_dma_unmap_area)
add r1, r1, r0
teq r2, #DMA_TO_DEVICE
bne v4wt_dma_inv_range
- /* FALLTHROUGH */
+ ret lr
+SYM_FUNC_END(v4wt_dma_unmap_area)
/*
* dma_map_area(start, size, dir)
@@ -189,15 +199,6 @@ ENTRY(v4wt_dma_unmap_area)
* - size - size of region
* - dir - DMA direction
*/
-ENTRY(v4wt_dma_map_area)
+SYM_TYPED_FUNC_START(v4wt_dma_map_area)
ret lr
-ENDPROC(v4wt_dma_unmap_area)
-ENDPROC(v4wt_dma_map_area)
-
- .globl v4wt_flush_kern_cache_louis
- .equ v4wt_flush_kern_cache_louis, v4wt_flush_kern_cache_all
-
- __INITDATA
-
- @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
- define_cache_functions v4wt
+SYM_FUNC_END(v4wt_dma_map_area)
diff --git a/arch/arm/mm/cache-v6.S b/arch/arm/mm/cache-v6.S
index 44211d8a296f..9f415476e218 100644
--- a/arch/arm/mm/cache-v6.S
+++ b/arch/arm/mm/cache-v6.S
@@ -8,6 +8,7 @@
*/
#include <linux/linkage.h>
#include <linux/init.h>
+#include <linux/cfi_types.h>
#include <asm/assembler.h>
#include <asm/errno.h>
#include <asm/unwind.h>
@@ -34,7 +35,7 @@
* r0 - set to 0
* r1 - corrupted
*/
-ENTRY(v6_flush_icache_all)
+SYM_TYPED_FUNC_START(v6_flush_icache_all)
mov r0, #0
#ifdef CONFIG_ARM_ERRATA_411920
mrs r1, cpsr
@@ -51,7 +52,7 @@ ENTRY(v6_flush_icache_all)
mcr p15, 0, r0, c7, c5, 0 @ invalidate I-cache
#endif
ret lr
-ENDPROC(v6_flush_icache_all)
+SYM_FUNC_END(v6_flush_icache_all)
/*
* v6_flush_cache_all()
@@ -60,7 +61,7 @@ ENDPROC(v6_flush_icache_all)
*
* It is assumed that:
*/
-ENTRY(v6_flush_kern_cache_all)
+SYM_TYPED_FUNC_START(v6_flush_kern_cache_all)
mov r0, #0
#ifdef HARVARD_CACHE
mcr p15, 0, r0, c7, c14, 0 @ D cache clean+invalidate
@@ -73,6 +74,7 @@ ENTRY(v6_flush_kern_cache_all)
mcr p15, 0, r0, c7, c15, 0 @ Cache clean+invalidate
#endif
ret lr
+SYM_FUNC_END(v6_flush_kern_cache_all)
/*
* v6_flush_cache_all()
@@ -81,8 +83,9 @@ ENTRY(v6_flush_kern_cache_all)
*
* - mm - mm_struct describing address space
*/
-ENTRY(v6_flush_user_cache_all)
- /*FALLTHROUGH*/
+SYM_TYPED_FUNC_START(v6_flush_user_cache_all)
+ ret lr
+SYM_FUNC_END(v6_flush_user_cache_all)
/*
* v6_flush_cache_range(start, end, flags)
@@ -96,8 +99,9 @@ ENTRY(v6_flush_user_cache_all)
* It is assumed that:
* - we have a VIPT cache.
*/
-ENTRY(v6_flush_user_cache_range)
+SYM_TYPED_FUNC_START(v6_flush_user_cache_range)
ret lr
+SYM_FUNC_END(v6_flush_user_cache_range)
/*
* v6_coherent_kern_range(start,end)
@@ -112,8 +116,11 @@ ENTRY(v6_flush_user_cache_range)
* It is assumed that:
* - the Icache does not read data from the write buffer
*/
-ENTRY(v6_coherent_kern_range)
- /* FALLTHROUGH */
+SYM_TYPED_FUNC_START(v6_coherent_kern_range)
+#ifdef CONFIG_CFI_CLANG /* Fallthrough if !CFI */
+ b v6_coherent_user_range
+#endif
+SYM_FUNC_END(v6_coherent_kern_range)
/*
* v6_coherent_user_range(start,end)
@@ -128,7 +135,7 @@ ENTRY(v6_coherent_kern_range)
* It is assumed that:
* - the Icache does not read data from the write buffer
*/
-ENTRY(v6_coherent_user_range)
+SYM_TYPED_FUNC_START(v6_coherent_user_range)
UNWIND(.fnstart )
#ifdef HARVARD_CACHE
bic r0, r0, #CACHE_LINE_SIZE - 1
@@ -159,8 +166,7 @@ ENTRY(v6_coherent_user_range)
mov r0, #-EFAULT
ret lr
UNWIND(.fnend )
-ENDPROC(v6_coherent_user_range)
-ENDPROC(v6_coherent_kern_range)
+SYM_FUNC_END(v6_coherent_user_range)
/*
* v6_flush_kern_dcache_area(void *addr, size_t size)
@@ -171,7 +177,7 @@ ENDPROC(v6_coherent_kern_range)
* - addr - kernel address
* - size - region size
*/
-ENTRY(v6_flush_kern_dcache_area)
+SYM_TYPED_FUNC_START(v6_flush_kern_dcache_area)
add r1, r0, r1
bic r0, r0, #D_CACHE_LINE_SIZE - 1
1:
@@ -188,7 +194,7 @@ ENTRY(v6_flush_kern_dcache_area)
mcr p15, 0, r0, c7, c10, 4
#endif
ret lr
-
+SYM_FUNC_END(v6_flush_kern_dcache_area)
/*
* v6_dma_inv_range(start,end)
@@ -253,7 +259,7 @@ v6_dma_clean_range:
* - start - virtual start address of region
* - end - virtual end address of region
*/
-ENTRY(v6_dma_flush_range)
+SYM_TYPED_FUNC_START(v6_dma_flush_range)
bic r0, r0, #D_CACHE_LINE_SIZE - 1
1:
#ifdef HARVARD_CACHE
@@ -267,6 +273,7 @@ ENTRY(v6_dma_flush_range)
mov r0, #0
mcr p15, 0, r0, c7, c10, 4 @ drain write buffer
ret lr
+SYM_FUNC_END(v6_dma_flush_range)
/*
* dma_map_area(start, size, dir)
@@ -274,12 +281,12 @@ ENTRY(v6_dma_flush_range)
* - size - size of region
* - dir - DMA direction
*/
-ENTRY(v6_dma_map_area)
+SYM_TYPED_FUNC_START(v6_dma_map_area)
add r1, r1, r0
teq r2, #DMA_FROM_DEVICE
beq v6_dma_inv_range
b v6_dma_clean_range
-ENDPROC(v6_dma_map_area)
+SYM_FUNC_END(v6_dma_map_area)
/*
* dma_unmap_area(start, size, dir)
@@ -287,17 +294,9 @@ ENDPROC(v6_dma_map_area)
* - size - size of region
* - dir - DMA direction
*/
-ENTRY(v6_dma_unmap_area)
+SYM_TYPED_FUNC_START(v6_dma_unmap_area)
add r1, r1, r0
teq r2, #DMA_TO_DEVICE
bne v6_dma_inv_range
ret lr
-ENDPROC(v6_dma_unmap_area)
-
- .globl v6_flush_kern_cache_louis
- .equ v6_flush_kern_cache_louis, v6_flush_kern_cache_all
-
- __INITDATA
-
- @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
- define_cache_functions v6
+SYM_FUNC_END(v6_dma_unmap_area)
diff --git a/arch/arm/mm/cache-v7.S b/arch/arm/mm/cache-v7.S
index 127afe2096ba..201ca05436fa 100644
--- a/arch/arm/mm/cache-v7.S
+++ b/arch/arm/mm/cache-v7.S
@@ -9,6 +9,7 @@
*/
#include <linux/linkage.h>
#include <linux/init.h>
+#include <linux/cfi_types.h>
#include <asm/assembler.h>
#include <asm/errno.h>
#include <asm/unwind.h>
@@ -80,12 +81,12 @@ ENDPROC(v7_invalidate_l1)
* Registers:
* r0 - set to 0
*/
-ENTRY(v7_flush_icache_all)
+SYM_TYPED_FUNC_START(v7_flush_icache_all)
mov r0, #0
ALT_SMP(mcr p15, 0, r0, c7, c1, 0) @ invalidate I-cache inner shareable
ALT_UP(mcr p15, 0, r0, c7, c5, 0) @ I+BTB cache invalidate
ret lr
-ENDPROC(v7_flush_icache_all)
+SYM_FUNC_END(v7_flush_icache_all)
/*
* v7_flush_dcache_louis()
@@ -193,7 +194,7 @@ ENDPROC(v7_flush_dcache_all)
* unification in a single instruction.
*
*/
-ENTRY(v7_flush_kern_cache_all)
+SYM_TYPED_FUNC_START(v7_flush_kern_cache_all)
stmfd sp!, {r4-r6, r9-r10, lr}
bl v7_flush_dcache_all
mov r0, #0
@@ -201,7 +202,7 @@ ENTRY(v7_flush_kern_cache_all)
ALT_UP(mcr p15, 0, r0, c7, c5, 0) @ I+BTB cache invalidate
ldmfd sp!, {r4-r6, r9-r10, lr}
ret lr
-ENDPROC(v7_flush_kern_cache_all)
+SYM_FUNC_END(v7_flush_kern_cache_all)
/*
* v7_flush_kern_cache_louis(void)
@@ -209,7 +210,7 @@ ENDPROC(v7_flush_kern_cache_all)
* Flush the data cache up to Level of Unification Inner Shareable.
* Invalidate the I-cache to the point of unification.
*/
-ENTRY(v7_flush_kern_cache_louis)
+SYM_TYPED_FUNC_START(v7_flush_kern_cache_louis)
stmfd sp!, {r4-r6, r9-r10, lr}
bl v7_flush_dcache_louis
mov r0, #0
@@ -217,7 +218,7 @@ ENTRY(v7_flush_kern_cache_louis)
ALT_UP(mcr p15, 0, r0, c7, c5, 0) @ I+BTB cache invalidate
ldmfd sp!, {r4-r6, r9-r10, lr}
ret lr
-ENDPROC(v7_flush_kern_cache_louis)
+SYM_FUNC_END(v7_flush_kern_cache_louis)
/*
* v7_flush_cache_all()
@@ -226,8 +227,9 @@ ENDPROC(v7_flush_kern_cache_louis)
*
* - mm - mm_struct describing address space
*/
-ENTRY(v7_flush_user_cache_all)
- /*FALLTHROUGH*/
+SYM_TYPED_FUNC_START(v7_flush_user_cache_all)
+ ret lr
+SYM_FUNC_END(v7_flush_user_cache_all)
/*
* v7_flush_cache_range(start, end, flags)
@@ -241,10 +243,9 @@ ENTRY(v7_flush_user_cache_all)
* It is assumed that:
* - we have a VIPT cache.
*/
-ENTRY(v7_flush_user_cache_range)
+SYM_TYPED_FUNC_START(v7_flush_user_cache_range)
ret lr
-ENDPROC(v7_flush_user_cache_all)
-ENDPROC(v7_flush_user_cache_range)
+SYM_FUNC_END(v7_flush_user_cache_range)
/*
* v7_coherent_kern_range(start,end)
@@ -259,8 +260,11 @@ ENDPROC(v7_flush_user_cache_range)
* It is assumed that:
* - the Icache does not read data from the write buffer
*/
-ENTRY(v7_coherent_kern_range)
- /* FALLTHROUGH */
+SYM_TYPED_FUNC_START(v7_coherent_kern_range)
+#ifdef CONFIG_CFI_CLANG /* Fallthrough if !CFI */
+ b v7_coherent_user_range
+#endif
+SYM_FUNC_END(v7_coherent_kern_range)
/*
* v7_coherent_user_range(start,end)
@@ -275,7 +279,7 @@ ENTRY(v7_coherent_kern_range)
* It is assumed that:
* - the Icache does not read data from the write buffer
*/
-ENTRY(v7_coherent_user_range)
+SYM_TYPED_FUNC_START(v7_coherent_user_range)
UNWIND(.fnstart )
dcache_line_size r2, r3
sub r3, r2, #1
@@ -321,8 +325,7 @@ ENTRY(v7_coherent_user_range)
mov r0, #-EFAULT
ret lr
UNWIND(.fnend )
-ENDPROC(v7_coherent_kern_range)
-ENDPROC(v7_coherent_user_range)
+SYM_FUNC_END(v7_coherent_user_range)
/*
* v7_flush_kern_dcache_area(void *addr, size_t size)
@@ -333,7 +336,7 @@ ENDPROC(v7_coherent_user_range)
* - addr - kernel address
* - size - region size
*/
-ENTRY(v7_flush_kern_dcache_area)
+SYM_TYPED_FUNC_START(v7_flush_kern_dcache_area)
dcache_line_size r2, r3
add r1, r0, r1
sub r3, r2, #1
@@ -349,7 +352,7 @@ ENTRY(v7_flush_kern_dcache_area)
blo 1b
dsb st
ret lr
-ENDPROC(v7_flush_kern_dcache_area)
+SYM_FUNC_END(v7_flush_kern_dcache_area)
/*
* v7_dma_inv_range(start,end)
@@ -413,7 +416,7 @@ ENDPROC(v7_dma_clean_range)
* - start - virtual start address of region
* - end - virtual end address of region
*/
-ENTRY(v7_dma_flush_range)
+SYM_TYPED_FUNC_START(v7_dma_flush_range)
dcache_line_size r2, r3
sub r3, r2, #1
bic r0, r0, r3
@@ -428,7 +431,7 @@ ENTRY(v7_dma_flush_range)
blo 1b
dsb st
ret lr
-ENDPROC(v7_dma_flush_range)
+SYM_FUNC_END(v7_dma_flush_range)
/*
* dma_map_area(start, size, dir)
@@ -436,12 +439,12 @@ ENDPROC(v7_dma_flush_range)
* - size - size of region
* - dir - DMA direction
*/
-ENTRY(v7_dma_map_area)
+SYM_TYPED_FUNC_START(v7_dma_map_area)
add r1, r1, r0
teq r2, #DMA_FROM_DEVICE
beq v7_dma_inv_range
b v7_dma_clean_range
-ENDPROC(v7_dma_map_area)
+SYM_FUNC_END(v7_dma_map_area)
/*
* dma_unmap_area(start, size, dir)
@@ -449,34 +452,9 @@ ENDPROC(v7_dma_map_area)
* - size - size of region
* - dir - DMA direction
*/
-ENTRY(v7_dma_unmap_area)
+SYM_TYPED_FUNC_START(v7_dma_unmap_area)
add r1, r1, r0
teq r2, #DMA_TO_DEVICE
bne v7_dma_inv_range
ret lr
-ENDPROC(v7_dma_unmap_area)
-
- __INITDATA
-
- @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
- define_cache_functions v7
-
- /* The Broadcom Brahma-B15 read-ahead cache requires some modifications
- * to the v7_cache_fns, we only override the ones we need
- */
-#ifndef CONFIG_CACHE_B15_RAC
- globl_equ b15_flush_kern_cache_all, v7_flush_kern_cache_all
-#endif
- globl_equ b15_flush_icache_all, v7_flush_icache_all
- globl_equ b15_flush_kern_cache_louis, v7_flush_kern_cache_louis
- globl_equ b15_flush_user_cache_all, v7_flush_user_cache_all
- globl_equ b15_flush_user_cache_range, v7_flush_user_cache_range
- globl_equ b15_coherent_kern_range, v7_coherent_kern_range
- globl_equ b15_coherent_user_range, v7_coherent_user_range
- globl_equ b15_flush_kern_dcache_area, v7_flush_kern_dcache_area
-
- globl_equ b15_dma_map_area, v7_dma_map_area
- globl_equ b15_dma_unmap_area, v7_dma_unmap_area
- globl_equ b15_dma_flush_range, v7_dma_flush_range
-
- define_cache_functions b15
+SYM_FUNC_END(v7_dma_unmap_area)
diff --git a/arch/arm/mm/cache-v7m.S b/arch/arm/mm/cache-v7m.S
index eb60b5e5e2ad..14d719eba729 100644
--- a/arch/arm/mm/cache-v7m.S
+++ b/arch/arm/mm/cache-v7m.S
@@ -11,6 +11,7 @@
*/
#include <linux/linkage.h>
#include <linux/init.h>
+#include <linux/cfi_types.h>
#include <asm/assembler.h>
#include <asm/errno.h>
#include <asm/unwind.h>
@@ -159,10 +160,10 @@ ENDPROC(v7m_invalidate_l1)
* Registers:
* r0 - set to 0
*/
-ENTRY(v7m_flush_icache_all)
+SYM_TYPED_FUNC_START(v7m_flush_icache_all)
invalidate_icache r0
ret lr
-ENDPROC(v7m_flush_icache_all)
+SYM_FUNC_END(v7m_flush_icache_all)
/*
* v7m_flush_dcache_all()
@@ -236,13 +237,13 @@ ENDPROC(v7m_flush_dcache_all)
* unification in a single instruction.
*
*/
-ENTRY(v7m_flush_kern_cache_all)
+SYM_TYPED_FUNC_START(v7m_flush_kern_cache_all)
stmfd sp!, {r4-r7, r9-r11, lr}
bl v7m_flush_dcache_all
invalidate_icache r0
ldmfd sp!, {r4-r7, r9-r11, lr}
ret lr
-ENDPROC(v7m_flush_kern_cache_all)
+SYM_FUNC_END(v7m_flush_kern_cache_all)
/*
* v7m_flush_cache_all()
@@ -251,8 +252,9 @@ ENDPROC(v7m_flush_kern_cache_all)
*
* - mm - mm_struct describing address space
*/
-ENTRY(v7m_flush_user_cache_all)
- /*FALLTHROUGH*/
+SYM_TYPED_FUNC_START(v7m_flush_user_cache_all)
+ ret lr
+SYM_FUNC_END(v7m_flush_user_cache_all)
/*
* v7m_flush_cache_range(start, end, flags)
@@ -266,10 +268,9 @@ ENTRY(v7m_flush_user_cache_all)
* It is assumed that:
* - we have a VIPT cache.
*/
-ENTRY(v7m_flush_user_cache_range)
+SYM_TYPED_FUNC_START(v7m_flush_user_cache_range)
ret lr
-ENDPROC(v7m_flush_user_cache_all)
-ENDPROC(v7m_flush_user_cache_range)
+SYM_FUNC_END(v7m_flush_user_cache_range)
/*
* v7m_coherent_kern_range(start,end)
@@ -284,8 +285,11 @@ ENDPROC(v7m_flush_user_cache_range)
* It is assumed that:
* - the Icache does not read data from the write buffer
*/
-ENTRY(v7m_coherent_kern_range)
- /* FALLTHROUGH */
+SYM_TYPED_FUNC_START(v7m_coherent_kern_range)
+#ifdef CONFIG_CFI_CLANG /* Fallthrough if !CFI */
+ b v7m_coherent_user_range
+#endif
+SYM_FUNC_END(v7m_coherent_kern_range)
/*
* v7m_coherent_user_range(start,end)
@@ -300,7 +304,7 @@ ENTRY(v7m_coherent_kern_range)
* It is assumed that:
* - the Icache does not read data from the write buffer
*/
-ENTRY(v7m_coherent_user_range)
+SYM_TYPED_FUNC_START(v7m_coherent_user_range)
UNWIND(.fnstart )
dcache_line_size r2, r3
sub r3, r2, #1
@@ -328,8 +332,7 @@ ENTRY(v7m_coherent_user_range)
isb
ret lr
UNWIND(.fnend )
-ENDPROC(v7m_coherent_kern_range)
-ENDPROC(v7m_coherent_user_range)
+SYM_FUNC_END(v7m_coherent_user_range)
/*
* v7m_flush_kern_dcache_area(void *addr, size_t size)
@@ -340,7 +343,7 @@ ENDPROC(v7m_coherent_user_range)
* - addr - kernel address
* - size - region size
*/
-ENTRY(v7m_flush_kern_dcache_area)
+SYM_TYPED_FUNC_START(v7m_flush_kern_dcache_area)
dcache_line_size r2, r3
add r1, r0, r1
sub r3, r2, #1
@@ -352,7 +355,7 @@ ENTRY(v7m_flush_kern_dcache_area)
blo 1b
dsb st
ret lr
-ENDPROC(v7m_flush_kern_dcache_area)
+SYM_FUNC_END(v7m_flush_kern_dcache_area)
/*
* v7m_dma_inv_range(start,end)
@@ -408,7 +411,7 @@ ENDPROC(v7m_dma_clean_range)
* - start - virtual start address of region
* - end - virtual end address of region
*/
-ENTRY(v7m_dma_flush_range)
+SYM_TYPED_FUNC_START(v7m_dma_flush_range)
dcache_line_size r2, r3
sub r3, r2, #1
bic r0, r0, r3
@@ -419,7 +422,7 @@ ENTRY(v7m_dma_flush_range)
blo 1b
dsb st
ret lr
-ENDPROC(v7m_dma_flush_range)
+SYM_FUNC_END(v7m_dma_flush_range)
/*
* dma_map_area(start, size, dir)
@@ -427,12 +430,12 @@ ENDPROC(v7m_dma_flush_range)
* - size - size of region
* - dir - DMA direction
*/
-ENTRY(v7m_dma_map_area)
+SYM_TYPED_FUNC_START(v7m_dma_map_area)
add r1, r1, r0
teq r2, #DMA_FROM_DEVICE
beq v7m_dma_inv_range
b v7m_dma_clean_range
-ENDPROC(v7m_dma_map_area)
+SYM_FUNC_END(v7m_dma_map_area)
/*
* dma_unmap_area(start, size, dir)
@@ -440,17 +443,9 @@ ENDPROC(v7m_dma_map_area)
* - size - size of region
* - dir - DMA direction
*/
-ENTRY(v7m_dma_unmap_area)
+SYM_TYPED_FUNC_START(v7m_dma_unmap_area)
add r1, r1, r0
teq r2, #DMA_TO_DEVICE
bne v7m_dma_inv_range
ret lr
-ENDPROC(v7m_dma_unmap_area)
-
- .globl v7m_flush_kern_cache_louis
- .equ v7m_flush_kern_cache_louis, v7m_flush_kern_cache_all
-
- __INITDATA
-
- @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
- define_cache_functions v7m
+SYM_FUNC_END(v7m_dma_unmap_area)
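The conversions above follow two patterns. Where a pair of entry points shares one prototype, the old fallthrough becomes an alias or a plain branch; where the prototypes differ (coherent_kern_range() returns void but coherent_user_range() returns int), each symbol is opened with SYM_TYPED_FUNC_START and the branch is made explicit under CONFIG_CFI_CLANG. A minimal C sketch of why kCFI forbids the fallthrough (illustrative only, not kernel code):

	/*
	 * kCFI instruments indirect calls by comparing a type hash that the
	 * compiler emits immediately before each typed function symbol. A
	 * label reached only by falling through from the previous function
	 * carries no hash of its own, so every indirect call to it would
	 * trap; hence each entry point needs its own typed symbol and an
	 * explicit branch to the shared body.
	 */
	typedef void (*coherent_kern_fn)(unsigned long, unsigned long);
	typedef int  (*coherent_user_fn)(unsigned long, unsigned long);

	static int call_coherent_user(coherent_user_fn fn,
				      unsigned long start, unsigned long end)
	{
		/* conceptually inserted by the compiler:
		 *   if (hash_before(fn) != HASH(coherent_user_fn))
		 *           __cfi_trap();
		 */
		return fn(start, end);
	}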
diff --git a/arch/arm/mm/cache.c b/arch/arm/mm/cache.c
new file mode 100644
index 000000000000..e6fbc599c9ed
--- /dev/null
+++ b/arch/arm/mm/cache.c
@@ -0,0 +1,663 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * This file defines C prototypes for the low-level cache assembly functions
+ * and populates a vtable for each selected ARM CPU cache type.
+ */
+
+#include <linux/types.h>
+#include <asm/cacheflush.h>
+
+#ifdef CONFIG_CPU_CACHE_V4
+void v4_flush_icache_all(void);
+void v4_flush_kern_cache_all(void);
+void v4_flush_user_cache_all(void);
+void v4_flush_user_cache_range(unsigned long, unsigned long, unsigned int);
+void v4_coherent_kern_range(unsigned long, unsigned long);
+int v4_coherent_user_range(unsigned long, unsigned long);
+void v4_flush_kern_dcache_area(void *, size_t);
+void v4_dma_map_area(const void *, size_t, int);
+void v4_dma_unmap_area(const void *, size_t, int);
+void v4_dma_flush_range(const void *, const void *);
+
+struct cpu_cache_fns v4_cache_fns __initconst = {
+ .flush_icache_all = v4_flush_icache_all,
+ .flush_kern_all = v4_flush_kern_cache_all,
+ .flush_kern_louis = v4_flush_kern_cache_all,
+ .flush_user_all = v4_flush_user_cache_all,
+ .flush_user_range = v4_flush_user_cache_range,
+ .coherent_kern_range = v4_coherent_kern_range,
+ .coherent_user_range = v4_coherent_user_range,
+ .flush_kern_dcache_area = v4_flush_kern_dcache_area,
+ .dma_map_area = v4_dma_map_area,
+ .dma_unmap_area = v4_dma_unmap_area,
+ .dma_flush_range = v4_dma_flush_range,
+};
+#endif
+
+/* V4 write-back cache "V4WB" */
+#ifdef CONFIG_CPU_CACHE_V4WB
+void v4wb_flush_icache_all(void);
+void v4wb_flush_kern_cache_all(void);
+void v4wb_flush_user_cache_all(void);
+void v4wb_flush_user_cache_range(unsigned long, unsigned long, unsigned int);
+void v4wb_coherent_kern_range(unsigned long, unsigned long);
+int v4wb_coherent_user_range(unsigned long, unsigned long);
+void v4wb_flush_kern_dcache_area(void *, size_t);
+void v4wb_dma_map_area(const void *, size_t, int);
+void v4wb_dma_unmap_area(const void *, size_t, int);
+void v4wb_dma_flush_range(const void *, const void *);
+
+struct cpu_cache_fns v4wb_cache_fns __initconst = {
+ .flush_icache_all = v4wb_flush_icache_all,
+ .flush_kern_all = v4wb_flush_kern_cache_all,
+ .flush_kern_louis = v4wb_flush_kern_cache_all,
+ .flush_user_all = v4wb_flush_user_cache_all,
+ .flush_user_range = v4wb_flush_user_cache_range,
+ .coherent_kern_range = v4wb_coherent_kern_range,
+ .coherent_user_range = v4wb_coherent_user_range,
+ .flush_kern_dcache_area = v4wb_flush_kern_dcache_area,
+ .dma_map_area = v4wb_dma_map_area,
+ .dma_unmap_area = v4wb_dma_unmap_area,
+ .dma_flush_range = v4wb_dma_flush_range,
+};
+#endif
+
+/* V4 write-through cache "V4WT" */
+#ifdef CONFIG_CPU_CACHE_V4WT
+void v4wt_flush_icache_all(void);
+void v4wt_flush_kern_cache_all(void);
+void v4wt_flush_user_cache_all(void);
+void v4wt_flush_user_cache_range(unsigned long, unsigned long, unsigned int);
+void v4wt_coherent_kern_range(unsigned long, unsigned long);
+int v4wt_coherent_user_range(unsigned long, unsigned long);
+void v4wt_flush_kern_dcache_area(void *, size_t);
+void v4wt_dma_map_area(const void *, size_t, int);
+void v4wt_dma_unmap_area(const void *, size_t, int);
+void v4wt_dma_flush_range(const void *, const void *);
+
+struct cpu_cache_fns v4wt_cache_fns __initconst = {
+ .flush_icache_all = v4wt_flush_icache_all,
+ .flush_kern_all = v4wt_flush_kern_cache_all,
+ .flush_kern_louis = v4wt_flush_kern_cache_all,
+ .flush_user_all = v4wt_flush_user_cache_all,
+ .flush_user_range = v4wt_flush_user_cache_range,
+ .coherent_kern_range = v4wt_coherent_kern_range,
+ .coherent_user_range = v4wt_coherent_user_range,
+ .flush_kern_dcache_area = v4wt_flush_kern_dcache_area,
+ .dma_map_area = v4wt_dma_map_area,
+ .dma_unmap_area = v4wt_dma_unmap_area,
+ .dma_flush_range = v4wt_dma_flush_range,
+};
+#endif
+
+/* Faraday FA526 cache */
+#ifdef CONFIG_CPU_CACHE_FA
+void fa_flush_icache_all(void);
+void fa_flush_kern_cache_all(void);
+void fa_flush_user_cache_all(void);
+void fa_flush_user_cache_range(unsigned long, unsigned long, unsigned int);
+void fa_coherent_kern_range(unsigned long, unsigned long);
+int fa_coherent_user_range(unsigned long, unsigned long);
+void fa_flush_kern_dcache_area(void *, size_t);
+void fa_dma_map_area(const void *, size_t, int);
+void fa_dma_unmap_area(const void *, size_t, int);
+void fa_dma_flush_range(const void *, const void *);
+
+struct cpu_cache_fns fa_cache_fns __initconst = {
+ .flush_icache_all = fa_flush_icache_all,
+ .flush_kern_all = fa_flush_kern_cache_all,
+ .flush_kern_louis = fa_flush_kern_cache_all,
+ .flush_user_all = fa_flush_user_cache_all,
+ .flush_user_range = fa_flush_user_cache_range,
+ .coherent_kern_range = fa_coherent_kern_range,
+ .coherent_user_range = fa_coherent_user_range,
+ .flush_kern_dcache_area = fa_flush_kern_dcache_area,
+ .dma_map_area = fa_dma_map_area,
+ .dma_unmap_area = fa_dma_unmap_area,
+ .dma_flush_range = fa_dma_flush_range,
+};
+#endif
+
+#ifdef CONFIG_CPU_CACHE_V6
+void v6_flush_icache_all(void);
+void v6_flush_kern_cache_all(void);
+void v6_flush_user_cache_all(void);
+void v6_flush_user_cache_range(unsigned long, unsigned long, unsigned int);
+void v6_coherent_kern_range(unsigned long, unsigned long);
+int v6_coherent_user_range(unsigned long, unsigned long);
+void v6_flush_kern_dcache_area(void *, size_t);
+void v6_dma_map_area(const void *, size_t, int);
+void v6_dma_unmap_area(const void *, size_t, int);
+void v6_dma_flush_range(const void *, const void *);
+
+struct cpu_cache_fns v6_cache_fns __initconst = {
+ .flush_icache_all = v6_flush_icache_all,
+ .flush_kern_all = v6_flush_kern_cache_all,
+ .flush_kern_louis = v6_flush_kern_cache_all,
+ .flush_user_all = v6_flush_user_cache_all,
+ .flush_user_range = v6_flush_user_cache_range,
+ .coherent_kern_range = v6_coherent_kern_range,
+ .coherent_user_range = v6_coherent_user_range,
+ .flush_kern_dcache_area = v6_flush_kern_dcache_area,
+ .dma_map_area = v6_dma_map_area,
+ .dma_unmap_area = v6_dma_unmap_area,
+ .dma_flush_range = v6_dma_flush_range,
+};
+#endif
+
+#ifdef CONFIG_CPU_CACHE_V7
+void v7_flush_icache_all(void);
+void v7_flush_kern_cache_all(void);
+void v7_flush_kern_cache_louis(void);
+void v7_flush_user_cache_all(void);
+void v7_flush_user_cache_range(unsigned long, unsigned long, unsigned int);
+void v7_coherent_kern_range(unsigned long, unsigned long);
+int v7_coherent_user_range(unsigned long, unsigned long);
+void v7_flush_kern_dcache_area(void *, size_t);
+void v7_dma_map_area(const void *, size_t, int);
+void v7_dma_unmap_area(const void *, size_t, int);
+void v7_dma_flush_range(const void *, const void *);
+
+struct cpu_cache_fns v7_cache_fns __initconst = {
+ .flush_icache_all = v7_flush_icache_all,
+ .flush_kern_all = v7_flush_kern_cache_all,
+ .flush_kern_louis = v7_flush_kern_cache_louis,
+ .flush_user_all = v7_flush_user_cache_all,
+ .flush_user_range = v7_flush_user_cache_range,
+ .coherent_kern_range = v7_coherent_kern_range,
+ .coherent_user_range = v7_coherent_user_range,
+ .flush_kern_dcache_area = v7_flush_kern_dcache_area,
+ .dma_map_area = v7_dma_map_area,
+ .dma_unmap_area = v7_dma_unmap_area,
+ .dma_flush_range = v7_dma_flush_range,
+};
+
+/* Special quirky cache flush function for Broadcom B15 v7 caches */
+void b15_flush_kern_cache_all(void);
+
+struct cpu_cache_fns b15_cache_fns __initconst = {
+ .flush_icache_all = v7_flush_icache_all,
+#ifdef CONFIG_CACHE_B15_RAC
+ .flush_kern_all = b15_flush_kern_cache_all,
+#else
+ .flush_kern_all = v7_flush_kern_cache_all,
+#endif
+ .flush_kern_louis = v7_flush_kern_cache_louis,
+ .flush_user_all = v7_flush_user_cache_all,
+ .flush_user_range = v7_flush_user_cache_range,
+ .coherent_kern_range = v7_coherent_kern_range,
+ .coherent_user_range = v7_coherent_user_range,
+ .flush_kern_dcache_area = v7_flush_kern_dcache_area,
+ .dma_map_area = v7_dma_map_area,
+ .dma_unmap_area = v7_dma_unmap_area,
+ .dma_flush_range = v7_dma_flush_range,
+};
+#endif
+
+/* The NOP cache is just a set of dummy stubs that by definition do nothing */
+#ifdef CONFIG_CPU_CACHE_NOP
+void nop_flush_icache_all(void);
+void nop_flush_kern_cache_all(void);
+void nop_flush_user_cache_all(void);
+void nop_flush_user_cache_range(unsigned long start, unsigned long end, unsigned int flags);
+void nop_coherent_kern_range(unsigned long start, unsigned long end);
+int nop_coherent_user_range(unsigned long, unsigned long);
+void nop_flush_kern_dcache_area(void *kaddr, size_t size);
+void nop_dma_map_area(const void *start, size_t size, int flags);
+void nop_dma_unmap_area(const void *start, size_t size, int flags);
+void nop_dma_flush_range(const void *start, const void *end);
+
+struct cpu_cache_fns nop_cache_fns __initconst = {
+ .flush_icache_all = nop_flush_icache_all,
+ .flush_kern_all = nop_flush_kern_cache_all,
+ .flush_kern_louis = nop_flush_kern_cache_all,
+ .flush_user_all = nop_flush_user_cache_all,
+ .flush_user_range = nop_flush_user_cache_range,
+ .coherent_kern_range = nop_coherent_kern_range,
+ .coherent_user_range = nop_coherent_user_range,
+ .flush_kern_dcache_area = nop_flush_kern_dcache_area,
+ .dma_map_area = nop_dma_map_area,
+ .dma_unmap_area = nop_dma_unmap_area,
+ .dma_flush_range = nop_dma_flush_range,
+};
+#endif
+
+#ifdef CONFIG_CPU_CACHE_V7M
+void v7m_flush_icache_all(void);
+void v7m_flush_kern_cache_all(void);
+void v7m_flush_user_cache_all(void);
+void v7m_flush_user_cache_range(unsigned long, unsigned long, unsigned int);
+void v7m_coherent_kern_range(unsigned long, unsigned long);
+int v7m_coherent_user_range(unsigned long, unsigned long);
+void v7m_flush_kern_dcache_area(void *, size_t);
+void v7m_dma_map_area(const void *, size_t, int);
+void v7m_dma_unmap_area(const void *, size_t, int);
+void v7m_dma_flush_range(const void *, const void *);
+
+struct cpu_cache_fns v7m_cache_fns __initconst = {
+ .flush_icache_all = v7m_flush_icache_all,
+ .flush_kern_all = v7m_flush_kern_cache_all,
+ .flush_kern_louis = v7m_flush_kern_cache_all,
+ .flush_user_all = v7m_flush_user_cache_all,
+ .flush_user_range = v7m_flush_user_cache_range,
+ .coherent_kern_range = v7m_coherent_kern_range,
+ .coherent_user_range = v7m_coherent_user_range,
+ .flush_kern_dcache_area = v7m_flush_kern_dcache_area,
+ .dma_map_area = v7m_dma_map_area,
+ .dma_unmap_area = v7m_dma_unmap_area,
+ .dma_flush_range = v7m_dma_flush_range,
+};
+#endif
+
+#ifdef CONFIG_CPU_ARM1020
+void arm1020_flush_icache_all(void);
+void arm1020_flush_kern_cache_all(void);
+void arm1020_flush_user_cache_all(void);
+void arm1020_flush_user_cache_range(unsigned long, unsigned long, unsigned int);
+void arm1020_coherent_kern_range(unsigned long, unsigned long);
+int arm1020_coherent_user_range(unsigned long, unsigned long);
+void arm1020_flush_kern_dcache_area(void *, size_t);
+void arm1020_dma_map_area(const void *, size_t, int);
+void arm1020_dma_unmap_area(const void *, size_t, int);
+void arm1020_dma_flush_range(const void *, const void *);
+
+struct cpu_cache_fns arm1020_cache_fns __initconst = {
+ .flush_icache_all = arm1020_flush_icache_all,
+ .flush_kern_all = arm1020_flush_kern_cache_all,
+ .flush_kern_louis = arm1020_flush_kern_cache_all,
+ .flush_user_all = arm1020_flush_user_cache_all,
+ .flush_user_range = arm1020_flush_user_cache_range,
+ .coherent_kern_range = arm1020_coherent_kern_range,
+ .coherent_user_range = arm1020_coherent_user_range,
+ .flush_kern_dcache_area = arm1020_flush_kern_dcache_area,
+ .dma_map_area = arm1020_dma_map_area,
+ .dma_unmap_area = arm1020_dma_unmap_area,
+ .dma_flush_range = arm1020_dma_flush_range,
+};
+#endif
+
+#ifdef CONFIG_CPU_ARM1020E
+void arm1020e_flush_icache_all(void);
+void arm1020e_flush_kern_cache_all(void);
+void arm1020e_flush_user_cache_all(void);
+void arm1020e_flush_user_cache_range(unsigned long, unsigned long, unsigned int);
+void arm1020e_coherent_kern_range(unsigned long, unsigned long);
+int arm1020e_coherent_user_range(unsigned long, unsigned long);
+void arm1020e_flush_kern_dcache_area(void *, size_t);
+void arm1020e_dma_map_area(const void *, size_t, int);
+void arm1020e_dma_unmap_area(const void *, size_t, int);
+void arm1020e_dma_flush_range(const void *, const void *);
+
+struct cpu_cache_fns arm1020e_cache_fns __initconst = {
+ .flush_icache_all = arm1020e_flush_icache_all,
+ .flush_kern_all = arm1020e_flush_kern_cache_all,
+ .flush_kern_louis = arm1020e_flush_kern_cache_all,
+ .flush_user_all = arm1020e_flush_user_cache_all,
+ .flush_user_range = arm1020e_flush_user_cache_range,
+ .coherent_kern_range = arm1020e_coherent_kern_range,
+ .coherent_user_range = arm1020e_coherent_user_range,
+ .flush_kern_dcache_area = arm1020e_flush_kern_dcache_area,
+ .dma_map_area = arm1020e_dma_map_area,
+ .dma_unmap_area = arm1020e_dma_unmap_area,
+ .dma_flush_range = arm1020e_dma_flush_range,
+};
+#endif
+
+#ifdef CONFIG_CPU_ARM1022
+void arm1022_flush_icache_all(void);
+void arm1022_flush_kern_cache_all(void);
+void arm1022_flush_user_cache_all(void);
+void arm1022_flush_user_cache_range(unsigned long, unsigned long, unsigned int);
+void arm1022_coherent_kern_range(unsigned long, unsigned long);
+int arm1022_coherent_user_range(unsigned long, unsigned long);
+void arm1022_flush_kern_dcache_area(void *, size_t);
+void arm1022_dma_map_area(const void *, size_t, int);
+void arm1022_dma_unmap_area(const void *, size_t, int);
+void arm1022_dma_flush_range(const void *, const void *);
+
+struct cpu_cache_fns arm1022_cache_fns __initconst = {
+ .flush_icache_all = arm1022_flush_icache_all,
+ .flush_kern_all = arm1022_flush_kern_cache_all,
+ .flush_kern_louis = arm1022_flush_kern_cache_all,
+ .flush_user_all = arm1022_flush_user_cache_all,
+ .flush_user_range = arm1022_flush_user_cache_range,
+ .coherent_kern_range = arm1022_coherent_kern_range,
+ .coherent_user_range = arm1022_coherent_user_range,
+ .flush_kern_dcache_area = arm1022_flush_kern_dcache_area,
+ .dma_map_area = arm1022_dma_map_area,
+ .dma_unmap_area = arm1022_dma_unmap_area,
+ .dma_flush_range = arm1022_dma_flush_range,
+};
+#endif
+
+#ifdef CONFIG_CPU_ARM1026
+void arm1026_flush_icache_all(void);
+void arm1026_flush_kern_cache_all(void);
+void arm1026_flush_user_cache_all(void);
+void arm1026_flush_user_cache_range(unsigned long, unsigned long, unsigned int);
+void arm1026_coherent_kern_range(unsigned long, unsigned long);
+int arm1026_coherent_user_range(unsigned long, unsigned long);
+void arm1026_flush_kern_dcache_area(void *, size_t);
+void arm1026_dma_map_area(const void *, size_t, int);
+void arm1026_dma_unmap_area(const void *, size_t, int);
+void arm1026_dma_flush_range(const void *, const void *);
+
+struct cpu_cache_fns arm1026_cache_fns __initconst = {
+ .flush_icache_all = arm1026_flush_icache_all,
+ .flush_kern_all = arm1026_flush_kern_cache_all,
+ .flush_kern_louis = arm1026_flush_kern_cache_all,
+ .flush_user_all = arm1026_flush_user_cache_all,
+ .flush_user_range = arm1026_flush_user_cache_range,
+ .coherent_kern_range = arm1026_coherent_kern_range,
+ .coherent_user_range = arm1026_coherent_user_range,
+ .flush_kern_dcache_area = arm1026_flush_kern_dcache_area,
+ .dma_map_area = arm1026_dma_map_area,
+ .dma_unmap_area = arm1026_dma_unmap_area,
+ .dma_flush_range = arm1026_dma_flush_range,
+};
+#endif
+
+#if defined(CONFIG_CPU_ARM920T) && !defined(CONFIG_CPU_DCACHE_WRITETHROUGH)
+void arm920_flush_icache_all(void);
+void arm920_flush_kern_cache_all(void);
+void arm920_flush_user_cache_all(void);
+void arm920_flush_user_cache_range(unsigned long, unsigned long, unsigned int);
+void arm920_coherent_kern_range(unsigned long, unsigned long);
+int arm920_coherent_user_range(unsigned long, unsigned long);
+void arm920_flush_kern_dcache_area(void *, size_t);
+void arm920_dma_map_area(const void *, size_t, int);
+void arm920_dma_unmap_area(const void *, size_t, int);
+void arm920_dma_flush_range(const void *, const void *);
+
+struct cpu_cache_fns arm920_cache_fns __initconst = {
+ .flush_icache_all = arm920_flush_icache_all,
+ .flush_kern_all = arm920_flush_kern_cache_all,
+ .flush_kern_louis = arm920_flush_kern_cache_all,
+ .flush_user_all = arm920_flush_user_cache_all,
+ .flush_user_range = arm920_flush_user_cache_range,
+ .coherent_kern_range = arm920_coherent_kern_range,
+ .coherent_user_range = arm920_coherent_user_range,
+ .flush_kern_dcache_area = arm920_flush_kern_dcache_area,
+ .dma_map_area = arm920_dma_map_area,
+ .dma_unmap_area = arm920_dma_unmap_area,
+ .dma_flush_range = arm920_dma_flush_range,
+};
+#endif
+
+#if defined(CONFIG_CPU_ARM922T) && !defined(CONFIG_CPU_DCACHE_WRITETHROUGH)
+void arm922_flush_icache_all(void);
+void arm922_flush_kern_cache_all(void);
+void arm922_flush_user_cache_all(void);
+void arm922_flush_user_cache_range(unsigned long, unsigned long, unsigned int);
+void arm922_coherent_kern_range(unsigned long, unsigned long);
+int arm922_coherent_user_range(unsigned long, unsigned long);
+void arm922_flush_kern_dcache_area(void *, size_t);
+void arm922_dma_map_area(const void *, size_t, int);
+void arm922_dma_unmap_area(const void *, size_t, int);
+void arm922_dma_flush_range(const void *, const void *);
+
+struct cpu_cache_fns arm922_cache_fns __initconst = {
+ .flush_icache_all = arm922_flush_icache_all,
+ .flush_kern_all = arm922_flush_kern_cache_all,
+ .flush_kern_louis = arm922_flush_kern_cache_all,
+ .flush_user_all = arm922_flush_user_cache_all,
+ .flush_user_range = arm922_flush_user_cache_range,
+ .coherent_kern_range = arm922_coherent_kern_range,
+ .coherent_user_range = arm922_coherent_user_range,
+ .flush_kern_dcache_area = arm922_flush_kern_dcache_area,
+ .dma_map_area = arm922_dma_map_area,
+ .dma_unmap_area = arm922_dma_unmap_area,
+ .dma_flush_range = arm922_dma_flush_range,
+};
+#endif
+
+#ifdef CONFIG_CPU_ARM925T
+void arm925_flush_icache_all(void);
+void arm925_flush_kern_cache_all(void);
+void arm925_flush_user_cache_all(void);
+void arm925_flush_user_cache_range(unsigned long, unsigned long, unsigned int);
+void arm925_coherent_kern_range(unsigned long, unsigned long);
+int arm925_coherent_user_range(unsigned long, unsigned long);
+void arm925_flush_kern_dcache_area(void *, size_t);
+void arm925_dma_map_area(const void *, size_t, int);
+void arm925_dma_unmap_area(const void *, size_t, int);
+void arm925_dma_flush_range(const void *, const void *);
+
+struct cpu_cache_fns arm925_cache_fns __initconst = {
+ .flush_icache_all = arm925_flush_icache_all,
+ .flush_kern_all = arm925_flush_kern_cache_all,
+ .flush_kern_louis = arm925_flush_kern_cache_all,
+ .flush_user_all = arm925_flush_user_cache_all,
+ .flush_user_range = arm925_flush_user_cache_range,
+ .coherent_kern_range = arm925_coherent_kern_range,
+ .coherent_user_range = arm925_coherent_user_range,
+ .flush_kern_dcache_area = arm925_flush_kern_dcache_area,
+ .dma_map_area = arm925_dma_map_area,
+ .dma_unmap_area = arm925_dma_unmap_area,
+ .dma_flush_range = arm925_dma_flush_range,
+};
+#endif
+
+#ifdef CONFIG_CPU_ARM926T
+void arm926_flush_icache_all(void);
+void arm926_flush_kern_cache_all(void);
+void arm926_flush_user_cache_all(void);
+void arm926_flush_user_cache_range(unsigned long, unsigned long, unsigned int);
+void arm926_coherent_kern_range(unsigned long, unsigned long);
+int arm926_coherent_user_range(unsigned long, unsigned long);
+void arm926_flush_kern_dcache_area(void *, size_t);
+void arm926_dma_map_area(const void *, size_t, int);
+void arm926_dma_unmap_area(const void *, size_t, int);
+void arm926_dma_flush_range(const void *, const void *);
+
+struct cpu_cache_fns arm926_cache_fns __initconst = {
+ .flush_icache_all = arm926_flush_icache_all,
+ .flush_kern_all = arm926_flush_kern_cache_all,
+ .flush_kern_louis = arm926_flush_kern_cache_all,
+ .flush_user_all = arm926_flush_user_cache_all,
+ .flush_user_range = arm926_flush_user_cache_range,
+ .coherent_kern_range = arm926_coherent_kern_range,
+ .coherent_user_range = arm926_coherent_user_range,
+ .flush_kern_dcache_area = arm926_flush_kern_dcache_area,
+ .dma_map_area = arm926_dma_map_area,
+ .dma_unmap_area = arm926_dma_unmap_area,
+ .dma_flush_range = arm926_dma_flush_range,
+};
+#endif
+
+#ifdef CONFIG_CPU_ARM940T
+void arm940_flush_icache_all(void);
+void arm940_flush_kern_cache_all(void);
+void arm940_flush_user_cache_all(void);
+void arm940_flush_user_cache_range(unsigned long, unsigned long, unsigned int);
+void arm940_coherent_kern_range(unsigned long, unsigned long);
+int arm940_coherent_user_range(unsigned long, unsigned long);
+void arm940_flush_kern_dcache_area(void *, size_t);
+void arm940_dma_map_area(const void *, size_t, int);
+void arm940_dma_unmap_area(const void *, size_t, int);
+void arm940_dma_flush_range(const void *, const void *);
+
+struct cpu_cache_fns arm940_cache_fns __initconst = {
+ .flush_icache_all = arm940_flush_icache_all,
+ .flush_kern_all = arm940_flush_kern_cache_all,
+ .flush_kern_louis = arm940_flush_kern_cache_all,
+ .flush_user_all = arm940_flush_user_cache_all,
+ .flush_user_range = arm940_flush_user_cache_range,
+ .coherent_kern_range = arm940_coherent_kern_range,
+ .coherent_user_range = arm940_coherent_user_range,
+ .flush_kern_dcache_area = arm940_flush_kern_dcache_area,
+ .dma_map_area = arm940_dma_map_area,
+ .dma_unmap_area = arm940_dma_unmap_area,
+ .dma_flush_range = arm940_dma_flush_range,
+};
+#endif
+
+#ifdef CONFIG_CPU_ARM946E
+void arm946_flush_icache_all(void);
+void arm946_flush_kern_cache_all(void);
+void arm946_flush_user_cache_all(void);
+void arm946_flush_user_cache_range(unsigned long, unsigned long, unsigned int);
+void arm946_coherent_kern_range(unsigned long, unsigned long);
+int arm946_coherent_user_range(unsigned long, unsigned long);
+void arm946_flush_kern_dcache_area(void *, size_t);
+void arm946_dma_map_area(const void *, size_t, int);
+void arm946_dma_unmap_area(const void *, size_t, int);
+void arm946_dma_flush_range(const void *, const void *);
+
+struct cpu_cache_fns arm946_cache_fns __initconst = {
+ .flush_icache_all = arm946_flush_icache_all,
+ .flush_kern_all = arm946_flush_kern_cache_all,
+ .flush_kern_louis = arm946_flush_kern_cache_all,
+ .flush_user_all = arm946_flush_user_cache_all,
+ .flush_user_range = arm946_flush_user_cache_range,
+ .coherent_kern_range = arm946_coherent_kern_range,
+ .coherent_user_range = arm946_coherent_user_range,
+ .flush_kern_dcache_area = arm946_flush_kern_dcache_area,
+ .dma_map_area = arm946_dma_map_area,
+ .dma_unmap_area = arm946_dma_unmap_area,
+ .dma_flush_range = arm946_dma_flush_range,
+};
+#endif
+
+#ifdef CONFIG_CPU_XSCALE
+void xscale_flush_icache_all(void);
+void xscale_flush_kern_cache_all(void);
+void xscale_flush_user_cache_all(void);
+void xscale_flush_user_cache_range(unsigned long, unsigned long, unsigned int);
+void xscale_coherent_kern_range(unsigned long, unsigned long);
+int xscale_coherent_user_range(unsigned long, unsigned long);
+void xscale_flush_kern_dcache_area(void *, size_t);
+void xscale_dma_map_area(const void *, size_t, int);
+void xscale_dma_unmap_area(const void *, size_t, int);
+void xscale_dma_flush_range(const void *, const void *);
+
+struct cpu_cache_fns xscale_cache_fns __initconst = {
+ .flush_icache_all = xscale_flush_icache_all,
+ .flush_kern_all = xscale_flush_kern_cache_all,
+ .flush_kern_louis = xscale_flush_kern_cache_all,
+ .flush_user_all = xscale_flush_user_cache_all,
+ .flush_user_range = xscale_flush_user_cache_range,
+ .coherent_kern_range = xscale_coherent_kern_range,
+ .coherent_user_range = xscale_coherent_user_range,
+ .flush_kern_dcache_area = xscale_flush_kern_dcache_area,
+ .dma_map_area = xscale_dma_map_area,
+ .dma_unmap_area = xscale_dma_unmap_area,
+ .dma_flush_range = xscale_dma_flush_range,
+};
+
+/* The 80200 A0 and A1 need a special quirk for dma_map_area() */
+void xscale_80200_A0_A1_dma_map_area(const void *, size_t, int);
+
+struct cpu_cache_fns xscale_80200_A0_A1_cache_fns __initconst = {
+ .flush_icache_all = xscale_flush_icache_all,
+ .flush_kern_all = xscale_flush_kern_cache_all,
+ .flush_kern_louis = xscale_flush_kern_cache_all,
+ .flush_user_all = xscale_flush_user_cache_all,
+ .flush_user_range = xscale_flush_user_cache_range,
+ .coherent_kern_range = xscale_coherent_kern_range,
+ .coherent_user_range = xscale_coherent_user_range,
+ .flush_kern_dcache_area = xscale_flush_kern_dcache_area,
+ .dma_map_area = xscale_80200_A0_A1_dma_map_area,
+ .dma_unmap_area = xscale_dma_unmap_area,
+ .dma_flush_range = xscale_dma_flush_range,
+};
+#endif
+
+#ifdef CONFIG_CPU_XSC3
+void xsc3_flush_icache_all(void);
+void xsc3_flush_kern_cache_all(void);
+void xsc3_flush_user_cache_all(void);
+void xsc3_flush_user_cache_range(unsigned long, unsigned long, unsigned int);
+void xsc3_coherent_kern_range(unsigned long, unsigned long);
+int xsc3_coherent_user_range(unsigned long, unsigned long);
+void xsc3_flush_kern_dcache_area(void *, size_t);
+void xsc3_dma_map_area(const void *, size_t, int);
+void xsc3_dma_unmap_area(const void *, size_t, int);
+void xsc3_dma_flush_range(const void *, const void *);
+
+struct cpu_cache_fns xsc3_cache_fns __initconst = {
+ .flush_icache_all = xsc3_flush_icache_all,
+ .flush_kern_all = xsc3_flush_kern_cache_all,
+ .flush_kern_louis = xsc3_flush_kern_cache_all,
+ .flush_user_all = xsc3_flush_user_cache_all,
+ .flush_user_range = xsc3_flush_user_cache_range,
+ .coherent_kern_range = xsc3_coherent_kern_range,
+ .coherent_user_range = xsc3_coherent_user_range,
+ .flush_kern_dcache_area = xsc3_flush_kern_dcache_area,
+ .dma_map_area = xsc3_dma_map_area,
+ .dma_unmap_area = xsc3_dma_unmap_area,
+ .dma_flush_range = xsc3_dma_flush_range,
+};
+#endif
+
+#ifdef CONFIG_CPU_MOHAWK
+void mohawk_flush_icache_all(void);
+void mohawk_flush_kern_cache_all(void);
+void mohawk_flush_user_cache_all(void);
+void mohawk_flush_user_cache_range(unsigned long, unsigned long, unsigned int);
+void mohawk_coherent_kern_range(unsigned long, unsigned long);
+int mohawk_coherent_user_range(unsigned long, unsigned long);
+void mohawk_flush_kern_dcache_area(void *, size_t);
+void mohawk_dma_map_area(const void *, size_t, int);
+void mohawk_dma_unmap_area(const void *, size_t, int);
+void mohawk_dma_flush_range(const void *, const void *);
+
+struct cpu_cache_fns mohawk_cache_fns __initconst = {
+ .flush_icache_all = mohawk_flush_icache_all,
+ .flush_kern_all = mohawk_flush_kern_cache_all,
+ .flush_kern_louis = mohawk_flush_kern_cache_all,
+ .flush_user_all = mohawk_flush_user_cache_all,
+ .flush_user_range = mohawk_flush_user_cache_range,
+ .coherent_kern_range = mohawk_coherent_kern_range,
+ .coherent_user_range = mohawk_coherent_user_range,
+ .flush_kern_dcache_area = mohawk_flush_kern_dcache_area,
+ .dma_map_area = mohawk_dma_map_area,
+ .dma_unmap_area = mohawk_dma_unmap_area,
+ .dma_flush_range = mohawk_dma_flush_range,
+};
+#endif
+
+#ifdef CONFIG_CPU_FEROCEON
+void feroceon_flush_icache_all(void);
+void feroceon_flush_kern_cache_all(void);
+void feroceon_flush_user_cache_all(void);
+void feroceon_flush_user_cache_range(unsigned long, unsigned long, unsigned int);
+void feroceon_coherent_kern_range(unsigned long, unsigned long);
+int feroceon_coherent_user_range(unsigned long, unsigned long);
+void feroceon_flush_kern_dcache_area(void *, size_t);
+void feroceon_dma_map_area(const void *, size_t, int);
+void feroceon_dma_unmap_area(const void *, size_t, int);
+void feroceon_dma_flush_range(const void *, const void *);
+
+struct cpu_cache_fns feroceon_cache_fns __initconst = {
+ .flush_icache_all = feroceon_flush_icache_all,
+ .flush_kern_all = feroceon_flush_kern_cache_all,
+ .flush_kern_louis = feroceon_flush_kern_cache_all,
+ .flush_user_all = feroceon_flush_user_cache_all,
+ .flush_user_range = feroceon_flush_user_cache_range,
+ .coherent_kern_range = feroceon_coherent_kern_range,
+ .coherent_user_range = feroceon_coherent_user_range,
+ .flush_kern_dcache_area = feroceon_flush_kern_dcache_area,
+ .dma_map_area = feroceon_dma_map_area,
+ .dma_unmap_area = feroceon_dma_unmap_area,
+ .dma_flush_range = feroceon_dma_flush_range,
+};
+
+void feroceon_range_flush_kern_dcache_area(void *, size_t);
+void feroceon_range_dma_map_area(const void *, size_t, int);
+void feroceon_range_dma_flush_range(const void *, const void *);
+
+struct cpu_cache_fns feroceon_range_cache_fns __initconst = {
+ .flush_icache_all = feroceon_flush_icache_all,
+ .flush_kern_all = feroceon_flush_kern_cache_all,
+ .flush_kern_louis = feroceon_flush_kern_cache_all,
+ .flush_user_all = feroceon_flush_user_cache_all,
+ .flush_user_range = feroceon_flush_user_cache_range,
+ .coherent_kern_range = feroceon_coherent_kern_range,
+ .coherent_user_range = feroceon_coherent_user_range,
+ .flush_kern_dcache_area = feroceon_range_flush_kern_dcache_area,
+ .dma_map_area = feroceon_range_dma_map_area,
+ .dma_unmap_area = feroceon_dma_unmap_area,
+ .dma_flush_range = feroceon_range_dma_flush_range,
+};
+#endif
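For orientation: at boot exactly one of the tables above is selected for the running CPU and copied into the kernel's single live vtable, and generic code dispatches through that copy; the resulting indirect call is precisely the edge CFI instruments. A rough sketch of that consumption, assuming the conventional MULTI_CACHE arrangement (the installer function here is illustrative):

	struct cpu_cache_fns cpu_cache;		/* the live vtable */

	static void __init install_cache_fns(const struct cpu_cache_fns *fns)
	{
		cpu_cache = *fns;		/* e.g. &v7_cache_fns on ARMv7 */
	}

	static inline void flush_icache_all(void)
	{
		cpu_cache.flush_icache_all();	/* indirect, CFI-checked call */
	}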
diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
index 439dc6a26bb9..dfa9554ef331 100644
--- a/arch/arm/mm/fault.c
+++ b/arch/arm/mm/fault.c
@@ -242,6 +242,27 @@ static inline bool is_permission_fault(unsigned int fsr)
return false;
}
+#ifdef CONFIG_CPU_TTBR0_PAN
+static inline bool ttbr0_usermode_access_allowed(struct pt_regs *regs)
+{
+ struct svc_pt_regs *svcregs;
+
+ /* If we are in user mode: permission granted */
+ if (user_mode(regs))
+ return true;
+
+ /* uaccess state saved above pt_regs on SVC exception entry */
+ svcregs = to_svc_pt_regs(regs);
+
+ return !(svcregs->ttbcr & TTBCR_EPD0);
+}
+#else
+static inline bool ttbr0_usermode_access_allowed(struct pt_regs *regs)
+{
+ return true;
+}
+#endif
+
static int __kprobes
do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
{
@@ -285,6 +306,14 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr);
+ /*
+ * Privileged access aborts with CONFIG_CPU_TTBR0_PAN enabled are
+ * routed via the translation fault mechanism. Check whether uaccess
+ * is disabled while in kernel mode.
+ */
+ if (!ttbr0_usermode_access_allowed(regs))
+ goto no_context;
+
if (!(flags & FAULT_FLAG_USER))
goto lock_mmap;
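The test is cheap because this series makes the uaccess state architectural: with CONFIG_CPU_TTBR0_PAN, enabling and disabling user access toggles TTBCR.EPD0 (translation table walks via TTBR0, i.e. the user half of the address space), and SVC entry saves TTBCR alongside pt_regs. A hedged sketch of the predicate, assuming the LPAE TTBCR layout where EPD0 is bit 7:

	#define TTBCR_EPD0	(1 << 7)	/* assumed LPAE bit position */

	/*
	 * Sketch: EPD0 set means TTBR0 walks were disabled at exception
	 * entry, so a kernel-mode fault on a user address must not be
	 * handled as an ordinary page fault.
	 */
	static bool saved_uaccess_enabled(unsigned long saved_ttbcr)
	{
		return !(saved_ttbcr & TTBCR_EPD0);
	}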
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index c24e29c0b9a4..3f774856ca67 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -1687,9 +1687,8 @@ static void __init early_paging_init(const struct machine_desc *mdesc)
*/
cr = get_cr();
set_cr(cr & ~(CR_I | CR_C));
- asm("mrc p15, 0, %0, c2, c0, 2" : "=r" (ttbcr));
- asm volatile("mcr p15, 0, %0, c2, c0, 2"
- : : "r" (ttbcr & ~(3 << 8 | 3 << 10)));
+ ttbcr = cpu_get_ttbcr();
+ cpu_set_ttbcr(ttbcr & ~(3 << 8 | 3 << 10));
flush_cache_all();
/*
@@ -1701,7 +1700,7 @@ static void __init early_paging_init(const struct machine_desc *mdesc)
lpae_pgtables_remap(offset, pa_pgd);
/* Re-enable the caches and cacheable TLB walks */
- asm volatile("mcr p15, 0, %0, c2, c0, 2" : : "r" (ttbcr));
+ cpu_set_ttbcr(ttbcr);
set_cr(cr);
}
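cpu_get_ttbcr() and cpu_set_ttbcr() are the accessors introduced earlier in this series (ARM: 9356/2) to replace scattered asm statements. Their plausible shape is shown below only so the mmu.c hunk reads standalone; the real definitions live elsewhere in the tree:

	static inline unsigned int cpu_get_ttbcr(void)
	{
		unsigned int ttbcr;

		asm("mrc p15, 0, %0, c2, c0, 2" : "=r" (ttbcr));
		return ttbcr;
	}

	static inline void cpu_set_ttbcr(unsigned int ttbcr)
	{
		asm volatile("mcr p15, 0, %0, c2, c0, 2" : : "r" (ttbcr) : "memory");
	}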
diff --git a/arch/arm/mm/proc-arm1020.S b/arch/arm/mm/proc-arm1020.S
index 6837cf7a4812..d0ce3414a13e 100644
--- a/arch/arm/mm/proc-arm1020.S
+++ b/arch/arm/mm/proc-arm1020.S
@@ -11,6 +11,7 @@
*/
#include <linux/linkage.h>
#include <linux/init.h>
+#include <linux/cfi_types.h>
#include <linux/pgtable.h>
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
@@ -56,18 +57,20 @@
/*
* cpu_arm1020_proc_init()
*/
-ENTRY(cpu_arm1020_proc_init)
+SYM_TYPED_FUNC_START(cpu_arm1020_proc_init)
ret lr
+SYM_FUNC_END(cpu_arm1020_proc_init)
/*
* cpu_arm1020_proc_fin()
*/
-ENTRY(cpu_arm1020_proc_fin)
+SYM_TYPED_FUNC_START(cpu_arm1020_proc_fin)
mrc p15, 0, r0, c1, c0, 0 @ ctrl register
bic r0, r0, #0x1000 @ ...i............
bic r0, r0, #0x000e @ ............wca.
mcr p15, 0, r0, c1, c0, 0 @ disable caches
ret lr
+SYM_FUNC_END(cpu_arm1020_proc_fin)
/*
* cpu_arm1020_reset(loc)
@@ -80,7 +83,7 @@ ENTRY(cpu_arm1020_proc_fin)
*/
.align 5
.pushsection .idmap.text, "ax"
-ENTRY(cpu_arm1020_reset)
+SYM_TYPED_FUNC_START(cpu_arm1020_reset)
mov ip, #0
mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches
mcr p15, 0, ip, c7, c10, 4 @ drain WB
@@ -92,16 +95,17 @@ ENTRY(cpu_arm1020_reset)
bic ip, ip, #0x1100 @ ...i...s........
mcr p15, 0, ip, c1, c0, 0 @ ctrl register
ret r0
-ENDPROC(cpu_arm1020_reset)
+SYM_FUNC_END(cpu_arm1020_reset)
.popsection
/*
* cpu_arm1020_do_idle()
*/
.align 5
-ENTRY(cpu_arm1020_do_idle)
+SYM_TYPED_FUNC_START(cpu_arm1020_do_idle)
mcr p15, 0, r0, c7, c0, 4 @ Wait for interrupt
ret lr
+SYM_FUNC_END(cpu_arm1020_do_idle)
/* ================================= CACHE ================================ */
@@ -112,13 +116,13 @@ ENTRY(cpu_arm1020_do_idle)
*
* Unconditionally clean and invalidate the entire icache.
*/
-ENTRY(arm1020_flush_icache_all)
+SYM_TYPED_FUNC_START(arm1020_flush_icache_all)
#ifndef CONFIG_CPU_ICACHE_DISABLE
mov r0, #0
mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
#endif
ret lr
-ENDPROC(arm1020_flush_icache_all)
+SYM_FUNC_END(arm1020_flush_icache_all)
/*
* flush_user_cache_all()
@@ -126,14 +130,14 @@ ENDPROC(arm1020_flush_icache_all)
* Invalidate all cache entries in a particular address
* space.
*/
-ENTRY(arm1020_flush_user_cache_all)
- /* FALLTHROUGH */
+SYM_FUNC_ALIAS(arm1020_flush_user_cache_all, arm1020_flush_kern_cache_all)
+
/*
* flush_kern_cache_all()
*
* Clean and invalidate the entire cache.
*/
-ENTRY(arm1020_flush_kern_cache_all)
+SYM_TYPED_FUNC_START(arm1020_flush_kern_cache_all)
mov r2, #VM_EXEC
mov ip, #0
__flush_whole_cache:
@@ -154,6 +158,7 @@ __flush_whole_cache:
#endif
mcrne p15, 0, ip, c7, c10, 4 @ drain WB
ret lr
+SYM_FUNC_END(arm1020_flush_kern_cache_all)
/*
* flush_user_cache_range(start, end, flags)
@@ -165,7 +170,7 @@ __flush_whole_cache:
* - end - end address (exclusive)
* - flags - vm_flags for this space
*/
-ENTRY(arm1020_flush_user_cache_range)
+SYM_TYPED_FUNC_START(arm1020_flush_user_cache_range)
mov ip, #0
sub r3, r1, r0 @ calculate total size
cmp r3, #CACHE_DLIMIT
@@ -185,6 +190,7 @@ ENTRY(arm1020_flush_user_cache_range)
#endif
mcrne p15, 0, ip, c7, c10, 4 @ drain WB
ret lr
+SYM_FUNC_END(arm1020_flush_user_cache_range)
/*
* coherent_kern_range(start, end)
@@ -196,8 +202,11 @@ ENTRY(arm1020_flush_user_cache_range)
* - start - virtual start address
* - end - virtual end address
*/
-ENTRY(arm1020_coherent_kern_range)
- /* FALLTHROUGH */
+SYM_TYPED_FUNC_START(arm1020_coherent_kern_range)
+#ifdef CONFIG_CFI_CLANG /* Fallthrough if !CFI */
+ b arm1020_coherent_user_range
+#endif
+SYM_FUNC_END(arm1020_coherent_kern_range)
/*
* coherent_user_range(start, end)
@@ -209,7 +218,7 @@ ENTRY(arm1020_coherent_kern_range)
* - start - virtual start address
* - end - virtual end address
*/
-ENTRY(arm1020_coherent_user_range)
+SYM_TYPED_FUNC_START(arm1020_coherent_user_range)
mov ip, #0
bic r0, r0, #CACHE_DLINESIZE - 1
mcr p15, 0, ip, c7, c10, 4
@@ -227,6 +236,7 @@ ENTRY(arm1020_coherent_user_range)
mcr p15, 0, ip, c7, c10, 4 @ drain WB
mov r0, #0
ret lr
+SYM_FUNC_END(arm1020_coherent_user_range)
/*
* flush_kern_dcache_area(void *addr, size_t size)
@@ -237,7 +247,7 @@ ENTRY(arm1020_coherent_user_range)
* - addr - kernel address
* - size - region size
*/
-ENTRY(arm1020_flush_kern_dcache_area)
+SYM_TYPED_FUNC_START(arm1020_flush_kern_dcache_area)
mov ip, #0
#ifndef CONFIG_CPU_DCACHE_DISABLE
add r1, r0, r1
@@ -249,6 +259,7 @@ ENTRY(arm1020_flush_kern_dcache_area)
#endif
mcr p15, 0, ip, c7, c10, 4 @ drain WB
ret lr
+SYM_FUNC_END(arm1020_flush_kern_dcache_area)
/*
* dma_inv_range(start, end)
@@ -314,7 +325,7 @@ arm1020_dma_clean_range:
* - start - virtual start address
* - end - virtual end address
*/
-ENTRY(arm1020_dma_flush_range)
+SYM_TYPED_FUNC_START(arm1020_dma_flush_range)
mov ip, #0
#ifndef CONFIG_CPU_DCACHE_DISABLE
bic r0, r0, #CACHE_DLINESIZE - 1
@@ -327,6 +338,7 @@ ENTRY(arm1020_dma_flush_range)
#endif
mcr p15, 0, ip, c7, c10, 4 @ drain WB
ret lr
+SYM_FUNC_END(arm1020_dma_flush_range)
/*
* dma_map_area(start, size, dir)
@@ -334,13 +346,13 @@ ENTRY(arm1020_dma_flush_range)
* - size - size of region
* - dir - DMA direction
*/
-ENTRY(arm1020_dma_map_area)
+SYM_TYPED_FUNC_START(arm1020_dma_map_area)
add r1, r1, r0
cmp r2, #DMA_TO_DEVICE
beq arm1020_dma_clean_range
bcs arm1020_dma_inv_range
b arm1020_dma_flush_range
-ENDPROC(arm1020_dma_map_area)
+SYM_FUNC_END(arm1020_dma_map_area)
/*
* dma_unmap_area(start, size, dir)
@@ -348,18 +360,12 @@ ENDPROC(arm1020_dma_map_area)
* - size - size of region
* - dir - DMA direction
*/
-ENTRY(arm1020_dma_unmap_area)
+SYM_TYPED_FUNC_START(arm1020_dma_unmap_area)
ret lr
-ENDPROC(arm1020_dma_unmap_area)
-
- .globl arm1020_flush_kern_cache_louis
- .equ arm1020_flush_kern_cache_louis, arm1020_flush_kern_cache_all
-
- @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
- define_cache_functions arm1020
+SYM_FUNC_END(arm1020_dma_unmap_area)
.align 5
-ENTRY(cpu_arm1020_dcache_clean_area)
+SYM_TYPED_FUNC_START(cpu_arm1020_dcache_clean_area)
#ifndef CONFIG_CPU_DCACHE_DISABLE
mov ip, #0
1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
@@ -369,6 +375,7 @@ ENTRY(cpu_arm1020_dcache_clean_area)
bhi 1b
#endif
ret lr
+SYM_FUNC_END(cpu_arm1020_dcache_clean_area)
/* =============================== PageTable ============================== */
@@ -380,7 +387,7 @@ ENTRY(cpu_arm1020_dcache_clean_area)
* pgd: new page tables
*/
.align 5
-ENTRY(cpu_arm1020_switch_mm)
+SYM_TYPED_FUNC_START(cpu_arm1020_switch_mm)
#ifdef CONFIG_MMU
#ifndef CONFIG_CPU_DCACHE_DISABLE
mcr p15, 0, r3, c7, c10, 4
@@ -408,14 +415,15 @@ ENTRY(cpu_arm1020_switch_mm)
mcr p15, 0, r1, c8, c7, 0 @ invalidate I & D TLBs
#endif /* CONFIG_MMU */
ret lr
-
+SYM_FUNC_END(cpu_arm1020_switch_mm)
+
/*
* cpu_arm1020_set_pte(ptep, pte)
*
* Set a PTE and flush it out
*/
.align 5
-ENTRY(cpu_arm1020_set_pte_ext)
+SYM_TYPED_FUNC_START(cpu_arm1020_set_pte_ext)
#ifdef CONFIG_MMU
armv3_set_pte_ext
mov r0, r0
@@ -426,6 +434,7 @@ ENTRY(cpu_arm1020_set_pte_ext)
mcr p15, 0, r0, c7, c10, 4 @ drain WB
#endif /* CONFIG_MMU */
ret lr
+SYM_FUNC_END(cpu_arm1020_set_pte_ext)
.type __arm1020_setup, #function
__arm1020_setup:
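The dma_map_area() dispatch converted above is dense in assembly; in C it reads as a three-way switch on the DMA direction. A sketch follows (the range helpers are the file-local assembly routines, declared here only for illustration; direction constants are those from <linux/dma-direction.h>):

	extern void arm1020_dma_clean_range(const void *start, const void *end);
	extern void arm1020_dma_inv_range(const void *start, const void *end);
	extern void arm1020_dma_flush_range(const void *start, const void *end);

	static void dma_map_area_sketch(const void *start, size_t size, int dir)
	{
		const void *end = (const char *)start + size;

		if (dir == DMA_TO_DEVICE)		/* beq: clean (write back) */
			arm1020_dma_clean_range(start, end);
		else if (dir == DMA_FROM_DEVICE)	/* bcs: invalidate only */
			arm1020_dma_inv_range(start, end);
		else					/* bidirectional: clean+inv */
			arm1020_dma_flush_range(start, end);
	}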
diff --git a/arch/arm/mm/proc-arm1020e.S b/arch/arm/mm/proc-arm1020e.S
index df49b10250b8..64f031bf6eff 100644
--- a/arch/arm/mm/proc-arm1020e.S
+++ b/arch/arm/mm/proc-arm1020e.S
@@ -11,6 +11,7 @@
*/
#include <linux/linkage.h>
#include <linux/init.h>
+#include <linux/cfi_types.h>
#include <linux/pgtable.h>
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
@@ -56,18 +57,20 @@
/*
* cpu_arm1020e_proc_init()
*/
-ENTRY(cpu_arm1020e_proc_init)
+SYM_TYPED_FUNC_START(cpu_arm1020e_proc_init)
ret lr
+SYM_FUNC_END(cpu_arm1020e_proc_init)
/*
* cpu_arm1020e_proc_fin()
*/
-ENTRY(cpu_arm1020e_proc_fin)
+SYM_TYPED_FUNC_START(cpu_arm1020e_proc_fin)
mrc p15, 0, r0, c1, c0, 0 @ ctrl register
bic r0, r0, #0x1000 @ ...i............
bic r0, r0, #0x000e @ ............wca.
mcr p15, 0, r0, c1, c0, 0 @ disable caches
ret lr
+SYM_FUNC_END(cpu_arm1020e_proc_fin)
/*
* cpu_arm1020e_reset(loc)
@@ -80,7 +83,7 @@ ENTRY(cpu_arm1020e_proc_fin)
*/
.align 5
.pushsection .idmap.text, "ax"
-ENTRY(cpu_arm1020e_reset)
+SYM_TYPED_FUNC_START(cpu_arm1020e_reset)
mov ip, #0
mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches
mcr p15, 0, ip, c7, c10, 4 @ drain WB
@@ -92,16 +95,17 @@ ENTRY(cpu_arm1020e_reset)
bic ip, ip, #0x1100 @ ...i...s........
mcr p15, 0, ip, c1, c0, 0 @ ctrl register
ret r0
-ENDPROC(cpu_arm1020e_reset)
+SYM_FUNC_END(cpu_arm1020e_reset)
.popsection
/*
* cpu_arm1020e_do_idle()
*/
.align 5
-ENTRY(cpu_arm1020e_do_idle)
+SYM_TYPED_FUNC_START(cpu_arm1020e_do_idle)
mcr p15, 0, r0, c7, c0, 4 @ Wait for interrupt
ret lr
+SYM_FUNC_END(cpu_arm1020e_do_idle)
/* ================================= CACHE ================================ */
@@ -112,13 +116,13 @@ ENTRY(cpu_arm1020e_do_idle)
*
* Unconditionally clean and invalidate the entire icache.
*/
-ENTRY(arm1020e_flush_icache_all)
+SYM_TYPED_FUNC_START(arm1020e_flush_icache_all)
#ifndef CONFIG_CPU_ICACHE_DISABLE
mov r0, #0
mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
#endif
ret lr
-ENDPROC(arm1020e_flush_icache_all)
+SYM_FUNC_END(arm1020e_flush_icache_all)
/*
* flush_user_cache_all()
@@ -126,14 +130,14 @@ ENDPROC(arm1020e_flush_icache_all)
* Invalidate all cache entries in a particular address
* space.
*/
-ENTRY(arm1020e_flush_user_cache_all)
- /* FALLTHROUGH */
+SYM_FUNC_ALIAS(arm1020e_flush_user_cache_all, arm1020e_flush_kern_cache_all)
+
/*
* flush_kern_cache_all()
*
* Clean and invalidate the entire cache.
*/
-ENTRY(arm1020e_flush_kern_cache_all)
+SYM_TYPED_FUNC_START(arm1020e_flush_kern_cache_all)
mov r2, #VM_EXEC
mov ip, #0
__flush_whole_cache:
@@ -153,6 +157,7 @@ __flush_whole_cache:
#endif
mcrne p15, 0, ip, c7, c10, 4 @ drain WB
ret lr
+SYM_FUNC_END(arm1020e_flush_kern_cache_all)
/*
* flush_user_cache_range(start, end, flags)
@@ -164,7 +169,7 @@ __flush_whole_cache:
* - end - end address (exclusive)
* - flags - vm_flags for this space
*/
-ENTRY(arm1020e_flush_user_cache_range)
+SYM_TYPED_FUNC_START(arm1020e_flush_user_cache_range)
mov ip, #0
sub r3, r1, r0 @ calculate total size
cmp r3, #CACHE_DLIMIT
@@ -182,6 +187,7 @@ ENTRY(arm1020e_flush_user_cache_range)
#endif
mcrne p15, 0, ip, c7, c10, 4 @ drain WB
ret lr
+SYM_FUNC_END(arm1020e_flush_user_cache_range)
/*
* coherent_kern_range(start, end)
@@ -193,8 +199,12 @@ ENTRY(arm1020e_flush_user_cache_range)
* - start - virtual start address
* - end - virtual end address
*/
-ENTRY(arm1020e_coherent_kern_range)
- /* FALLTHROUGH */
+SYM_TYPED_FUNC_START(arm1020e_coherent_kern_range)
+#ifdef CONFIG_CFI_CLANG /* Fallthrough if !CFI */
+ b arm1020e_coherent_user_range
+#endif
+SYM_FUNC_END(arm1020e_coherent_kern_range)
+
/*
* coherent_user_range(start, end)
*
@@ -205,7 +215,7 @@ ENTRY(arm1020e_coherent_kern_range)
* - start - virtual start address
* - end - virtual end address
*/
-ENTRY(arm1020e_coherent_user_range)
+SYM_TYPED_FUNC_START(arm1020e_coherent_user_range)
mov ip, #0
bic r0, r0, #CACHE_DLINESIZE - 1
1:
@@ -221,6 +231,7 @@ ENTRY(arm1020e_coherent_user_range)
mcr p15, 0, ip, c7, c10, 4 @ drain WB
mov r0, #0
ret lr
+SYM_FUNC_END(arm1020e_coherent_user_range)
/*
* flush_kern_dcache_area(void *addr, size_t size)
@@ -231,7 +242,7 @@ ENTRY(arm1020e_coherent_user_range)
* - addr - kernel address
* - size - region size
*/
-ENTRY(arm1020e_flush_kern_dcache_area)
+SYM_TYPED_FUNC_START(arm1020e_flush_kern_dcache_area)
mov ip, #0
#ifndef CONFIG_CPU_DCACHE_DISABLE
add r1, r0, r1
@@ -242,6 +253,7 @@ ENTRY(arm1020e_flush_kern_dcache_area)
#endif
mcr p15, 0, ip, c7, c10, 4 @ drain WB
ret lr
+SYM_FUNC_END(arm1020e_flush_kern_dcache_area)
/*
* dma_inv_range(start, end)
@@ -302,7 +314,7 @@ arm1020e_dma_clean_range:
* - start - virtual start address
* - end - virtual end address
*/
-ENTRY(arm1020e_dma_flush_range)
+SYM_TYPED_FUNC_START(arm1020e_dma_flush_range)
mov ip, #0
#ifndef CONFIG_CPU_DCACHE_DISABLE
bic r0, r0, #CACHE_DLINESIZE - 1
@@ -313,6 +325,7 @@ ENTRY(arm1020e_dma_flush_range)
#endif
mcr p15, 0, ip, c7, c10, 4 @ drain WB
ret lr
+SYM_FUNC_END(arm1020e_dma_flush_range)
/*
* dma_map_area(start, size, dir)
@@ -320,13 +333,13 @@ ENTRY(arm1020e_dma_flush_range)
* - size - size of region
* - dir - DMA direction
*/
-ENTRY(arm1020e_dma_map_area)
+SYM_TYPED_FUNC_START(arm1020e_dma_map_area)
add r1, r1, r0
cmp r2, #DMA_TO_DEVICE
beq arm1020e_dma_clean_range
bcs arm1020e_dma_inv_range
b arm1020e_dma_flush_range
-ENDPROC(arm1020e_dma_map_area)
+SYM_FUNC_END(arm1020e_dma_map_area)
/*
* dma_unmap_area(start, size, dir)
@@ -334,18 +347,12 @@ ENDPROC(arm1020e_dma_map_area)
* - size - size of region
* - dir - DMA direction
*/
-ENTRY(arm1020e_dma_unmap_area)
+SYM_TYPED_FUNC_START(arm1020e_dma_unmap_area)
ret lr
-ENDPROC(arm1020e_dma_unmap_area)
-
- .globl arm1020e_flush_kern_cache_louis
- .equ arm1020e_flush_kern_cache_louis, arm1020e_flush_kern_cache_all
-
- @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
- define_cache_functions arm1020e
+SYM_FUNC_END(arm1020e_dma_unmap_area)
.align 5
-ENTRY(cpu_arm1020e_dcache_clean_area)
+SYM_TYPED_FUNC_START(cpu_arm1020e_dcache_clean_area)
#ifndef CONFIG_CPU_DCACHE_DISABLE
mov ip, #0
1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
@@ -354,6 +361,7 @@ ENTRY(cpu_arm1020e_dcache_clean_area)
bhi 1b
#endif
ret lr
+SYM_FUNC_END(cpu_arm1020e_dcache_clean_area)
/* =============================== PageTable ============================== */
@@ -365,7 +373,7 @@ ENTRY(cpu_arm1020e_dcache_clean_area)
* pgd: new page tables
*/
.align 5
-ENTRY(cpu_arm1020e_switch_mm)
+SYM_TYPED_FUNC_START(cpu_arm1020e_switch_mm)
#ifdef CONFIG_MMU
#ifndef CONFIG_CPU_DCACHE_DISABLE
mcr p15, 0, r3, c7, c10, 4
@@ -392,14 +400,15 @@ ENTRY(cpu_arm1020e_switch_mm)
mcr p15, 0, r1, c8, c7, 0 @ invalidate I & D TLBs
#endif
ret lr
-
+SYM_FUNC_END(cpu_arm1020e_switch_mm)
+
/*
* cpu_arm1020e_set_pte(ptep, pte)
*
* Set a PTE and flush it out
*/
.align 5
-ENTRY(cpu_arm1020e_set_pte_ext)
+SYM_TYPED_FUNC_START(cpu_arm1020e_set_pte_ext)
#ifdef CONFIG_MMU
armv3_set_pte_ext
mov r0, r0
@@ -408,6 +417,7 @@ ENTRY(cpu_arm1020e_set_pte_ext)
#endif
#endif /* CONFIG_MMU */
ret lr
+SYM_FUNC_END(cpu_arm1020e_set_pte_ext)
.type __arm1020e_setup, #function
__arm1020e_setup:
diff --git a/arch/arm/mm/proc-arm1022.S b/arch/arm/mm/proc-arm1022.S
index e89ce467f672..42ed5ed07252 100644
--- a/arch/arm/mm/proc-arm1022.S
+++ b/arch/arm/mm/proc-arm1022.S
@@ -11,6 +11,7 @@
*/
#include <linux/linkage.h>
#include <linux/init.h>
+#include <linux/cfi_types.h>
#include <linux/pgtable.h>
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
@@ -56,18 +57,20 @@
/*
* cpu_arm1022_proc_init()
*/
-ENTRY(cpu_arm1022_proc_init)
+SYM_TYPED_FUNC_START(cpu_arm1022_proc_init)
ret lr
+SYM_FUNC_END(cpu_arm1022_proc_init)
/*
* cpu_arm1022_proc_fin()
*/
-ENTRY(cpu_arm1022_proc_fin)
+SYM_TYPED_FUNC_START(cpu_arm1022_proc_fin)
mrc p15, 0, r0, c1, c0, 0 @ ctrl register
bic r0, r0, #0x1000 @ ...i............
bic r0, r0, #0x000e @ ............wca.
mcr p15, 0, r0, c1, c0, 0 @ disable caches
ret lr
+SYM_FUNC_END(cpu_arm1022_proc_fin)
/*
* cpu_arm1022_reset(loc)
@@ -80,7 +83,7 @@ ENTRY(cpu_arm1022_proc_fin)
*/
.align 5
.pushsection .idmap.text, "ax"
-ENTRY(cpu_arm1022_reset)
+SYM_TYPED_FUNC_START(cpu_arm1022_reset)
mov ip, #0
mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches
mcr p15, 0, ip, c7, c10, 4 @ drain WB
@@ -92,16 +95,17 @@ ENTRY(cpu_arm1022_reset)
bic ip, ip, #0x1100 @ ...i...s........
mcr p15, 0, ip, c1, c0, 0 @ ctrl register
ret r0
-ENDPROC(cpu_arm1022_reset)
+SYM_FUNC_END(cpu_arm1022_reset)
.popsection
/*
* cpu_arm1022_do_idle()
*/
.align 5
-ENTRY(cpu_arm1022_do_idle)
+SYM_TYPED_FUNC_START(cpu_arm1022_do_idle)
mcr p15, 0, r0, c7, c0, 4 @ Wait for interrupt
ret lr
+SYM_FUNC_END(cpu_arm1022_do_idle)
/* ================================= CACHE ================================ */
@@ -112,13 +116,13 @@ ENTRY(cpu_arm1022_do_idle)
*
* Unconditionally clean and invalidate the entire icache.
*/
-ENTRY(arm1022_flush_icache_all)
+SYM_TYPED_FUNC_START(arm1022_flush_icache_all)
#ifndef CONFIG_CPU_ICACHE_DISABLE
mov r0, #0
mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
#endif
ret lr
-ENDPROC(arm1022_flush_icache_all)
+SYM_FUNC_END(arm1022_flush_icache_all)
/*
* flush_user_cache_all()
@@ -126,14 +130,14 @@ ENDPROC(arm1022_flush_icache_all)
* Invalidate all cache entries in a particular address
* space.
*/
-ENTRY(arm1022_flush_user_cache_all)
- /* FALLTHROUGH */
+SYM_FUNC_ALIAS(arm1022_flush_user_cache_all, arm1022_flush_kern_cache_all)
+
/*
* flush_kern_cache_all()
*
* Clean and invalidate the entire cache.
*/
-ENTRY(arm1022_flush_kern_cache_all)
+SYM_TYPED_FUNC_START(arm1022_flush_kern_cache_all)
mov r2, #VM_EXEC
mov ip, #0
__flush_whole_cache:
@@ -152,6 +156,7 @@ __flush_whole_cache:
#endif
mcrne p15, 0, ip, c7, c10, 4 @ drain WB
ret lr
+SYM_FUNC_END(arm1022_flush_kern_cache_all)
/*
* flush_user_cache_range(start, end, flags)
@@ -163,7 +168,7 @@ __flush_whole_cache:
* - end - end address (exclusive)
* - flags - vm_flags for this space
*/
-ENTRY(arm1022_flush_user_cache_range)
+SYM_TYPED_FUNC_START(arm1022_flush_user_cache_range)
mov ip, #0
sub r3, r1, r0 @ calculate total size
cmp r3, #CACHE_DLIMIT
@@ -181,6 +186,7 @@ ENTRY(arm1022_flush_user_cache_range)
#endif
mcrne p15, 0, ip, c7, c10, 4 @ drain WB
ret lr
+SYM_FUNC_END(arm1022_flush_user_cache_range)
/*
* coherent_kern_range(start, end)
@@ -192,8 +198,11 @@ ENTRY(arm1022_flush_user_cache_range)
* - start - virtual start address
* - end - virtual end address
*/
-ENTRY(arm1022_coherent_kern_range)
- /* FALLTHROUGH */
+SYM_TYPED_FUNC_START(arm1022_coherent_kern_range)
+#ifdef CONFIG_CFI_CLANG /* Fallthrough if !CFI */
+ b arm1022_coherent_user_range
+#endif
+SYM_FUNC_END(arm1022_coherent_kern_range)
/*
* coherent_user_range(start, end)
@@ -205,7 +214,7 @@ ENTRY(arm1022_coherent_kern_range)
* - start - virtual start address
* - end - virtual end address
*/
-ENTRY(arm1022_coherent_user_range)
+SYM_TYPED_FUNC_START(arm1022_coherent_user_range)
mov ip, #0
bic r0, r0, #CACHE_DLINESIZE - 1
1:
@@ -221,6 +230,7 @@ ENTRY(arm1022_coherent_user_range)
mcr p15, 0, ip, c7, c10, 4 @ drain WB
mov r0, #0
ret lr
+SYM_FUNC_END(arm1022_coherent_user_range)
/*
* flush_kern_dcache_area(void *addr, size_t size)
@@ -231,7 +241,7 @@ ENTRY(arm1022_coherent_user_range)
* - addr - kernel address
* - size - region size
*/
-ENTRY(arm1022_flush_kern_dcache_area)
+SYM_TYPED_FUNC_START(arm1022_flush_kern_dcache_area)
mov ip, #0
#ifndef CONFIG_CPU_DCACHE_DISABLE
add r1, r0, r1
@@ -242,6 +252,7 @@ ENTRY(arm1022_flush_kern_dcache_area)
#endif
mcr p15, 0, ip, c7, c10, 4 @ drain WB
ret lr
+SYM_FUNC_END(arm1022_flush_kern_dcache_area)
/*
* dma_inv_range(start, end)
@@ -302,7 +313,7 @@ arm1022_dma_clean_range:
* - start - virtual start address
* - end - virtual end address
*/
-ENTRY(arm1022_dma_flush_range)
+SYM_TYPED_FUNC_START(arm1022_dma_flush_range)
mov ip, #0
#ifndef CONFIG_CPU_DCACHE_DISABLE
bic r0, r0, #CACHE_DLINESIZE - 1
@@ -313,6 +324,7 @@ ENTRY(arm1022_dma_flush_range)
#endif
mcr p15, 0, ip, c7, c10, 4 @ drain WB
ret lr
+SYM_FUNC_END(arm1022_dma_flush_range)
/*
* dma_map_area(start, size, dir)
@@ -320,13 +332,13 @@ ENTRY(arm1022_dma_flush_range)
* - size - size of region
* - dir - DMA direction
*/
-ENTRY(arm1022_dma_map_area)
+SYM_TYPED_FUNC_START(arm1022_dma_map_area)
add r1, r1, r0
cmp r2, #DMA_TO_DEVICE
beq arm1022_dma_clean_range
bcs arm1022_dma_inv_range
b arm1022_dma_flush_range
-ENDPROC(arm1022_dma_map_area)
+SYM_FUNC_END(arm1022_dma_map_area)
/*
* dma_unmap_area(start, size, dir)
@@ -334,18 +346,12 @@ ENDPROC(arm1022_dma_map_area)
* - size - size of region
* - dir - DMA direction
*/
-ENTRY(arm1022_dma_unmap_area)
+SYM_TYPED_FUNC_START(arm1022_dma_unmap_area)
ret lr
-ENDPROC(arm1022_dma_unmap_area)
-
- .globl arm1022_flush_kern_cache_louis
- .equ arm1022_flush_kern_cache_louis, arm1022_flush_kern_cache_all
-
- @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
- define_cache_functions arm1022
+SYM_FUNC_END(arm1022_dma_unmap_area)
.align 5
-ENTRY(cpu_arm1022_dcache_clean_area)
+SYM_TYPED_FUNC_START(cpu_arm1022_dcache_clean_area)
#ifndef CONFIG_CPU_DCACHE_DISABLE
mov ip, #0
1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
@@ -354,6 +360,7 @@ ENTRY(cpu_arm1022_dcache_clean_area)
bhi 1b
#endif
ret lr
+SYM_FUNC_END(cpu_arm1022_dcache_clean_area)
/* =============================== PageTable ============================== */
@@ -365,7 +372,7 @@ ENTRY(cpu_arm1022_dcache_clean_area)
* pgd: new page tables
*/
.align 5
-ENTRY(cpu_arm1022_switch_mm)
+SYM_TYPED_FUNC_START(cpu_arm1022_switch_mm)
#ifdef CONFIG_MMU
#ifndef CONFIG_CPU_DCACHE_DISABLE
mov r1, #(CACHE_DSEGMENTS - 1) << 5 @ 16 segments
@@ -385,14 +392,15 @@ ENTRY(cpu_arm1022_switch_mm)
mcr p15, 0, r1, c8, c7, 0 @ invalidate I & D TLBs
#endif
ret lr
-
+SYM_FUNC_END(cpu_arm1022_switch_mm)
+
/*
* cpu_arm1022_set_pte_ext(ptep, pte, ext)
*
* Set a PTE and flush it out
*/
.align 5
-ENTRY(cpu_arm1022_set_pte_ext)
+SYM_TYPED_FUNC_START(cpu_arm1022_set_pte_ext)
#ifdef CONFIG_MMU
armv3_set_pte_ext
mov r0, r0
@@ -401,6 +409,7 @@ ENTRY(cpu_arm1022_set_pte_ext)
#endif
#endif /* CONFIG_MMU */
ret lr
+SYM_FUNC_END(cpu_arm1022_set_pte_ext)
.type __arm1022_setup, #function
__arm1022_setup:
diff --git a/arch/arm/mm/proc-arm1026.S b/arch/arm/mm/proc-arm1026.S
index 7fdd1a205e8e..b3ae62cd553a 100644
--- a/arch/arm/mm/proc-arm1026.S
+++ b/arch/arm/mm/proc-arm1026.S
@@ -11,6 +11,7 @@
*/
#include <linux/linkage.h>
#include <linux/init.h>
+#include <linux/cfi_types.h>
#include <linux/pgtable.h>
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
@@ -56,18 +57,20 @@
/*
* cpu_arm1026_proc_init()
*/
-ENTRY(cpu_arm1026_proc_init)
+SYM_TYPED_FUNC_START(cpu_arm1026_proc_init)
ret lr
+SYM_FUNC_END(cpu_arm1026_proc_init)
/*
* cpu_arm1026_proc_fin()
*/
-ENTRY(cpu_arm1026_proc_fin)
+SYM_TYPED_FUNC_START(cpu_arm1026_proc_fin)
mrc p15, 0, r0, c1, c0, 0 @ ctrl register
bic r0, r0, #0x1000 @ ...i............
bic r0, r0, #0x000e @ ............wca.
mcr p15, 0, r0, c1, c0, 0 @ disable caches
ret lr
+SYM_FUNC_END(cpu_arm1026_proc_fin)
/*
* cpu_arm1026_reset(loc)
@@ -80,7 +83,7 @@ ENTRY(cpu_arm1026_proc_fin)
*/
.align 5
.pushsection .idmap.text, "ax"
-ENTRY(cpu_arm1026_reset)
+SYM_TYPED_FUNC_START(cpu_arm1026_reset)
mov ip, #0
mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches
mcr p15, 0, ip, c7, c10, 4 @ drain WB
@@ -92,16 +95,17 @@ ENTRY(cpu_arm1026_reset)
bic ip, ip, #0x1100 @ ...i...s........
mcr p15, 0, ip, c1, c0, 0 @ ctrl register
ret r0
-ENDPROC(cpu_arm1026_reset)
+SYM_FUNC_END(cpu_arm1026_reset)
.popsection
/*
* cpu_arm1026_do_idle()
*/
.align 5
-ENTRY(cpu_arm1026_do_idle)
+SYM_TYPED_FUNC_START(cpu_arm1026_do_idle)
mcr p15, 0, r0, c7, c0, 4 @ Wait for interrupt
ret lr
+SYM_FUNC_END(cpu_arm1026_do_idle)
/* ================================= CACHE ================================ */
@@ -112,13 +116,13 @@ ENTRY(cpu_arm1026_do_idle)
*
* Unconditionally clean and invalidate the entire icache.
*/
-ENTRY(arm1026_flush_icache_all)
+SYM_TYPED_FUNC_START(arm1026_flush_icache_all)
#ifndef CONFIG_CPU_ICACHE_DISABLE
mov r0, #0
mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
#endif
ret lr
-ENDPROC(arm1026_flush_icache_all)
+SYM_FUNC_END(arm1026_flush_icache_all)
/*
* flush_user_cache_all()
@@ -126,14 +130,14 @@ ENDPROC(arm1026_flush_icache_all)
* Invalidate all cache entries in a particular address
* space.
*/
-ENTRY(arm1026_flush_user_cache_all)
- /* FALLTHROUGH */
+SYM_FUNC_ALIAS(arm1026_flush_user_cache_all, arm1026_flush_kern_cache_all)
+
/*
* flush_kern_cache_all()
*
* Clean and invalidate the entire cache.
*/
-ENTRY(arm1026_flush_kern_cache_all)
+SYM_TYPED_FUNC_START(arm1026_flush_kern_cache_all)
mov r2, #VM_EXEC
mov ip, #0
__flush_whole_cache:
@@ -147,6 +151,7 @@ __flush_whole_cache:
#endif
mcrne p15, 0, ip, c7, c10, 4 @ drain WB
ret lr
+SYM_FUNC_END(arm1026_flush_kern_cache_all)
/*
* flush_user_cache_range(start, end, flags)
@@ -158,7 +163,7 @@ __flush_whole_cache:
* - end - end address (exclusive)
* - flags - vm_flags for this space
*/
-ENTRY(arm1026_flush_user_cache_range)
+SYM_TYPED_FUNC_START(arm1026_flush_user_cache_range)
mov ip, #0
sub r3, r1, r0 @ calculate total size
cmp r3, #CACHE_DLIMIT
@@ -176,6 +181,7 @@ ENTRY(arm1026_flush_user_cache_range)
#endif
mcrne p15, 0, ip, c7, c10, 4 @ drain WB
ret lr
+SYM_FUNC_END(arm1026_flush_user_cache_range)
/*
* coherent_kern_range(start, end)
@@ -187,8 +193,12 @@ ENTRY(arm1026_flush_user_cache_range)
* - start - virtual start address
* - end - virtual end address
*/
-ENTRY(arm1026_coherent_kern_range)
- /* FALLTHROUGH */
+SYM_TYPED_FUNC_START(arm1026_coherent_kern_range)
+#ifdef CONFIG_CFI_CLANG /* Fallthrough if !CFI */
+ b arm1026_coherent_user_range
+#endif
+SYM_FUNC_END(arm1026_coherent_kern_range)
+
/*
* coherent_user_range(start, end)
*
@@ -199,7 +209,7 @@ ENTRY(arm1026_coherent_kern_range)
* - start - virtual start address
* - end - virtual end address
*/
-ENTRY(arm1026_coherent_user_range)
+SYM_TYPED_FUNC_START(arm1026_coherent_user_range)
mov ip, #0
bic r0, r0, #CACHE_DLINESIZE - 1
1:
@@ -215,6 +225,7 @@ ENTRY(arm1026_coherent_user_range)
mcr p15, 0, ip, c7, c10, 4 @ drain WB
mov r0, #0
ret lr
+SYM_FUNC_END(arm1026_coherent_user_range)
/*
* flush_kern_dcache_area(void *addr, size_t size)
@@ -225,7 +236,7 @@ ENTRY(arm1026_coherent_user_range)
* - addr - kernel address
* - size - region size
*/
-ENTRY(arm1026_flush_kern_dcache_area)
+SYM_TYPED_FUNC_START(arm1026_flush_kern_dcache_area)
mov ip, #0
#ifndef CONFIG_CPU_DCACHE_DISABLE
add r1, r0, r1
@@ -236,6 +247,7 @@ ENTRY(arm1026_flush_kern_dcache_area)
#endif
mcr p15, 0, ip, c7, c10, 4 @ drain WB
ret lr
+SYM_FUNC_END(arm1026_flush_kern_dcache_area)
/*
* dma_inv_range(start, end)
@@ -296,7 +308,7 @@ arm1026_dma_clean_range:
* - start - virtual start address
* - end - virtual end address
*/
-ENTRY(arm1026_dma_flush_range)
+SYM_TYPED_FUNC_START(arm1026_dma_flush_range)
mov ip, #0
#ifndef CONFIG_CPU_DCACHE_DISABLE
bic r0, r0, #CACHE_DLINESIZE - 1
@@ -307,6 +319,7 @@ ENTRY(arm1026_dma_flush_range)
#endif
mcr p15, 0, ip, c7, c10, 4 @ drain WB
ret lr
+SYM_FUNC_END(arm1026_dma_flush_range)
/*
* dma_map_area(start, size, dir)
@@ -314,13 +327,13 @@ ENTRY(arm1026_dma_flush_range)
* - size - size of region
* - dir - DMA direction
*/
-ENTRY(arm1026_dma_map_area)
+SYM_TYPED_FUNC_START(arm1026_dma_map_area)
add r1, r1, r0
cmp r2, #DMA_TO_DEVICE
beq arm1026_dma_clean_range
bcs arm1026_dma_inv_range
b arm1026_dma_flush_range
-ENDPROC(arm1026_dma_map_area)
+SYM_FUNC_END(arm1026_dma_map_area)
/*
* dma_unmap_area(start, size, dir)
@@ -328,18 +341,12 @@ ENDPROC(arm1026_dma_map_area)
* - size - size of region
* - dir - DMA direction
*/
-ENTRY(arm1026_dma_unmap_area)
+SYM_TYPED_FUNC_START(arm1026_dma_unmap_area)
ret lr
-ENDPROC(arm1026_dma_unmap_area)
-
- .globl arm1026_flush_kern_cache_louis
- .equ arm1026_flush_kern_cache_louis, arm1026_flush_kern_cache_all
-
- @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
- define_cache_functions arm1026
+SYM_FUNC_END(arm1026_dma_unmap_area)
.align 5
-ENTRY(cpu_arm1026_dcache_clean_area)
+SYM_TYPED_FUNC_START(cpu_arm1026_dcache_clean_area)
#ifndef CONFIG_CPU_DCACHE_DISABLE
mov ip, #0
1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
@@ -348,6 +355,7 @@ ENTRY(cpu_arm1026_dcache_clean_area)
bhi 1b
#endif
ret lr
+SYM_FUNC_END(cpu_arm1026_dcache_clean_area)
/* =============================== PageTable ============================== */
@@ -359,7 +367,7 @@ ENTRY(cpu_arm1026_dcache_clean_area)
* pgd: new page tables
*/
.align 5
-ENTRY(cpu_arm1026_switch_mm)
+SYM_TYPED_FUNC_START(cpu_arm1026_switch_mm)
#ifdef CONFIG_MMU
mov r1, #0
#ifndef CONFIG_CPU_DCACHE_DISABLE
@@ -374,14 +382,15 @@ ENTRY(cpu_arm1026_switch_mm)
mcr p15, 0, r1, c8, c7, 0 @ invalidate I & D TLBs
#endif
ret lr
-
+SYM_FUNC_END(cpu_arm1026_switch_mm)
+
/*
* cpu_arm1026_set_pte_ext(ptep, pte, ext)
*
* Set a PTE and flush it out
*/
.align 5
-ENTRY(cpu_arm1026_set_pte_ext)
+SYM_TYPED_FUNC_START(cpu_arm1026_set_pte_ext)
#ifdef CONFIG_MMU
armv3_set_pte_ext
mov r0, r0
@@ -390,6 +399,7 @@ ENTRY(cpu_arm1026_set_pte_ext)
#endif
#endif /* CONFIG_MMU */
ret lr
+SYM_FUNC_END(cpu_arm1026_set_pte_ext)
.type __arm1026_setup, #function
__arm1026_setup:
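Throughout this series the conversion is mechanical: ENTRY()/ENDPROC() pairs become SYM_TYPED_FUNC_START()/SYM_FUNC_END(), and with CONFIG_CFI_CLANG the typed macro emits a hash of the function's type ahead of each entry point so that indirect calls through the per-CPU cache and processor vtables can be validated; the asm-side define_cache_functions tables and flush_kern_cache_louis .equ aliases disappear because those vtables are now built in C. A minimal C model of the check follows — the hash-before-entry layout and all names here are illustrative, not the kernel's exact kCFI encoding:

#include <stdint.h>
#include <stdlib.h>
#include <stddef.h>

/* Model: a 32-bit hash of the function's type signature sits in the
 * word just before its entry point. The caller knows the hash for the
 * pointer type it holds and compares before branching indirectly. */
typedef void (*dma_unmap_fn)(unsigned long start, size_t size, int dir);

static void check_cfi_and_call(dma_unmap_fn fn, uint32_t expected_hash,
			       unsigned long start, size_t size, int dir)
{
	const uint32_t *entry = (const uint32_t *)(uintptr_t)fn;

	if (entry[-1] != expected_hash)	/* hash word precedes the entry */
		abort();		/* models the CFI trap */
	fn(start, size, dir);
}

A plain ENTRY symbol carries no hash word, so once callers are instrumented every indirect call into it would trap; that is why even trivial ret-lr stubs need the typed annotation.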
diff --git a/arch/arm/mm/proc-arm720.S b/arch/arm/mm/proc-arm720.S
index 3b687e6dd9fd..59732c334e1d 100644
--- a/arch/arm/mm/proc-arm720.S
+++ b/arch/arm/mm/proc-arm720.S
@@ -20,6 +20,7 @@
*/
#include <linux/linkage.h>
#include <linux/init.h>
+#include <linux/cfi_types.h>
#include <linux/pgtable.h>
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
@@ -35,24 +36,30 @@
*
* Notes : This processor does not require these
*/
-ENTRY(cpu_arm720_dcache_clean_area)
-ENTRY(cpu_arm720_proc_init)
+SYM_TYPED_FUNC_START(cpu_arm720_dcache_clean_area)
ret lr
+SYM_FUNC_END(cpu_arm720_dcache_clean_area)
-ENTRY(cpu_arm720_proc_fin)
+SYM_TYPED_FUNC_START(cpu_arm720_proc_init)
+ ret lr
+SYM_FUNC_END(cpu_arm720_proc_init)
+
+SYM_TYPED_FUNC_START(cpu_arm720_proc_fin)
mrc p15, 0, r0, c1, c0, 0
bic r0, r0, #0x1000 @ ...i............
bic r0, r0, #0x000e @ ............wca.
mcr p15, 0, r0, c1, c0, 0 @ disable caches
ret lr
+SYM_FUNC_END(cpu_arm720_proc_fin)
/*
* Function: arm720_proc_do_idle(void)
* Params : r0 = unused
* Purpose : put the processor in proper idle mode
*/
-ENTRY(cpu_arm720_do_idle)
+SYM_TYPED_FUNC_START(cpu_arm720_do_idle)
ret lr
+SYM_FUNC_END(cpu_arm720_do_idle)
/*
* Function: arm720_switch_mm(unsigned long pgd_phys)
@@ -60,7 +67,7 @@ ENTRY(cpu_arm720_do_idle)
* Purpose : Perform a task switch, saving the old process' state and restoring
* the new.
*/
-ENTRY(cpu_arm720_switch_mm)
+SYM_TYPED_FUNC_START(cpu_arm720_switch_mm)
#ifdef CONFIG_MMU
mov r1, #0
mcr p15, 0, r1, c7, c7, 0 @ invalidate cache
@@ -68,6 +75,7 @@ ENTRY(cpu_arm720_switch_mm)
mcr p15, 0, r1, c8, c7, 0 @ flush TLB (v4)
#endif
ret lr
+SYM_FUNC_END(cpu_arm720_switch_mm)
/*
* Function: arm720_set_pte_ext(pte_t *ptep, pte_t pte, unsigned int ext)
@@ -76,11 +84,12 @@ ENTRY(cpu_arm720_switch_mm)
* Purpose : Set a PTE and flush it out of any WB cache
*/
.align 5
-ENTRY(cpu_arm720_set_pte_ext)
+SYM_TYPED_FUNC_START(cpu_arm720_set_pte_ext)
#ifdef CONFIG_MMU
armv3_set_pte_ext wc_disable=0
#endif
ret lr
+SYM_FUNC_END(cpu_arm720_set_pte_ext)
/*
* Function: arm720_reset
@@ -88,7 +97,7 @@ ENTRY(cpu_arm720_set_pte_ext)
* Notes : This sets up everything for a reset
*/
.pushsection .idmap.text, "ax"
-ENTRY(cpu_arm720_reset)
+SYM_TYPED_FUNC_START(cpu_arm720_reset)
mov ip, #0
mcr p15, 0, ip, c7, c7, 0 @ invalidate cache
#ifdef CONFIG_MMU
@@ -99,7 +108,7 @@ ENTRY(cpu_arm720_reset)
bic ip, ip, #0x2100 @ ..v....s........
mcr p15, 0, ip, c1, c0, 0 @ ctrl register
ret r0
-ENDPROC(cpu_arm720_reset)
+SYM_FUNC_END(cpu_arm720_reset)
.popsection
.type __arm710_setup, #function
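proc-arm720.S above shows why the old stacked-ENTRY idiom had to go: cpu_arm720_dcache_clean_area and cpu_arm720_proc_init used to share a single ret lr by falling through consecutive labels. A typed function needs its own entry with its own hash, and the two ops have different prototypes, so the stub body is duplicated rather than shared. Roughly, in C terms (the signatures are sketches, not copied from proc-fns.h):

typedef void (*proc_init_fn)(void);
typedef void (*dcache_clean_fn)(void *addr, unsigned int size);

/* Two trivial ops with different signatures: one symbol cannot carry
 * both type hashes, so each gets its own definition. */
static void cpu_foo_proc_init(void)
{
	/* nothing to do on this core */
}

static void cpu_foo_dcache_clean_area(void *addr, unsigned int size)
{
	(void)addr;	/* write-through cache: nothing to clean */
	(void)size;
}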
diff --git a/arch/arm/mm/proc-arm740.S b/arch/arm/mm/proc-arm740.S
index f2ec3bc60874..78854df63964 100644
--- a/arch/arm/mm/proc-arm740.S
+++ b/arch/arm/mm/proc-arm740.S
@@ -6,6 +6,7 @@
*/
#include <linux/linkage.h>
#include <linux/init.h>
+#include <linux/cfi_types.h>
#include <linux/pgtable.h>
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
@@ -24,21 +25,32 @@
*
* These are not required.
*/
-ENTRY(cpu_arm740_proc_init)
-ENTRY(cpu_arm740_do_idle)
-ENTRY(cpu_arm740_dcache_clean_area)
-ENTRY(cpu_arm740_switch_mm)
+SYM_TYPED_FUNC_START(cpu_arm740_proc_init)
ret lr
+SYM_FUNC_END(cpu_arm740_proc_init)
+
+SYM_TYPED_FUNC_START(cpu_arm740_do_idle)
+ ret lr
+SYM_FUNC_END(cpu_arm740_do_idle)
+
+SYM_TYPED_FUNC_START(cpu_arm740_dcache_clean_area)
+ ret lr
+SYM_FUNC_END(cpu_arm740_dcache_clean_area)
+
+SYM_TYPED_FUNC_START(cpu_arm740_switch_mm)
+ ret lr
+SYM_FUNC_END(cpu_arm740_switch_mm)
/*
* cpu_arm740_proc_fin()
*/
-ENTRY(cpu_arm740_proc_fin)
+SYM_TYPED_FUNC_START(cpu_arm740_proc_fin)
mrc p15, 0, r0, c1, c0, 0
bic r0, r0, #0x3f000000 @ bank/f/lock/s
bic r0, r0, #0x0000000c @ w-buffer/cache
mcr p15, 0, r0, c1, c0, 0 @ disable caches
ret lr
+SYM_FUNC_END(cpu_arm740_proc_fin)
/*
* cpu_arm740_reset(loc)
@@ -46,14 +58,14 @@ ENTRY(cpu_arm740_proc_fin)
* Notes : This sets up everything for a reset
*/
.pushsection .idmap.text, "ax"
-ENTRY(cpu_arm740_reset)
+SYM_TYPED_FUNC_START(cpu_arm740_reset)
mov ip, #0
mcr p15, 0, ip, c7, c0, 0 @ invalidate cache
mrc p15, 0, ip, c1, c0, 0 @ get ctrl register
bic ip, ip, #0x0000000c @ ............wc..
mcr p15, 0, ip, c1, c0, 0 @ ctrl register
ret r0
-ENDPROC(cpu_arm740_reset)
+SYM_FUNC_END(cpu_arm740_reset)
.popsection
.type __arm740_setup, #function
diff --git a/arch/arm/mm/proc-arm7tdmi.S b/arch/arm/mm/proc-arm7tdmi.S
index 01bbe7576c1c..baa3d4472147 100644
--- a/arch/arm/mm/proc-arm7tdmi.S
+++ b/arch/arm/mm/proc-arm7tdmi.S
@@ -6,6 +6,7 @@
*/
#include <linux/linkage.h>
#include <linux/init.h>
+#include <linux/cfi_types.h>
#include <linux/pgtable.h>
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
@@ -23,18 +24,29 @@
* cpu_arm7tdmi_switch_mm()
*
* These are not required.
- */
-ENTRY(cpu_arm7tdmi_proc_init)
-ENTRY(cpu_arm7tdmi_do_idle)
-ENTRY(cpu_arm7tdmi_dcache_clean_area)
-ENTRY(cpu_arm7tdmi_switch_mm)
- ret lr
+ */
+SYM_TYPED_FUNC_START(cpu_arm7tdmi_proc_init)
+ ret lr
+SYM_FUNC_END(cpu_arm7tdmi_proc_init)
+
+SYM_TYPED_FUNC_START(cpu_arm7tdmi_do_idle)
+ ret lr
+SYM_FUNC_END(cpu_arm7tdmi_do_idle)
+
+SYM_TYPED_FUNC_START(cpu_arm7tdmi_dcache_clean_area)
+ ret lr
+SYM_FUNC_END(cpu_arm7tdmi_dcache_clean_area)
+
+SYM_TYPED_FUNC_START(cpu_arm7tdmi_switch_mm)
+ ret lr
+SYM_FUNC_END(cpu_arm7tdmi_switch_mm)
/*
* cpu_arm7tdmi_proc_fin()
- */
-ENTRY(cpu_arm7tdmi_proc_fin)
- ret lr
+ */
+SYM_TYPED_FUNC_START(cpu_arm7tdmi_proc_fin)
+ ret lr
+SYM_FUNC_END(cpu_arm7tdmi_proc_fin)
/*
* Function: cpu_arm7tdmi_reset(loc)
@@ -42,9 +54,9 @@ ENTRY(cpu_arm7tdmi_proc_fin)
* Purpose : Sets up everything for a reset and jump to the location for soft reset.
*/
.pushsection .idmap.text, "ax"
-ENTRY(cpu_arm7tdmi_reset)
+SYM_TYPED_FUNC_START(cpu_arm7tdmi_reset)
ret r0
-ENDPROC(cpu_arm7tdmi_reset)
+SYM_FUNC_END(cpu_arm7tdmi_reset)
.popsection
.type __arm7tdmi_setup, #function
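The #ifdef CONFIG_CFI_CLANG branch that recurs in the coherent_kern_range conversions (arm1026 above, arm920 below) exists because the typed macro places the next function's hash word as data between the two bodies. Without CFI the bodies stay contiguous and the historical fallthrough still works; with CFI, running off the end of the kern variant would execute the hash word, so an explicit b is required. In C the relationship is just a tail call; the prototypes here are a sketch:

int arm9xx_coherent_user_range(unsigned long start, unsigned long end)
{
	/* clean D lines and invalidate I lines over [start, end) */
	(void)start;
	(void)end;
	return 0;
}

int arm9xx_coherent_kern_range(unsigned long start, unsigned long end)
{
	return arm9xx_coherent_user_range(start, end);	/* the 'b' under CFI */
}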
diff --git a/arch/arm/mm/proc-arm920.S b/arch/arm/mm/proc-arm920.S
index a234cd8ba5e6..a30df54ad5fa 100644
--- a/arch/arm/mm/proc-arm920.S
+++ b/arch/arm/mm/proc-arm920.S
@@ -13,6 +13,7 @@
*/
#include <linux/linkage.h>
#include <linux/init.h>
+#include <linux/cfi_types.h>
#include <linux/pgtable.h>
#include <asm/assembler.h>
#include <asm/hwcap.h>
@@ -48,18 +49,20 @@
/*
* cpu_arm920_proc_init()
*/
-ENTRY(cpu_arm920_proc_init)
+SYM_TYPED_FUNC_START(cpu_arm920_proc_init)
ret lr
+SYM_FUNC_END(cpu_arm920_proc_init)
/*
* cpu_arm920_proc_fin()
*/
-ENTRY(cpu_arm920_proc_fin)
+SYM_TYPED_FUNC_START(cpu_arm920_proc_fin)
mrc p15, 0, r0, c1, c0, 0 @ ctrl register
bic r0, r0, #0x1000 @ ...i............
bic r0, r0, #0x000e @ ............wca.
mcr p15, 0, r0, c1, c0, 0 @ disable caches
ret lr
+SYM_FUNC_END(cpu_arm920_proc_fin)
/*
* cpu_arm920_reset(loc)
@@ -72,7 +75,7 @@ ENTRY(cpu_arm920_proc_fin)
*/
.align 5
.pushsection .idmap.text, "ax"
-ENTRY(cpu_arm920_reset)
+SYM_TYPED_FUNC_START(cpu_arm920_reset)
mov ip, #0
mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches
mcr p15, 0, ip, c7, c10, 4 @ drain WB
@@ -84,17 +87,17 @@ ENTRY(cpu_arm920_reset)
bic ip, ip, #0x1100 @ ...i...s........
mcr p15, 0, ip, c1, c0, 0 @ ctrl register
ret r0
-ENDPROC(cpu_arm920_reset)
+SYM_FUNC_END(cpu_arm920_reset)
.popsection
/*
* cpu_arm920_do_idle()
*/
.align 5
-ENTRY(cpu_arm920_do_idle)
+SYM_TYPED_FUNC_START(cpu_arm920_do_idle)
mcr p15, 0, r0, c7, c0, 4 @ Wait for interrupt
ret lr
-
+SYM_FUNC_END(cpu_arm920_do_idle)
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
@@ -103,11 +106,11 @@ ENTRY(cpu_arm920_do_idle)
*
* Unconditionally clean and invalidate the entire icache.
*/
-ENTRY(arm920_flush_icache_all)
+SYM_TYPED_FUNC_START(arm920_flush_icache_all)
mov r0, #0
mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
ret lr
-ENDPROC(arm920_flush_icache_all)
+SYM_FUNC_END(arm920_flush_icache_all)
/*
* flush_user_cache_all()
@@ -115,15 +118,14 @@ ENDPROC(arm920_flush_icache_all)
* Invalidate all cache entries in a particular address
* space.
*/
-ENTRY(arm920_flush_user_cache_all)
- /* FALLTHROUGH */
+SYM_FUNC_ALIAS(arm920_flush_user_cache_all, arm920_flush_kern_cache_all)
/*
* flush_kern_cache_all()
*
* Clean and invalidate the entire cache.
*/
-ENTRY(arm920_flush_kern_cache_all)
+SYM_TYPED_FUNC_START(arm920_flush_kern_cache_all)
mov r2, #VM_EXEC
mov ip, #0
__flush_whole_cache:
@@ -138,6 +140,7 @@ __flush_whole_cache:
mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache
mcrne p15, 0, ip, c7, c10, 4 @ drain WB
ret lr
+SYM_FUNC_END(arm920_flush_kern_cache_all)
/*
* flush_user_cache_range(start, end, flags)
@@ -149,7 +152,7 @@ __flush_whole_cache:
* - end - end address (exclusive)
* - flags - vm_flags for address space
*/
-ENTRY(arm920_flush_user_cache_range)
+SYM_TYPED_FUNC_START(arm920_flush_user_cache_range)
mov ip, #0
sub r3, r1, r0 @ calculate total size
cmp r3, #CACHE_DLIMIT
@@ -164,6 +167,7 @@ ENTRY(arm920_flush_user_cache_range)
tst r2, #VM_EXEC
mcrne p15, 0, ip, c7, c10, 4 @ drain WB
ret lr
+SYM_FUNC_END(arm920_flush_user_cache_range)
/*
* coherent_kern_range(start, end)
@@ -175,8 +179,11 @@ ENTRY(arm920_flush_user_cache_range)
* - start - virtual start address
* - end - virtual end address
*/
-ENTRY(arm920_coherent_kern_range)
- /* FALLTHROUGH */
+SYM_TYPED_FUNC_START(arm920_coherent_kern_range)
+#ifdef CONFIG_CFI_CLANG /* Fallthrough if !CFI */
+ b arm920_coherent_user_range
+#endif
+SYM_FUNC_END(arm920_coherent_kern_range)
/*
* coherent_user_range(start, end)
@@ -188,7 +195,7 @@ ENTRY(arm920_coherent_kern_range)
* - start - virtual start address
* - end - virtual end address
*/
-ENTRY(arm920_coherent_user_range)
+SYM_TYPED_FUNC_START(arm920_coherent_user_range)
bic r0, r0, #CACHE_DLINESIZE - 1
1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
mcr p15, 0, r0, c7, c5, 1 @ invalidate I entry
@@ -198,6 +205,7 @@ ENTRY(arm920_coherent_user_range)
mcr p15, 0, r0, c7, c10, 4 @ drain WB
mov r0, #0
ret lr
+SYM_FUNC_END(arm920_coherent_user_range)
/*
* flush_kern_dcache_area(void *addr, size_t size)
@@ -208,7 +216,7 @@ ENTRY(arm920_coherent_user_range)
* - addr - kernel address
* - size - region size
*/
-ENTRY(arm920_flush_kern_dcache_area)
+SYM_TYPED_FUNC_START(arm920_flush_kern_dcache_area)
add r1, r0, r1
1: mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry
add r0, r0, #CACHE_DLINESIZE
@@ -218,6 +226,7 @@ ENTRY(arm920_flush_kern_dcache_area)
mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
mcr p15, 0, r0, c7, c10, 4 @ drain WB
ret lr
+SYM_FUNC_END(arm920_flush_kern_dcache_area)
/*
* dma_inv_range(start, end)
@@ -272,7 +281,7 @@ arm920_dma_clean_range:
* - start - virtual start address
* - end - virtual end address
*/
-ENTRY(arm920_dma_flush_range)
+SYM_TYPED_FUNC_START(arm920_dma_flush_range)
bic r0, r0, #CACHE_DLINESIZE - 1
1: mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry
add r0, r0, #CACHE_DLINESIZE
@@ -280,6 +289,7 @@ ENTRY(arm920_dma_flush_range)
blo 1b
mcr p15, 0, r0, c7, c10, 4 @ drain WB
ret lr
+SYM_FUNC_END(arm920_dma_flush_range)
/*
* dma_map_area(start, size, dir)
@@ -287,13 +297,13 @@ ENTRY(arm920_dma_flush_range)
* - size - size of region
* - dir - DMA direction
*/
-ENTRY(arm920_dma_map_area)
+SYM_TYPED_FUNC_START(arm920_dma_map_area)
add r1, r1, r0
cmp r2, #DMA_TO_DEVICE
beq arm920_dma_clean_range
bcs arm920_dma_inv_range
b arm920_dma_flush_range
-ENDPROC(arm920_dma_map_area)
+SYM_FUNC_END(arm920_dma_map_area)
/*
* dma_unmap_area(start, size, dir)
@@ -301,24 +311,20 @@ ENDPROC(arm920_dma_map_area)
* - size - size of region
* - dir - DMA direction
*/
-ENTRY(arm920_dma_unmap_area)
+SYM_TYPED_FUNC_START(arm920_dma_unmap_area)
ret lr
-ENDPROC(arm920_dma_unmap_area)
+SYM_FUNC_END(arm920_dma_unmap_area)
- .globl arm920_flush_kern_cache_louis
- .equ arm920_flush_kern_cache_louis, arm920_flush_kern_cache_all
-
- @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
- define_cache_functions arm920
-#endif
+#endif /* !CONFIG_CPU_DCACHE_WRITETHROUGH */
-ENTRY(cpu_arm920_dcache_clean_area)
+SYM_TYPED_FUNC_START(cpu_arm920_dcache_clean_area)
1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
add r0, r0, #CACHE_DLINESIZE
subs r1, r1, #CACHE_DLINESIZE
bhi 1b
ret lr
+SYM_FUNC_END(cpu_arm920_dcache_clean_area)
/* =============================== PageTable ============================== */
@@ -330,7 +336,7 @@ ENTRY(cpu_arm920_dcache_clean_area)
* pgd: new page tables
*/
.align 5
-ENTRY(cpu_arm920_switch_mm)
+SYM_TYPED_FUNC_START(cpu_arm920_switch_mm)
#ifdef CONFIG_MMU
mov ip, #0
#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
@@ -354,6 +360,7 @@ ENTRY(cpu_arm920_switch_mm)
mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs
#endif
ret lr
+SYM_FUNC_END(cpu_arm920_switch_mm)
/*
* cpu_arm920_set_pte(ptep, pte, ext)
@@ -361,7 +368,7 @@ ENTRY(cpu_arm920_switch_mm)
* Set a PTE and flush it out
*/
.align 5
-ENTRY(cpu_arm920_set_pte_ext)
+SYM_TYPED_FUNC_START(cpu_arm920_set_pte_ext)
#ifdef CONFIG_MMU
armv3_set_pte_ext
mov r0, r0
@@ -369,21 +376,22 @@ ENTRY(cpu_arm920_set_pte_ext)
mcr p15, 0, r0, c7, c10, 4 @ drain WB
#endif
ret lr
+SYM_FUNC_END(cpu_arm920_set_pte_ext)
/* Suspend/resume support: taken from arch/arm/plat-s3c24xx/sleep.S */
.globl cpu_arm920_suspend_size
.equ cpu_arm920_suspend_size, 4 * 3
#ifdef CONFIG_ARM_CPU_SUSPEND
-ENTRY(cpu_arm920_do_suspend)
+SYM_TYPED_FUNC_START(cpu_arm920_do_suspend)
stmfd sp!, {r4 - r6, lr}
mrc p15, 0, r4, c13, c0, 0 @ PID
mrc p15, 0, r5, c3, c0, 0 @ Domain ID
mrc p15, 0, r6, c1, c0, 0 @ Control register
stmia r0, {r4 - r6}
ldmfd sp!, {r4 - r6, pc}
-ENDPROC(cpu_arm920_do_suspend)
+SYM_FUNC_END(cpu_arm920_do_suspend)
-ENTRY(cpu_arm920_do_resume)
+SYM_TYPED_FUNC_START(cpu_arm920_do_resume)
mov ip, #0
mcr p15, 0, ip, c8, c7, 0 @ invalidate I+D TLBs
mcr p15, 0, ip, c7, c7, 0 @ invalidate I+D caches
@@ -393,7 +401,7 @@ ENTRY(cpu_arm920_do_resume)
mcr p15, 0, r1, c2, c0, 0 @ TTB address
mov r0, r6 @ control register
b cpu_resume_mmu
-ENDPROC(cpu_arm920_do_resume)
+SYM_FUNC_END(cpu_arm920_do_resume)
#endif
.type __arm920_setup, #function
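arm920_flush_user_cache_all becomes a SYM_FUNC_ALIAS of arm920_flush_kern_cache_all instead of a fallthrough: both ops take no arguments and return nothing, so one body and one type hash can legitimately serve both names. A C sketch of the same pattern using the GCC/Clang alias extension (the foo_ names are placeholders):

/* One definition, two symbols; callers through either function pointer
 * reach the same entry and therefore see the same CFI hash. */
void foo_flush_kern_cache_all(void)
{
	/* clean + invalidate the entire cache */
}

extern __typeof__(foo_flush_kern_cache_all) foo_flush_user_cache_all
	__attribute__((alias("foo_flush_kern_cache_all")));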
diff --git a/arch/arm/mm/proc-arm922.S b/arch/arm/mm/proc-arm922.S
index 53c029dcfd83..aac4e048100d 100644
--- a/arch/arm/mm/proc-arm922.S
+++ b/arch/arm/mm/proc-arm922.S
@@ -14,6 +14,7 @@
*/
#include <linux/linkage.h>
#include <linux/init.h>
+#include <linux/cfi_types.h>
#include <linux/pgtable.h>
#include <asm/assembler.h>
#include <asm/hwcap.h>
@@ -50,18 +51,20 @@
/*
* cpu_arm922_proc_init()
*/
-ENTRY(cpu_arm922_proc_init)
+SYM_TYPED_FUNC_START(cpu_arm922_proc_init)
ret lr
+SYM_FUNC_END(cpu_arm922_proc_init)
/*
* cpu_arm922_proc_fin()
*/
-ENTRY(cpu_arm922_proc_fin)
+SYM_TYPED_FUNC_START(cpu_arm922_proc_fin)
mrc p15, 0, r0, c1, c0, 0 @ ctrl register
bic r0, r0, #0x1000 @ ...i............
bic r0, r0, #0x000e @ ............wca.
mcr p15, 0, r0, c1, c0, 0 @ disable caches
ret lr
+SYM_FUNC_END(cpu_arm922_proc_fin)
/*
* cpu_arm922_reset(loc)
@@ -74,7 +77,7 @@ ENTRY(cpu_arm922_proc_fin)
*/
.align 5
.pushsection .idmap.text, "ax"
-ENTRY(cpu_arm922_reset)
+SYM_TYPED_FUNC_START(cpu_arm922_reset)
mov ip, #0
mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches
mcr p15, 0, ip, c7, c10, 4 @ drain WB
@@ -86,17 +89,17 @@ ENTRY(cpu_arm922_reset)
bic ip, ip, #0x1100 @ ...i...s........
mcr p15, 0, ip, c1, c0, 0 @ ctrl register
ret r0
-ENDPROC(cpu_arm922_reset)
+SYM_FUNC_END(cpu_arm922_reset)
.popsection
/*
* cpu_arm922_do_idle()
*/
.align 5
-ENTRY(cpu_arm922_do_idle)
+SYM_TYPED_FUNC_START(cpu_arm922_do_idle)
mcr p15, 0, r0, c7, c0, 4 @ Wait for interrupt
ret lr
-
+SYM_FUNC_END(cpu_arm922_do_idle)
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
@@ -105,11 +108,11 @@ ENTRY(cpu_arm922_do_idle)
*
* Unconditionally clean and invalidate the entire icache.
*/
-ENTRY(arm922_flush_icache_all)
+SYM_TYPED_FUNC_START(arm922_flush_icache_all)
mov r0, #0
mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
ret lr
-ENDPROC(arm922_flush_icache_all)
+SYM_FUNC_END(arm922_flush_icache_all)
/*
* flush_user_cache_all()
@@ -117,15 +120,14 @@ ENDPROC(arm922_flush_icache_all)
* Clean and invalidate all cache entries in a particular
* address space.
*/
-ENTRY(arm922_flush_user_cache_all)
- /* FALLTHROUGH */
+SYM_FUNC_ALIAS(arm922_flush_user_cache_all, arm922_flush_kern_cache_all)
/*
* flush_kern_cache_all()
*
* Clean and invalidate the entire cache.
*/
-ENTRY(arm922_flush_kern_cache_all)
+SYM_TYPED_FUNC_START(arm922_flush_kern_cache_all)
mov r2, #VM_EXEC
mov ip, #0
__flush_whole_cache:
@@ -140,6 +142,7 @@ __flush_whole_cache:
mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache
mcrne p15, 0, ip, c7, c10, 4 @ drain WB
ret lr
+SYM_FUNC_END(arm922_flush_kern_cache_all)
/*
* flush_user_cache_range(start, end, flags)
@@ -151,7 +154,7 @@ __flush_whole_cache:
* - end - end address (exclusive)
* - flags - vm_flags describing address space
*/
-ENTRY(arm922_flush_user_cache_range)
+SYM_TYPED_FUNC_START(arm922_flush_user_cache_range)
mov ip, #0
sub r3, r1, r0 @ calculate total size
cmp r3, #CACHE_DLIMIT
@@ -166,6 +169,7 @@ ENTRY(arm922_flush_user_cache_range)
tst r2, #VM_EXEC
mcrne p15, 0, ip, c7, c10, 4 @ drain WB
ret lr
+SYM_FUNC_END(arm922_flush_user_cache_range)
/*
* coherent_kern_range(start, end)
@@ -177,8 +181,11 @@ ENTRY(arm922_flush_user_cache_range)
* - start - virtual start address
* - end - virtual end address
*/
-ENTRY(arm922_coherent_kern_range)
- /* FALLTHROUGH */
+SYM_TYPED_FUNC_START(arm922_coherent_kern_range)
+#ifdef CONFIG_CFI_CLANG /* Fallthrough if !CFI */
+ b arm922_coherent_user_range
+#endif
+SYM_FUNC_END(arm922_coherent_kern_range)
/*
* coherent_user_range(start, end)
@@ -190,7 +197,7 @@ ENTRY(arm922_coherent_kern_range)
* - start - virtual start address
* - end - virtual end address
*/
-ENTRY(arm922_coherent_user_range)
+SYM_TYPED_FUNC_START(arm922_coherent_user_range)
bic r0, r0, #CACHE_DLINESIZE - 1
1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
mcr p15, 0, r0, c7, c5, 1 @ invalidate I entry
@@ -200,6 +207,7 @@ ENTRY(arm922_coherent_user_range)
mcr p15, 0, r0, c7, c10, 4 @ drain WB
mov r0, #0
ret lr
+SYM_FUNC_END(arm922_coherent_user_range)
/*
* flush_kern_dcache_area(void *addr, size_t size)
@@ -210,7 +218,7 @@ ENTRY(arm922_coherent_user_range)
* - addr - kernel address
* - size - region size
*/
-ENTRY(arm922_flush_kern_dcache_area)
+SYM_TYPED_FUNC_START(arm922_flush_kern_dcache_area)
add r1, r0, r1
1: mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry
add r0, r0, #CACHE_DLINESIZE
@@ -220,6 +228,7 @@ ENTRY(arm922_flush_kern_dcache_area)
mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
mcr p15, 0, r0, c7, c10, 4 @ drain WB
ret lr
+SYM_FUNC_END(arm922_flush_kern_dcache_area)
/*
* dma_inv_range(start, end)
@@ -274,7 +283,7 @@ arm922_dma_clean_range:
* - start - virtual start address
* - end - virtual end address
*/
-ENTRY(arm922_dma_flush_range)
+SYM_TYPED_FUNC_START(arm922_dma_flush_range)
bic r0, r0, #CACHE_DLINESIZE - 1
1: mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry
add r0, r0, #CACHE_DLINESIZE
@@ -282,6 +291,7 @@ ENTRY(arm922_dma_flush_range)
blo 1b
mcr p15, 0, r0, c7, c10, 4 @ drain WB
ret lr
+SYM_FUNC_END(arm922_dma_flush_range)
/*
* dma_map_area(start, size, dir)
@@ -289,13 +299,13 @@ ENTRY(arm922_dma_flush_range)
* - size - size of region
* - dir - DMA direction
*/
-ENTRY(arm922_dma_map_area)
+SYM_TYPED_FUNC_START(arm922_dma_map_area)
add r1, r1, r0
cmp r2, #DMA_TO_DEVICE
beq arm922_dma_clean_range
bcs arm922_dma_inv_range
b arm922_dma_flush_range
-ENDPROC(arm922_dma_map_area)
+SYM_FUNC_END(arm922_dma_map_area)
/*
* dma_unmap_area(start, size, dir)
@@ -303,19 +313,13 @@ ENDPROC(arm922_dma_map_area)
* - size - size of region
* - dir - DMA direction
*/
-ENTRY(arm922_dma_unmap_area)
+SYM_TYPED_FUNC_START(arm922_dma_unmap_area)
ret lr
-ENDPROC(arm922_dma_unmap_area)
-
- .globl arm922_flush_kern_cache_louis
- .equ arm922_flush_kern_cache_louis, arm922_flush_kern_cache_all
-
- @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
- define_cache_functions arm922
-#endif
+SYM_FUNC_END(arm922_dma_unmap_area)
+#endif /* !CONFIG_CPU_DCACHE_WRITETHROUGH */
-ENTRY(cpu_arm922_dcache_clean_area)
+SYM_TYPED_FUNC_START(cpu_arm922_dcache_clean_area)
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
add r0, r0, #CACHE_DLINESIZE
@@ -323,6 +327,7 @@ ENTRY(cpu_arm922_dcache_clean_area)
bhi 1b
#endif
ret lr
+SYM_FUNC_END(cpu_arm922_dcache_clean_area)
/* =============================== PageTable ============================== */
@@ -334,7 +339,7 @@ ENTRY(cpu_arm922_dcache_clean_area)
* pgd: new page tables
*/
.align 5
-ENTRY(cpu_arm922_switch_mm)
+SYM_TYPED_FUNC_START(cpu_arm922_switch_mm)
#ifdef CONFIG_MMU
mov ip, #0
#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
@@ -358,6 +363,7 @@ ENTRY(cpu_arm922_switch_mm)
mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs
#endif
ret lr
+SYM_FUNC_END(cpu_arm922_switch_mm)
/*
* cpu_arm922_set_pte_ext(ptep, pte, ext)
@@ -365,7 +371,7 @@ ENTRY(cpu_arm922_switch_mm)
* Set a PTE and flush it out
*/
.align 5
-ENTRY(cpu_arm922_set_pte_ext)
+SYM_TYPED_FUNC_START(cpu_arm922_set_pte_ext)
#ifdef CONFIG_MMU
armv3_set_pte_ext
mov r0, r0
@@ -373,6 +379,7 @@ ENTRY(cpu_arm922_set_pte_ext)
mcr p15, 0, r0, c7, c10, 4 @ drain WB
#endif /* CONFIG_MMU */
ret lr
+SYM_FUNC_END(cpu_arm922_set_pte_ext)
.type __arm922_setup, #function
__arm922_setup:
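All of the flush_user_cache_range bodies share one shape: if the span exceeds CACHE_DLIMIT it is cheaper to flush the whole cache (the branch to __flush_whole_cache), otherwise the range is cleaned and invalidated a line at a time before the write buffer is drained. A C rendering of that control flow, simplified to ignore the VM_EXEC-conditional I-cache work; the cp15 operations are abstracted into callbacks and the constants are illustrative, since each core defines its own:

#define CACHE_DLIMIT	16384
#define CACHE_DLINESIZE	32

struct cache_ops {
	void (*flush_whole_cache)(void);
	void (*clean_inv_dline)(unsigned long mva);
	void (*drain_wb)(void);
};

static void flush_user_cache_range(const struct cache_ops *ops,
				   unsigned long start, unsigned long end)
{
	if (end - start > CACHE_DLIMIT) {	/* whole cache is cheaper */
		ops->flush_whole_cache();
		return;
	}
	for (unsigned long mva = start & ~(CACHE_DLINESIZE - 1UL);
	     mva < end; mva += CACHE_DLINESIZE)
		ops->clean_inv_dline(mva);
	ops->drain_wb();
}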
diff --git a/arch/arm/mm/proc-arm925.S b/arch/arm/mm/proc-arm925.S
index 0bfad62ea858..035941faeb2e 100644
--- a/arch/arm/mm/proc-arm925.S
+++ b/arch/arm/mm/proc-arm925.S
@@ -37,6 +37,7 @@
#include <linux/linkage.h>
#include <linux/init.h>
+#include <linux/cfi_types.h>
#include <linux/pgtable.h>
#include <asm/assembler.h>
#include <asm/hwcap.h>
@@ -71,18 +72,20 @@
/*
* cpu_arm925_proc_init()
*/
-ENTRY(cpu_arm925_proc_init)
+SYM_TYPED_FUNC_START(cpu_arm925_proc_init)
ret lr
+SYM_FUNC_END(cpu_arm925_proc_init)
/*
* cpu_arm925_proc_fin()
*/
-ENTRY(cpu_arm925_proc_fin)
+SYM_TYPED_FUNC_START(cpu_arm925_proc_fin)
mrc p15, 0, r0, c1, c0, 0 @ ctrl register
bic r0, r0, #0x1000 @ ...i............
bic r0, r0, #0x000e @ ............wca.
mcr p15, 0, r0, c1, c0, 0 @ disable caches
ret lr
+SYM_FUNC_END(cpu_arm925_proc_fin)
/*
* cpu_arm925_reset(loc)
@@ -95,14 +98,14 @@ ENTRY(cpu_arm925_proc_fin)
*/
.align 5
.pushsection .idmap.text, "ax"
-ENTRY(cpu_arm925_reset)
+SYM_TYPED_FUNC_START(cpu_arm925_reset)
/* Send software reset to MPU and DSP */
mov ip, #0xff000000
orr ip, ip, #0x00fe0000
orr ip, ip, #0x0000ce00
mov r4, #1
strh r4, [ip, #0x10]
-ENDPROC(cpu_arm925_reset)
+SYM_FUNC_END(cpu_arm925_reset)
.popsection
mov ip, #0
@@ -123,7 +126,7 @@ ENDPROC(cpu_arm925_reset)
* Called with IRQs disabled
*/
.align 10
-ENTRY(cpu_arm925_do_idle)
+SYM_TYPED_FUNC_START(cpu_arm925_do_idle)
mov r0, #0
mrc p15, 0, r1, c1, c0, 0 @ Read control register
mcr p15, 0, r0, c7, c10, 4 @ Drain write buffer
@@ -132,17 +135,18 @@ ENTRY(cpu_arm925_do_idle)
mcr p15, 0, r0, c7, c0, 4 @ Wait for interrupt
mcr p15, 0, r1, c1, c0, 0 @ Restore ICache enable
ret lr
+SYM_FUNC_END(cpu_arm925_do_idle)
/*
* flush_icache_all()
*
* Unconditionally clean and invalidate the entire icache.
*/
-ENTRY(arm925_flush_icache_all)
+SYM_TYPED_FUNC_START(arm925_flush_icache_all)
mov r0, #0
mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
ret lr
-ENDPROC(arm925_flush_icache_all)
+SYM_FUNC_END(arm925_flush_icache_all)
/*
* flush_user_cache_all()
@@ -150,15 +154,14 @@ ENDPROC(arm925_flush_icache_all)
* Clean and invalidate all cache entries in a particular
* address space.
*/
-ENTRY(arm925_flush_user_cache_all)
- /* FALLTHROUGH */
+SYM_FUNC_ALIAS(arm925_flush_user_cache_all, arm925_flush_kern_cache_all)
/*
* flush_kern_cache_all()
*
* Clean and invalidate the entire cache.
*/
-ENTRY(arm925_flush_kern_cache_all)
+SYM_TYPED_FUNC_START(arm925_flush_kern_cache_all)
mov r2, #VM_EXEC
mov ip, #0
__flush_whole_cache:
@@ -175,6 +178,7 @@ __flush_whole_cache:
mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache
mcrne p15, 0, ip, c7, c10, 4 @ drain WB
ret lr
+SYM_FUNC_END(arm925_flush_kern_cache_all)
/*
* flush_user_cache_range(start, end, flags)
@@ -186,7 +190,7 @@ __flush_whole_cache:
* - end - end address (exclusive)
* - flags - vm_flags describing address space
*/
-ENTRY(arm925_flush_user_cache_range)
+SYM_TYPED_FUNC_START(arm925_flush_user_cache_range)
mov ip, #0
sub r3, r1, r0 @ calculate total size
cmp r3, #CACHE_DLIMIT
@@ -212,6 +216,7 @@ ENTRY(arm925_flush_user_cache_range)
tst r2, #VM_EXEC
mcrne p15, 0, ip, c7, c10, 4 @ drain WB
ret lr
+SYM_FUNC_END(arm925_flush_user_cache_range)
/*
* coherent_kern_range(start, end)
@@ -223,8 +228,11 @@ ENTRY(arm925_flush_user_cache_range)
* - start - virtual start address
* - end - virtual end address
*/
-ENTRY(arm925_coherent_kern_range)
- /* FALLTHROUGH */
+SYM_TYPED_FUNC_START(arm925_coherent_kern_range)
+#ifdef CONFIG_CFI_CLANG /* Fallthrough if !CFI */
+ b arm925_coherent_user_range
+#endif
+SYM_FUNC_END(arm925_coherent_kern_range)
/*
* coherent_user_range(start, end)
@@ -236,7 +244,7 @@ ENTRY(arm925_coherent_kern_range)
* - start - virtual start address
* - end - virtual end address
*/
-ENTRY(arm925_coherent_user_range)
+SYM_TYPED_FUNC_START(arm925_coherent_user_range)
bic r0, r0, #CACHE_DLINESIZE - 1
1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
mcr p15, 0, r0, c7, c5, 1 @ invalidate I entry
@@ -246,6 +254,7 @@ ENTRY(arm925_coherent_user_range)
mcr p15, 0, r0, c7, c10, 4 @ drain WB
mov r0, #0
ret lr
+SYM_FUNC_END(arm925_coherent_user_range)
/*
* flush_kern_dcache_area(void *addr, size_t size)
@@ -256,7 +265,7 @@ ENTRY(arm925_coherent_user_range)
* - addr - kernel address
* - size - region size
*/
-ENTRY(arm925_flush_kern_dcache_area)
+SYM_TYPED_FUNC_START(arm925_flush_kern_dcache_area)
add r1, r0, r1
1: mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry
add r0, r0, #CACHE_DLINESIZE
@@ -266,6 +275,7 @@ ENTRY(arm925_flush_kern_dcache_area)
mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
mcr p15, 0, r0, c7, c10, 4 @ drain WB
ret lr
+SYM_FUNC_END(arm925_flush_kern_dcache_area)
/*
* dma_inv_range(start, end)
@@ -324,7 +334,7 @@ arm925_dma_clean_range:
* - start - virtual start address
* - end - virtual end address
*/
-ENTRY(arm925_dma_flush_range)
+SYM_TYPED_FUNC_START(arm925_dma_flush_range)
bic r0, r0, #CACHE_DLINESIZE - 1
1:
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
@@ -337,6 +347,7 @@ ENTRY(arm925_dma_flush_range)
blo 1b
mcr p15, 0, r0, c7, c10, 4 @ drain WB
ret lr
+SYM_FUNC_END(arm925_dma_flush_range)
/*
* dma_map_area(start, size, dir)
@@ -344,13 +355,13 @@ ENTRY(arm925_dma_flush_range)
* - size - size of region
* - dir - DMA direction
*/
-ENTRY(arm925_dma_map_area)
+SYM_TYPED_FUNC_START(arm925_dma_map_area)
add r1, r1, r0
cmp r2, #DMA_TO_DEVICE
beq arm925_dma_clean_range
bcs arm925_dma_inv_range
b arm925_dma_flush_range
-ENDPROC(arm925_dma_map_area)
+SYM_FUNC_END(arm925_dma_map_area)
/*
* dma_unmap_area(start, size, dir)
@@ -358,17 +369,11 @@ ENDPROC(arm925_dma_map_area)
* - size - size of region
* - dir - DMA direction
*/
-ENTRY(arm925_dma_unmap_area)
+SYM_TYPED_FUNC_START(arm925_dma_unmap_area)
ret lr
-ENDPROC(arm925_dma_unmap_area)
-
- .globl arm925_flush_kern_cache_louis
- .equ arm925_flush_kern_cache_louis, arm925_flush_kern_cache_all
-
- @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
- define_cache_functions arm925
+SYM_FUNC_END(arm925_dma_unmap_area)
-ENTRY(cpu_arm925_dcache_clean_area)
+SYM_TYPED_FUNC_START(cpu_arm925_dcache_clean_area)
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
add r0, r0, #CACHE_DLINESIZE
@@ -377,6 +382,7 @@ ENTRY(cpu_arm925_dcache_clean_area)
#endif
mcr p15, 0, r0, c7, c10, 4 @ drain WB
ret lr
+SYM_FUNC_END(cpu_arm925_dcache_clean_area)
/* =============================== PageTable ============================== */
@@ -388,7 +394,7 @@ ENTRY(cpu_arm925_dcache_clean_area)
* pgd: new page tables
*/
.align 5
-ENTRY(cpu_arm925_switch_mm)
+SYM_TYPED_FUNC_START(cpu_arm925_switch_mm)
#ifdef CONFIG_MMU
mov ip, #0
#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
@@ -406,6 +412,7 @@ ENTRY(cpu_arm925_switch_mm)
mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs
#endif
ret lr
+SYM_FUNC_END(cpu_arm925_switch_mm)
/*
* cpu_arm925_set_pte_ext(ptep, pte, ext)
@@ -413,7 +420,7 @@ ENTRY(cpu_arm925_switch_mm)
* Set a PTE and flush it out
*/
.align 5
-ENTRY(cpu_arm925_set_pte_ext)
+SYM_TYPED_FUNC_START(cpu_arm925_set_pte_ext)
#ifdef CONFIG_MMU
armv3_set_pte_ext
mov r0, r0
@@ -423,6 +430,7 @@ ENTRY(cpu_arm925_set_pte_ext)
mcr p15, 0, r0, c7, c10, 4 @ drain WB
#endif /* CONFIG_MMU */
ret lr
+SYM_FUNC_END(cpu_arm925_set_pte_ext)
.type __arm925_setup, #function
__arm925_setup:
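Every dma_map_area in these files is the same three-way dispatch on DMA direction: DMA_TO_DEVICE cleans (writes back), DMA_FROM_DEVICE invalidates, and DMA_BIDIRECTIONAL does both. The asm encodes it as one cmp against #DMA_TO_DEVICE followed by beq/bcs, which works because DMA_FROM_DEVICE compares higher than DMA_TO_DEVICE. The same dispatch in C, with the range helpers passed in:

#include <stddef.h>

enum dma_dir { DMA_BIDIRECTIONAL = 0, DMA_TO_DEVICE = 1,
	       DMA_FROM_DEVICE = 2 };	/* matches the kernel's numbering */

static void dma_map_area(unsigned long start, size_t size, enum dma_dir dir,
			 void (*clean)(unsigned long, unsigned long),
			 void (*inv)(unsigned long, unsigned long),
			 void (*flush)(unsigned long, unsigned long))
{
	unsigned long end = start + size;	/* the 'add r1, r1, r0' */

	switch (dir) {
	case DMA_TO_DEVICE:	clean(start, end);	break;	/* beq */
	case DMA_FROM_DEVICE:	inv(start, end);	break;	/* bcs */
	default:		flush(start, end);	break;	/* b   */
	}
}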
diff --git a/arch/arm/mm/proc-arm926.S b/arch/arm/mm/proc-arm926.S
index 0487a2c3439b..6f43d6af2d9a 100644
--- a/arch/arm/mm/proc-arm926.S
+++ b/arch/arm/mm/proc-arm926.S
@@ -13,6 +13,7 @@
*/
#include <linux/linkage.h>
#include <linux/init.h>
+#include <linux/cfi_types.h>
#include <linux/pgtable.h>
#include <asm/assembler.h>
#include <asm/hwcap.h>
@@ -40,18 +41,20 @@
/*
* cpu_arm926_proc_init()
*/
-ENTRY(cpu_arm926_proc_init)
+SYM_TYPED_FUNC_START(cpu_arm926_proc_init)
ret lr
+SYM_FUNC_END(cpu_arm926_proc_init)
/*
* cpu_arm926_proc_fin()
*/
-ENTRY(cpu_arm926_proc_fin)
+SYM_TYPED_FUNC_START(cpu_arm926_proc_fin)
mrc p15, 0, r0, c1, c0, 0 @ ctrl register
bic r0, r0, #0x1000 @ ...i............
bic r0, r0, #0x000e @ ............wca.
mcr p15, 0, r0, c1, c0, 0 @ disable caches
ret lr
+SYM_FUNC_END(cpu_arm926_proc_fin)
/*
* cpu_arm926_reset(loc)
@@ -64,7 +67,7 @@ ENTRY(cpu_arm926_proc_fin)
*/
.align 5
.pushsection .idmap.text, "ax"
-ENTRY(cpu_arm926_reset)
+SYM_TYPED_FUNC_START(cpu_arm926_reset)
mov ip, #0
mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches
mcr p15, 0, ip, c7, c10, 4 @ drain WB
@@ -76,7 +79,7 @@ ENTRY(cpu_arm926_reset)
bic ip, ip, #0x1100 @ ...i...s........
mcr p15, 0, ip, c1, c0, 0 @ ctrl register
ret r0
-ENDPROC(cpu_arm926_reset)
+SYM_FUNC_END(cpu_arm926_reset)
.popsection
/*
@@ -85,7 +88,7 @@ ENDPROC(cpu_arm926_reset)
* Called with IRQs disabled
*/
.align 10
-ENTRY(cpu_arm926_do_idle)
+SYM_TYPED_FUNC_START(cpu_arm926_do_idle)
mov r0, #0
mrc p15, 0, r1, c1, c0, 0 @ Read control register
mcr p15, 0, r0, c7, c10, 4 @ Drain write buffer
@@ -98,17 +101,18 @@ ENTRY(cpu_arm926_do_idle)
mcr p15, 0, r1, c1, c0, 0 @ Restore ICache enable
msr cpsr_c, r3 @ Restore FIQ state
ret lr
+SYM_FUNC_END(cpu_arm926_do_idle)
/*
* flush_icache_all()
*
* Unconditionally clean and invalidate the entire icache.
*/
-ENTRY(arm926_flush_icache_all)
+SYM_TYPED_FUNC_START(arm926_flush_icache_all)
mov r0, #0
mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
ret lr
-ENDPROC(arm926_flush_icache_all)
+SYM_FUNC_END(arm926_flush_icache_all)
/*
* flush_user_cache_all()
@@ -116,15 +120,14 @@ ENDPROC(arm926_flush_icache_all)
* Clean and invalidate all cache entries in a particular
* address space.
*/
-ENTRY(arm926_flush_user_cache_all)
- /* FALLTHROUGH */
+SYM_FUNC_ALIAS(arm926_flush_user_cache_all, arm926_flush_kern_cache_all)
/*
* flush_kern_cache_all()
*
* Clean and invalidate the entire cache.
*/
-ENTRY(arm926_flush_kern_cache_all)
+SYM_TYPED_FUNC_START(arm926_flush_kern_cache_all)
mov r2, #VM_EXEC
mov ip, #0
__flush_whole_cache:
@@ -138,6 +141,7 @@ __flush_whole_cache:
mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache
mcrne p15, 0, ip, c7, c10, 4 @ drain WB
ret lr
+SYM_FUNC_END(arm926_flush_kern_cache_all)
/*
* flush_user_cache_range(start, end, flags)
@@ -149,7 +153,7 @@ __flush_whole_cache:
* - end - end address (exclusive)
* - flags - vm_flags describing address space
*/
-ENTRY(arm926_flush_user_cache_range)
+SYM_TYPED_FUNC_START(arm926_flush_user_cache_range)
mov ip, #0
sub r3, r1, r0 @ calculate total size
cmp r3, #CACHE_DLIMIT
@@ -175,6 +179,7 @@ ENTRY(arm926_flush_user_cache_range)
tst r2, #VM_EXEC
mcrne p15, 0, ip, c7, c10, 4 @ drain WB
ret lr
+SYM_FUNC_END(arm926_flush_user_cache_range)
/*
* coherent_kern_range(start, end)
@@ -186,8 +191,11 @@ ENTRY(arm926_flush_user_cache_range)
* - start - virtual start address
* - end - virtual end address
*/
-ENTRY(arm926_coherent_kern_range)
- /* FALLTHROUGH */
+SYM_TYPED_FUNC_START(arm926_coherent_kern_range)
+#ifdef CONFIG_CFI_CLANG /* Fallthrough if !CFI */
+ b arm926_coherent_user_range
+#endif
+SYM_FUNC_END(arm926_coherent_kern_range)
/*
* coherent_user_range(start, end)
@@ -199,7 +207,7 @@ ENTRY(arm926_coherent_kern_range)
* - start - virtual start address
* - end - virtual end address
*/
-ENTRY(arm926_coherent_user_range)
+SYM_TYPED_FUNC_START(arm926_coherent_user_range)
bic r0, r0, #CACHE_DLINESIZE - 1
1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
mcr p15, 0, r0, c7, c5, 1 @ invalidate I entry
@@ -209,6 +217,7 @@ ENTRY(arm926_coherent_user_range)
mcr p15, 0, r0, c7, c10, 4 @ drain WB
mov r0, #0
ret lr
+SYM_FUNC_END(arm926_coherent_user_range)
/*
* flush_kern_dcache_area(void *addr, size_t size)
@@ -219,7 +228,7 @@ ENTRY(arm926_coherent_user_range)
* - addr - kernel address
* - size - region size
*/
-ENTRY(arm926_flush_kern_dcache_area)
+SYM_TYPED_FUNC_START(arm926_flush_kern_dcache_area)
add r1, r0, r1
1: mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry
add r0, r0, #CACHE_DLINESIZE
@@ -229,6 +238,7 @@ ENTRY(arm926_flush_kern_dcache_area)
mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
mcr p15, 0, r0, c7, c10, 4 @ drain WB
ret lr
+SYM_FUNC_END(arm926_flush_kern_dcache_area)
/*
* dma_inv_range(start, end)
@@ -287,7 +297,7 @@ arm926_dma_clean_range:
* - start - virtual start address
* - end - virtual end address
*/
-ENTRY(arm926_dma_flush_range)
+SYM_TYPED_FUNC_START(arm926_dma_flush_range)
bic r0, r0, #CACHE_DLINESIZE - 1
1:
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
@@ -300,6 +310,7 @@ ENTRY(arm926_dma_flush_range)
blo 1b
mcr p15, 0, r0, c7, c10, 4 @ drain WB
ret lr
+SYM_FUNC_END(arm926_dma_flush_range)
/*
* dma_map_area(start, size, dir)
@@ -307,13 +318,13 @@ ENTRY(arm926_dma_flush_range)
* - size - size of region
* - dir - DMA direction
*/
-ENTRY(arm926_dma_map_area)
+SYM_TYPED_FUNC_START(arm926_dma_map_area)
add r1, r1, r0
cmp r2, #DMA_TO_DEVICE
beq arm926_dma_clean_range
bcs arm926_dma_inv_range
b arm926_dma_flush_range
-ENDPROC(arm926_dma_map_area)
+SYM_FUNC_END(arm926_dma_map_area)
/*
* dma_unmap_area(start, size, dir)
@@ -321,17 +332,11 @@ ENDPROC(arm926_dma_map_area)
* - size - size of region
* - dir - DMA direction
*/
-ENTRY(arm926_dma_unmap_area)
+SYM_TYPED_FUNC_START(arm926_dma_unmap_area)
ret lr
-ENDPROC(arm926_dma_unmap_area)
-
- .globl arm926_flush_kern_cache_louis
- .equ arm926_flush_kern_cache_louis, arm926_flush_kern_cache_all
+SYM_FUNC_END(arm926_dma_unmap_area)
- @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
- define_cache_functions arm926
-
-ENTRY(cpu_arm926_dcache_clean_area)
+SYM_TYPED_FUNC_START(cpu_arm926_dcache_clean_area)
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
add r0, r0, #CACHE_DLINESIZE
@@ -340,6 +345,7 @@ ENTRY(cpu_arm926_dcache_clean_area)
#endif
mcr p15, 0, r0, c7, c10, 4 @ drain WB
ret lr
+SYM_FUNC_END(cpu_arm926_dcache_clean_area)
/* =============================== PageTable ============================== */
@@ -351,7 +357,8 @@ ENTRY(cpu_arm926_dcache_clean_area)
* pgd: new page tables
*/
.align 5
-ENTRY(cpu_arm926_switch_mm)
+
+SYM_TYPED_FUNC_START(cpu_arm926_switch_mm)
#ifdef CONFIG_MMU
mov ip, #0
#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
@@ -367,6 +374,7 @@ ENTRY(cpu_arm926_switch_mm)
mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs
#endif
ret lr
+SYM_FUNC_END(cpu_arm926_switch_mm)
/*
* cpu_arm926_set_pte_ext(ptep, pte, ext)
@@ -374,7 +382,7 @@ ENTRY(cpu_arm926_switch_mm)
* Set a PTE and flush it out
*/
.align 5
-ENTRY(cpu_arm926_set_pte_ext)
+SYM_TYPED_FUNC_START(cpu_arm926_set_pte_ext)
#ifdef CONFIG_MMU
armv3_set_pte_ext
mov r0, r0
@@ -384,21 +392,22 @@ ENTRY(cpu_arm926_set_pte_ext)
mcr p15, 0, r0, c7, c10, 4 @ drain WB
#endif
ret lr
+SYM_FUNC_END(cpu_arm926_set_pte_ext)
/* Suspend/resume support: taken from arch/arm/plat-s3c24xx/sleep.S */
.globl cpu_arm926_suspend_size
.equ cpu_arm926_suspend_size, 4 * 3
#ifdef CONFIG_ARM_CPU_SUSPEND
-ENTRY(cpu_arm926_do_suspend)
+SYM_TYPED_FUNC_START(cpu_arm926_do_suspend)
stmfd sp!, {r4 - r6, lr}
mrc p15, 0, r4, c13, c0, 0 @ PID
mrc p15, 0, r5, c3, c0, 0 @ Domain ID
mrc p15, 0, r6, c1, c0, 0 @ Control register
stmia r0, {r4 - r6}
ldmfd sp!, {r4 - r6, pc}
-ENDPROC(cpu_arm926_do_suspend)
+SYM_FUNC_END(cpu_arm926_do_suspend)
-ENTRY(cpu_arm926_do_resume)
+SYM_TYPED_FUNC_START(cpu_arm926_do_resume)
mov ip, #0
mcr p15, 0, ip, c8, c7, 0 @ invalidate I+D TLBs
mcr p15, 0, ip, c7, c7, 0 @ invalidate I+D caches
@@ -408,7 +417,7 @@ ENTRY(cpu_arm926_do_resume)
mcr p15, 0, r1, c2, c0, 0 @ TTB address
mov r0, r6 @ control register
b cpu_resume_mmu
-ENDPROC(cpu_arm926_do_resume)
+SYM_FUNC_END(cpu_arm926_do_resume)
#endif
.type __arm926_setup, #function
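cpu_arm926_do_suspend above (like arm920's) stores exactly three words — PID, domain access control and the control register — which is why cpu_arm926_suspend_size is declared as 4 * 3. A C model of that contract, with the cp15 reads as stand-in callbacks:

#include <stdint.h>

#define ARM9XX_SUSPEND_WORDS 3	/* 4 * 3 bytes in the asm */

struct cp15_regs {
	uint32_t (*read_pid)(void);	/* c13, c0, 0 */
	uint32_t (*read_domain)(void);	/* c3,  c0, 0 */
	uint32_t (*read_control)(void);	/* c1,  c0, 0 */
};

static void do_suspend(const struct cp15_regs *r,
		       uint32_t save[ARM9XX_SUSPEND_WORDS])
{
	save[0] = r->read_pid();	/* the 'stmia r0, {r4 - r6}' */
	save[1] = r->read_domain();
	save[2] = r->read_control();
}

do_resume reverses this after invalidating the TLBs and caches, then tail-calls cpu_resume_mmu with the saved control register value.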
diff --git a/arch/arm/mm/proc-arm940.S b/arch/arm/mm/proc-arm940.S
index cf9bfcc825ca..0d30bb25c42b 100644
--- a/arch/arm/mm/proc-arm940.S
+++ b/arch/arm/mm/proc-arm940.S
@@ -6,6 +6,7 @@
*/
#include <linux/linkage.h>
#include <linux/init.h>
+#include <linux/cfi_types.h>
#include <linux/pgtable.h>
#include <asm/assembler.h>
#include <asm/hwcap.h>
@@ -25,19 +26,24 @@
*
* These are not required.
*/
-ENTRY(cpu_arm940_proc_init)
-ENTRY(cpu_arm940_switch_mm)
+SYM_TYPED_FUNC_START(cpu_arm940_proc_init)
ret lr
+SYM_FUNC_END(cpu_arm940_proc_init)
+
+SYM_TYPED_FUNC_START(cpu_arm940_switch_mm)
+ ret lr
+SYM_FUNC_END(cpu_arm940_switch_mm)
/*
* cpu_arm940_proc_fin()
*/
-ENTRY(cpu_arm940_proc_fin)
+SYM_TYPED_FUNC_START(cpu_arm940_proc_fin)
mrc p15, 0, r0, c1, c0, 0 @ ctrl register
bic r0, r0, #0x00001000 @ i-cache
bic r0, r0, #0x00000004 @ d-cache
mcr p15, 0, r0, c1, c0, 0 @ disable caches
ret lr
+SYM_FUNC_END(cpu_arm940_proc_fin)
/*
* cpu_arm940_reset(loc)
@@ -45,7 +51,7 @@ ENTRY(cpu_arm940_proc_fin)
* Notes : This sets up everything for a reset
*/
.pushsection .idmap.text, "ax"
-ENTRY(cpu_arm940_reset)
+SYM_TYPED_FUNC_START(cpu_arm940_reset)
mov ip, #0
mcr p15, 0, ip, c7, c5, 0 @ flush I cache
mcr p15, 0, ip, c7, c6, 0 @ flush D cache
@@ -55,42 +61,43 @@ ENTRY(cpu_arm940_reset)
bic ip, ip, #0x00001000 @ i-cache
mcr p15, 0, ip, c1, c0, 0 @ ctrl register
ret r0
-ENDPROC(cpu_arm940_reset)
+SYM_FUNC_END(cpu_arm940_reset)
.popsection
/*
* cpu_arm940_do_idle()
*/
.align 5
-ENTRY(cpu_arm940_do_idle)
+SYM_TYPED_FUNC_START(cpu_arm940_do_idle)
mcr p15, 0, r0, c7, c0, 4 @ Wait for interrupt
ret lr
+SYM_FUNC_END(cpu_arm940_do_idle)
/*
* flush_icache_all()
*
* Unconditionally clean and invalidate the entire icache.
*/
-ENTRY(arm940_flush_icache_all)
+SYM_TYPED_FUNC_START(arm940_flush_icache_all)
mov r0, #0
mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
ret lr
-ENDPROC(arm940_flush_icache_all)
+SYM_FUNC_END(arm940_flush_icache_all)
/*
* flush_user_cache_all()
*/
-ENTRY(arm940_flush_user_cache_all)
- /* FALLTHROUGH */
+SYM_FUNC_ALIAS(arm940_flush_user_cache_all, arm940_flush_kern_cache_all)
/*
* flush_kern_cache_all()
*
* Clean and invalidate the entire cache.
*/
-ENTRY(arm940_flush_kern_cache_all)
+SYM_TYPED_FUNC_START(arm940_flush_kern_cache_all)
mov r2, #VM_EXEC
- /* FALLTHROUGH */
+ b arm940_flush_user_cache_range
+SYM_FUNC_END(arm940_flush_kern_cache_all)
/*
* flush_user_cache_range(start, end, flags)
@@ -102,7 +109,7 @@ ENTRY(arm940_flush_kern_cache_all)
* - end - end address (exclusive)
* - flags - vm_flags describing address space
*/
-ENTRY(arm940_flush_user_cache_range)
+SYM_TYPED_FUNC_START(arm940_flush_user_cache_range)
mov ip, #0
#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
mcr p15, 0, ip, c7, c6, 0 @ flush D cache
@@ -119,6 +126,7 @@ ENTRY(arm940_flush_user_cache_range)
mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache
mcrne p15, 0, ip, c7, c10, 4 @ drain WB
ret lr
+SYM_FUNC_END(arm940_flush_user_cache_range)
/*
* coherent_kern_range(start, end)
@@ -130,8 +138,9 @@ ENTRY(arm940_flush_user_cache_range)
* - start - virtual start address
* - end - virtual end address
*/
-ENTRY(arm940_coherent_kern_range)
- /* FALLTHROUGH */
+SYM_TYPED_FUNC_START(arm940_coherent_kern_range)
+ b arm940_flush_kern_dcache_area
+SYM_FUNC_END(arm940_coherent_kern_range)
/*
* coherent_user_range(start, end)
@@ -143,8 +152,11 @@ ENTRY(arm940_coherent_kern_range)
* - start - virtual start address
* - end - virtual end address
*/
-ENTRY(arm940_coherent_user_range)
- /* FALLTHROUGH */
+SYM_TYPED_FUNC_START(arm940_coherent_user_range)
+#ifdef CONFIG_CFI_CLANG /* Fallthrough if !CFI */
+ b arm940_flush_kern_dcache_area
+#endif
+SYM_FUNC_END(arm940_coherent_user_range)
/*
* flush_kern_dcache_area(void *addr, size_t size)
@@ -155,7 +167,7 @@ ENTRY(arm940_coherent_user_range)
* - addr - kernel address
* - size - region size
*/
-ENTRY(arm940_flush_kern_dcache_area)
+SYM_TYPED_FUNC_START(arm940_flush_kern_dcache_area)
mov r0, #0
mov r1, #(CACHE_DSEGMENTS - 1) << 4 @ 4 segments
1: orr r3, r1, #(CACHE_DENTRIES - 1) << 26 @ 64 entries
@@ -167,6 +179,7 @@ ENTRY(arm940_flush_kern_dcache_area)
mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
mcr p15, 0, r0, c7, c10, 4 @ drain WB
ret lr
+SYM_FUNC_END(arm940_flush_kern_dcache_area)
/*
* dma_inv_range(start, end)
@@ -199,7 +212,7 @@ arm940_dma_inv_range:
* - end - virtual end address
*/
arm940_dma_clean_range:
-ENTRY(cpu_arm940_dcache_clean_area)
+SYM_TYPED_FUNC_START(cpu_arm940_dcache_clean_area)
mov ip, #0
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
mov r1, #(CACHE_DSEGMENTS - 1) << 4 @ 4 segments
@@ -212,6 +225,7 @@ ENTRY(cpu_arm940_dcache_clean_area)
#endif
mcr p15, 0, ip, c7, c10, 4 @ drain WB
ret lr
+SYM_FUNC_END(cpu_arm940_dcache_clean_area)
/*
* dma_flush_range(start, end)
@@ -222,7 +236,7 @@ ENTRY(cpu_arm940_dcache_clean_area)
* - start - virtual start address
* - end - virtual end address
*/
-ENTRY(arm940_dma_flush_range)
+SYM_TYPED_FUNC_START(arm940_dma_flush_range)
mov ip, #0
mov r1, #(CACHE_DSEGMENTS - 1) << 4 @ 4 segments
1: orr r3, r1, #(CACHE_DENTRIES - 1) << 26 @ 64 entries
@@ -238,6 +252,7 @@ ENTRY(arm940_dma_flush_range)
bcs 1b @ segments 7 to 0
mcr p15, 0, ip, c7, c10, 4 @ drain WB
ret lr
+SYM_FUNC_END(arm940_dma_flush_range)
/*
* dma_map_area(start, size, dir)
@@ -245,13 +260,13 @@ ENTRY(arm940_dma_flush_range)
* - size - size of region
* - dir - DMA direction
*/
-ENTRY(arm940_dma_map_area)
+SYM_TYPED_FUNC_START(arm940_dma_map_area)
add r1, r1, r0
cmp r2, #DMA_TO_DEVICE
beq arm940_dma_clean_range
bcs arm940_dma_inv_range
b arm940_dma_flush_range
-ENDPROC(arm940_dma_map_area)
+SYM_FUNC_END(arm940_dma_map_area)
/*
* dma_unmap_area(start, size, dir)
@@ -259,15 +274,9 @@ ENDPROC(arm940_dma_map_area)
* - size - size of region
* - dir - DMA direction
*/
-ENTRY(arm940_dma_unmap_area)
+SYM_TYPED_FUNC_START(arm940_dma_unmap_area)
ret lr
-ENDPROC(arm940_dma_unmap_area)
-
- .globl arm940_flush_kern_cache_louis
- .equ arm940_flush_kern_cache_louis, arm940_flush_kern_cache_all
-
- @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
- define_cache_functions arm940
+SYM_FUNC_END(arm940_dma_unmap_area)
.type __arm940_setup, #function
__arm940_setup:
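proc-arm940.S above cannot flush by address for whole-cache operations and instead walks the cache by index, encoding the segment in bits [7:4] and the entry in bits [31:26] of the cp15 operand — 4 segments of 64 entries, per the file's comments. The two countdown loops around mcr p15, 0, r3, c7, c14, 2, modeled in C:

#include <stdint.h>

#define CACHE_DSEGMENTS	4	/* constants from proc-arm940.S */
#define CACHE_DENTRIES	64

static void arm940_clean_inv_dcache(void (*cp15_clean_inv_dindex)(uint32_t))
{
	for (int seg = CACHE_DSEGMENTS - 1; seg >= 0; seg--)
		for (int entry = CACHE_DENTRIES - 1; entry >= 0; entry--)
			cp15_clean_inv_dindex((uint32_t)entry << 26 |
					      (uint32_t)seg << 4);
}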
diff --git a/arch/arm/mm/proc-arm946.S b/arch/arm/mm/proc-arm946.S
index 6fb3898ad1cd..27750ace2ced 100644
--- a/arch/arm/mm/proc-arm946.S
+++ b/arch/arm/mm/proc-arm946.S
@@ -8,6 +8,7 @@
*/
#include <linux/linkage.h>
#include <linux/init.h>
+#include <linux/cfi_types.h>
#include <linux/pgtable.h>
#include <asm/assembler.h>
#include <asm/hwcap.h>
@@ -32,19 +33,24 @@
*
* These are not required.
*/
-ENTRY(cpu_arm946_proc_init)
-ENTRY(cpu_arm946_switch_mm)
+SYM_TYPED_FUNC_START(cpu_arm946_proc_init)
ret lr
+SYM_FUNC_END(cpu_arm946_proc_init)
+
+SYM_TYPED_FUNC_START(cpu_arm946_switch_mm)
+ ret lr
+SYM_FUNC_END(cpu_arm946_switch_mm)
/*
* cpu_arm946_proc_fin()
*/
-ENTRY(cpu_arm946_proc_fin)
+SYM_TYPED_FUNC_START(cpu_arm946_proc_fin)
mrc p15, 0, r0, c1, c0, 0 @ ctrl register
bic r0, r0, #0x00001000 @ i-cache
bic r0, r0, #0x00000004 @ d-cache
mcr p15, 0, r0, c1, c0, 0 @ disable caches
ret lr
+SYM_FUNC_END(cpu_arm946_proc_fin)
/*
* cpu_arm946_reset(loc)
@@ -52,7 +58,7 @@ ENTRY(cpu_arm946_proc_fin)
* Notes : This sets up everything for a reset
*/
.pushsection .idmap.text, "ax"
-ENTRY(cpu_arm946_reset)
+SYM_TYPED_FUNC_START(cpu_arm946_reset)
mov ip, #0
mcr p15, 0, ip, c7, c5, 0 @ flush I cache
mcr p15, 0, ip, c7, c6, 0 @ flush D cache
@@ -62,40 +68,40 @@ ENTRY(cpu_arm946_reset)
bic ip, ip, #0x00001000 @ i-cache
mcr p15, 0, ip, c1, c0, 0 @ ctrl register
ret r0
-ENDPROC(cpu_arm946_reset)
+SYM_FUNC_END(cpu_arm946_reset)
.popsection
/*
* cpu_arm946_do_idle()
*/
.align 5
-ENTRY(cpu_arm946_do_idle)
+SYM_TYPED_FUNC_START(cpu_arm946_do_idle)
mcr p15, 0, r0, c7, c0, 4 @ Wait for interrupt
ret lr
+SYM_FUNC_END(cpu_arm946_do_idle)
/*
* flush_icache_all()
*
* Unconditionally clean and invalidate the entire icache.
*/
-ENTRY(arm946_flush_icache_all)
+SYM_TYPED_FUNC_START(arm946_flush_icache_all)
mov r0, #0
mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
ret lr
-ENDPROC(arm946_flush_icache_all)
+SYM_FUNC_END(arm946_flush_icache_all)
/*
* flush_user_cache_all()
*/
-ENTRY(arm946_flush_user_cache_all)
- /* FALLTHROUGH */
+SYM_FUNC_ALIAS(arm946_flush_user_cache_all, arm946_flush_kern_cache_all)
/*
* flush_kern_cache_all()
*
* Clean and invalidate the entire cache.
*/
-ENTRY(arm946_flush_kern_cache_all)
+SYM_TYPED_FUNC_START(arm946_flush_kern_cache_all)
mov r2, #VM_EXEC
mov ip, #0
__flush_whole_cache:
@@ -114,6 +120,7 @@ __flush_whole_cache:
mcrne p15, 0, ip, c7, c5, 0 @ flush I cache
mcrne p15, 0, ip, c7, c10, 4 @ drain WB
ret lr
+SYM_FUNC_END(arm946_flush_kern_cache_all)
/*
* flush_user_cache_range(start, end, flags)
@@ -126,7 +133,7 @@ __flush_whole_cache:
* - flags - vm_flags describing address space
* (same as arm926)
*/
-ENTRY(arm946_flush_user_cache_range)
+SYM_TYPED_FUNC_START(arm946_flush_user_cache_range)
mov ip, #0
sub r3, r1, r0 @ calculate total size
cmp r3, #CACHE_DLIMIT
@@ -153,6 +160,7 @@ ENTRY(arm946_flush_user_cache_range)
tst r2, #VM_EXEC
mcrne p15, 0, ip, c7, c10, 4 @ drain WB
ret lr
+SYM_FUNC_END(arm946_flush_user_cache_range)
/*
* coherent_kern_range(start, end)
@@ -164,8 +172,11 @@ ENTRY(arm946_flush_user_cache_range)
* - start - virtual start address
* - end - virtual end address
*/
-ENTRY(arm946_coherent_kern_range)
- /* FALLTHROUGH */
+SYM_TYPED_FUNC_START(arm946_coherent_kern_range)
+#ifdef CONFIG_CFI_CLANG /* Fallthrough if !CFI */
+ b arm946_coherent_user_range
+#endif
+SYM_FUNC_END(arm946_coherent_kern_range)
/*
* coherent_user_range(start, end)
@@ -178,7 +189,7 @@ ENTRY(arm946_coherent_kern_range)
* - end - virtual end address
* (same as arm926)
*/
-ENTRY(arm946_coherent_user_range)
+SYM_TYPED_FUNC_START(arm946_coherent_user_range)
bic r0, r0, #CACHE_DLINESIZE - 1
1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
mcr p15, 0, r0, c7, c5, 1 @ invalidate I entry
@@ -188,6 +199,7 @@ ENTRY(arm946_coherent_user_range)
mcr p15, 0, r0, c7, c10, 4 @ drain WB
mov r0, #0
ret lr
+SYM_FUNC_END(arm946_coherent_user_range)
/*
* flush_kern_dcache_area(void *addr, size_t size)
@@ -199,7 +211,7 @@ ENTRY(arm946_coherent_user_range)
* - size - region size
* (same as arm926)
*/
-ENTRY(arm946_flush_kern_dcache_area)
+SYM_TYPED_FUNC_START(arm946_flush_kern_dcache_area)
add r1, r0, r1
1: mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry
add r0, r0, #CACHE_DLINESIZE
@@ -209,6 +221,7 @@ ENTRY(arm946_flush_kern_dcache_area)
mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
mcr p15, 0, r0, c7, c10, 4 @ drain WB
ret lr
+SYM_FUNC_END(arm946_flush_kern_dcache_area)
/*
* dma_inv_range(start, end)
@@ -268,7 +281,7 @@ arm946_dma_clean_range:
*
* (same as arm926)
*/
-ENTRY(arm946_dma_flush_range)
+SYM_TYPED_FUNC_START(arm946_dma_flush_range)
bic r0, r0, #CACHE_DLINESIZE - 1
1:
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
@@ -281,6 +294,7 @@ ENTRY(arm946_dma_flush_range)
blo 1b
mcr p15, 0, r0, c7, c10, 4 @ drain WB
ret lr
+SYM_FUNC_END(arm946_dma_flush_range)
/*
* dma_map_area(start, size, dir)
@@ -288,13 +302,13 @@ ENTRY(arm946_dma_flush_range)
* - size - size of region
* - dir - DMA direction
*/
-ENTRY(arm946_dma_map_area)
+SYM_TYPED_FUNC_START(arm946_dma_map_area)
add r1, r1, r0
cmp r2, #DMA_TO_DEVICE
beq arm946_dma_clean_range
bcs arm946_dma_inv_range
b arm946_dma_flush_range
-ENDPROC(arm946_dma_map_area)
+SYM_FUNC_END(arm946_dma_map_area)
/*
* dma_unmap_area(start, size, dir)
@@ -302,17 +316,11 @@ ENDPROC(arm946_dma_map_area)
* - size - size of region
* - dir - DMA direction
*/
-ENTRY(arm946_dma_unmap_area)
+SYM_TYPED_FUNC_START(arm946_dma_unmap_area)
ret lr
-ENDPROC(arm946_dma_unmap_area)
-
- .globl arm946_flush_kern_cache_louis
- .equ arm946_flush_kern_cache_louis, arm946_flush_kern_cache_all
-
- @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
- define_cache_functions arm946
+SYM_FUNC_END(arm946_dma_unmap_area)
-ENTRY(cpu_arm946_dcache_clean_area)
+SYM_TYPED_FUNC_START(cpu_arm946_dcache_clean_area)
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
add r0, r0, #CACHE_DLINESIZE
@@ -321,6 +329,7 @@ ENTRY(cpu_arm946_dcache_clean_area)
#endif
mcr p15, 0, r0, c7, c10, 4 @ drain WB
ret lr
+SYM_FUNC_END(cpu_arm946_dcache_clean_area)
.type __arm946_setup, #function
__arm946_setup:
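arm946_coherent_user_range above is the one cache op with a meaningful return value: it cleans the D line and invalidates the I line for each line in [start, end), drains the write buffer, and returns 0 in r0 so the caller can report success. As C, with the line size illustrative:

#define CACHE_DLINESIZE 32

static int coherent_user_range(unsigned long start, unsigned long end,
			       void (*clean_dline)(unsigned long),
			       void (*inv_iline)(unsigned long),
			       void (*drain_wb)(void))
{
	for (unsigned long mva = start & ~(CACHE_DLINESIZE - 1UL);
	     mva < end; mva += CACHE_DLINESIZE) {
		clean_dline(mva);	/* mcr p15, 0, r0, c7, c10, 1 */
		inv_iline(mva);		/* mcr p15, 0, r0, c7, c5, 1 */
	}
	drain_wb();			/* mcr p15, 0, r0, c7, c10, 4 */
	return 0;			/* mov r0, #0 */
}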
diff --git a/arch/arm/mm/proc-arm9tdmi.S b/arch/arm/mm/proc-arm9tdmi.S
index a054c0e9c034..c480a8400eff 100644
--- a/arch/arm/mm/proc-arm9tdmi.S
+++ b/arch/arm/mm/proc-arm9tdmi.S
@@ -6,6 +6,7 @@
*/
#include <linux/linkage.h>
#include <linux/init.h>
+#include <linux/cfi_types.h>
#include <linux/pgtable.h>
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
@@ -24,17 +25,28 @@
*
* These are not required.
*/
-ENTRY(cpu_arm9tdmi_proc_init)
-ENTRY(cpu_arm9tdmi_do_idle)
-ENTRY(cpu_arm9tdmi_dcache_clean_area)
-ENTRY(cpu_arm9tdmi_switch_mm)
+SYM_TYPED_FUNC_START(cpu_arm9tdmi_proc_init)
ret lr
+SYM_FUNC_END(cpu_arm9tdmi_proc_init)
+
+SYM_TYPED_FUNC_START(cpu_arm9tdmi_do_idle)
+ ret lr
+SYM_FUNC_END(cpu_arm9tdmi_do_idle)
+
+SYM_TYPED_FUNC_START(cpu_arm9tdmi_dcache_clean_area)
+ ret lr
+SYM_FUNC_END(cpu_arm9tdmi_dcache_clean_area)
+
+SYM_TYPED_FUNC_START(cpu_arm9tdmi_switch_mm)
+ ret lr
+SYM_FUNC_END(cpu_arm9tdmi_switch_mm)
/*
* cpu_arm9tdmi_proc_fin()
*/
-ENTRY(cpu_arm9tdmi_proc_fin)
+SYM_TYPED_FUNC_START(cpu_arm9tdmi_proc_fin)
ret lr
+SYM_FUNC_END(cpu_arm9tdmi_proc_fin)
/*
* Function: cpu_arm9tdmi_reset(loc)
@@ -42,9 +54,9 @@ ENTRY(cpu_arm9tdmi_proc_fin)
* Purpose : Sets up everything for a reset and jump to the location for soft reset.
*/
.pushsection .idmap.text, "ax"
-ENTRY(cpu_arm9tdmi_reset)
+SYM_TYPED_FUNC_START(cpu_arm9tdmi_reset)
ret r0
-ENDPROC(cpu_arm9tdmi_reset)
+SYM_FUNC_END(cpu_arm9tdmi_reset)
.popsection
.type __arm9tdmi_setup, #function
diff --git a/arch/arm/mm/proc-fa526.S b/arch/arm/mm/proc-fa526.S
index 2c73e0d47d08..7c16ccac8a05 100644
--- a/arch/arm/mm/proc-fa526.S
+++ b/arch/arm/mm/proc-fa526.S
@@ -11,6 +11,7 @@
*/
#include <linux/linkage.h>
#include <linux/init.h>
+#include <linux/cfi_types.h>
#include <linux/pgtable.h>
#include <asm/assembler.h>
#include <asm/hwcap.h>
@@ -26,13 +27,14 @@
/*
* cpu_fa526_proc_init()
*/
-ENTRY(cpu_fa526_proc_init)
+SYM_TYPED_FUNC_START(cpu_fa526_proc_init)
ret lr
+SYM_FUNC_END(cpu_fa526_proc_init)
/*
* cpu_fa526_proc_fin()
*/
-ENTRY(cpu_fa526_proc_fin)
+SYM_TYPED_FUNC_START(cpu_fa526_proc_fin)
mrc p15, 0, r0, c1, c0, 0 @ ctrl register
bic r0, r0, #0x1000 @ ...i............
bic r0, r0, #0x000e @ ............wca.
@@ -40,6 +42,7 @@ ENTRY(cpu_fa526_proc_fin)
nop
nop
ret lr
+SYM_FUNC_END(cpu_fa526_proc_fin)
/*
* cpu_fa526_reset(loc)
@@ -52,7 +55,7 @@ ENTRY(cpu_fa526_proc_fin)
*/
.align 4
.pushsection .idmap.text, "ax"
-ENTRY(cpu_fa526_reset)
+SYM_TYPED_FUNC_START(cpu_fa526_reset)
/* TODO: Use CP8 if possible... */
mov ip, #0
mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches
@@ -68,24 +71,25 @@ ENTRY(cpu_fa526_reset)
nop
nop
ret r0
-ENDPROC(cpu_fa526_reset)
+SYM_FUNC_END(cpu_fa526_reset)
.popsection
/*
* cpu_fa526_do_idle()
*/
.align 4
-ENTRY(cpu_fa526_do_idle)
+SYM_TYPED_FUNC_START(cpu_fa526_do_idle)
ret lr
+SYM_FUNC_END(cpu_fa526_do_idle)
-
-ENTRY(cpu_fa526_dcache_clean_area)
+SYM_TYPED_FUNC_START(cpu_fa526_dcache_clean_area)
1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
add r0, r0, #CACHE_DLINESIZE
subs r1, r1, #CACHE_DLINESIZE
bhi 1b
mcr p15, 0, r0, c7, c10, 4 @ drain WB
ret lr
+SYM_FUNC_END(cpu_fa526_dcache_clean_area)
/* =============================== PageTable ============================== */
@@ -97,7 +101,7 @@ ENTRY(cpu_fa526_dcache_clean_area)
* pgd: new page tables
*/
.align 4
-ENTRY(cpu_fa526_switch_mm)
+SYM_TYPED_FUNC_START(cpu_fa526_switch_mm)
#ifdef CONFIG_MMU
mov ip, #0
#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
@@ -113,6 +117,7 @@ ENTRY(cpu_fa526_switch_mm)
mcr p15, 0, ip, c8, c7, 0 @ invalidate UTLB
#endif
ret lr
+SYM_FUNC_END(cpu_fa526_switch_mm)
/*
* cpu_fa526_set_pte_ext(ptep, pte, ext)
@@ -120,7 +125,7 @@ ENTRY(cpu_fa526_switch_mm)
* Set a PTE and flush it out
*/
.align 4
-ENTRY(cpu_fa526_set_pte_ext)
+SYM_TYPED_FUNC_START(cpu_fa526_set_pte_ext)
#ifdef CONFIG_MMU
armv3_set_pte_ext
mov r0, r0
@@ -129,6 +134,7 @@ ENTRY(cpu_fa526_set_pte_ext)
mcr p15, 0, r0, c7, c10, 4 @ drain WB
#endif
ret lr
+SYM_FUNC_END(cpu_fa526_set_pte_ext)
.type __fa526_setup, #function
__fa526_setup:
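cpu_fa526_dcache_clean_area above is the plain clean-only loop: clean one D line, advance, repeat while bytes remain (the subs/bhi pair), then drain the write buffer. A C model; the line size is illustrative, the file defines the real one:

#define CACHE_DLINESIZE 16

static void dcache_clean_area(unsigned long addr, long size,
			      void (*clean_dline)(unsigned long),
			      void (*drain_wb)(void))
{
	while (size > 0) {		/* subs r1, r1, #... ; bhi 1b */
		clean_dline(addr);	/* mcr p15, 0, r0, c7, c10, 1 */
		addr += CACHE_DLINESIZE;
		size -= CACHE_DLINESIZE;
	}
	drain_wb();
}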
diff --git a/arch/arm/mm/proc-feroceon.S b/arch/arm/mm/proc-feroceon.S
index 072ff9b451f8..f67b2ffac854 100644
--- a/arch/arm/mm/proc-feroceon.S
+++ b/arch/arm/mm/proc-feroceon.S
@@ -8,6 +8,7 @@
#include <linux/linkage.h>
#include <linux/init.h>
+#include <linux/cfi_types.h>
#include <linux/pgtable.h>
#include <asm/assembler.h>
#include <asm/hwcap.h>
@@ -43,7 +44,7 @@ __cache_params:
/*
* cpu_feroceon_proc_init()
*/
-ENTRY(cpu_feroceon_proc_init)
+SYM_TYPED_FUNC_START(cpu_feroceon_proc_init)
mrc p15, 0, r0, c0, c0, 1 @ read cache type register
ldr r1, __cache_params
mov r2, #(16 << 5)
@@ -61,11 +62,12 @@ ENTRY(cpu_feroceon_proc_init)
str_l r1, VFP_arch_feroceon, r2
#endif
ret lr
+SYM_FUNC_END(cpu_feroceon_proc_init)
/*
* cpu_feroceon_proc_fin()
*/
-ENTRY(cpu_feroceon_proc_fin)
+SYM_TYPED_FUNC_START(cpu_feroceon_proc_fin)
#if defined(CONFIG_CACHE_FEROCEON_L2) && \
!defined(CONFIG_CACHE_FEROCEON_L2_WRITETHROUGH)
mov r0, #0
@@ -78,6 +80,7 @@ ENTRY(cpu_feroceon_proc_fin)
bic r0, r0, #0x000e @ ............wca.
mcr p15, 0, r0, c1, c0, 0 @ disable caches
ret lr
+SYM_FUNC_END(cpu_feroceon_proc_fin)
/*
* cpu_feroceon_reset(loc)
@@ -90,7 +93,7 @@ ENTRY(cpu_feroceon_proc_fin)
*/
.align 5
.pushsection .idmap.text, "ax"
-ENTRY(cpu_feroceon_reset)
+SYM_TYPED_FUNC_START(cpu_feroceon_reset)
mov ip, #0
mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches
mcr p15, 0, ip, c7, c10, 4 @ drain WB
@@ -102,7 +105,7 @@ ENTRY(cpu_feroceon_reset)
bic ip, ip, #0x1100 @ ...i...s........
mcr p15, 0, ip, c1, c0, 0 @ ctrl register
ret r0
-ENDPROC(cpu_feroceon_reset)
+SYM_FUNC_END(cpu_feroceon_reset)
.popsection
/*
@@ -111,22 +114,23 @@ ENDPROC(cpu_feroceon_reset)
* Called with IRQs disabled
*/
.align 5
-ENTRY(cpu_feroceon_do_idle)
+SYM_TYPED_FUNC_START(cpu_feroceon_do_idle)
mov r0, #0
mcr p15, 0, r0, c7, c10, 4 @ Drain write buffer
mcr p15, 0, r0, c7, c0, 4 @ Wait for interrupt
ret lr
+SYM_FUNC_END(cpu_feroceon_do_idle)
/*
* flush_icache_all()
*
* Unconditionally clean and invalidate the entire icache.
*/
-ENTRY(feroceon_flush_icache_all)
+SYM_TYPED_FUNC_START(feroceon_flush_icache_all)
mov r0, #0
mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
ret lr
-ENDPROC(feroceon_flush_icache_all)
+SYM_FUNC_END(feroceon_flush_icache_all)
/*
* flush_user_cache_all()
@@ -135,15 +139,14 @@ ENDPROC(feroceon_flush_icache_all)
* address space.
*/
.align 5
-ENTRY(feroceon_flush_user_cache_all)
- /* FALLTHROUGH */
+SYM_FUNC_ALIAS(feroceon_flush_user_cache_all, feroceon_flush_kern_cache_all)
/*
* flush_kern_cache_all()
*
* Clean and invalidate the entire cache.
*/
-ENTRY(feroceon_flush_kern_cache_all)
+SYM_TYPED_FUNC_START(feroceon_flush_kern_cache_all)
mov r2, #VM_EXEC
__flush_whole_cache:
@@ -161,6 +164,7 @@ __flush_whole_cache:
mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache
mcrne p15, 0, ip, c7, c10, 4 @ drain WB
ret lr
+SYM_FUNC_END(feroceon_flush_kern_cache_all)
/*
* flush_user_cache_range(start, end, flags)
@@ -173,7 +177,7 @@ __flush_whole_cache:
* - flags - vm_flags describing address space
*/
.align 5
-ENTRY(feroceon_flush_user_cache_range)
+SYM_TYPED_FUNC_START(feroceon_flush_user_cache_range)
sub r3, r1, r0 @ calculate total size
cmp r3, #CACHE_DLIMIT
bgt __flush_whole_cache
@@ -190,6 +194,7 @@ ENTRY(feroceon_flush_user_cache_range)
mov ip, #0
mcrne p15, 0, ip, c7, c10, 4 @ drain WB
ret lr
+SYM_FUNC_END(feroceon_flush_user_cache_range)
/*
* coherent_kern_range(start, end)
@@ -202,8 +207,11 @@ ENTRY(feroceon_flush_user_cache_range)
* - end - virtual end address
*/
.align 5
-ENTRY(feroceon_coherent_kern_range)
- /* FALLTHROUGH */
+SYM_TYPED_FUNC_START(feroceon_coherent_kern_range)
+#ifdef CONFIG_CFI_CLANG /* Fallthrough if !CFI */
+ b feroceon_coherent_user_range
+#endif
+SYM_FUNC_END(feroceon_coherent_kern_range)
/*
* coherent_user_range(start, end)
@@ -215,7 +223,7 @@ ENTRY(feroceon_coherent_kern_range)
* - start - virtual start address
* - end - virtual end address
*/
-ENTRY(feroceon_coherent_user_range)
+SYM_TYPED_FUNC_START(feroceon_coherent_user_range)
bic r0, r0, #CACHE_DLINESIZE - 1
1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
mcr p15, 0, r0, c7, c5, 1 @ invalidate I entry
@@ -225,6 +233,7 @@ ENTRY(feroceon_coherent_user_range)
mcr p15, 0, r0, c7, c10, 4 @ drain WB
mov r0, #0
ret lr
+SYM_FUNC_END(feroceon_coherent_user_range)
/*
* flush_kern_dcache_area(void *addr, size_t size)
@@ -236,7 +245,7 @@ ENTRY(feroceon_coherent_user_range)
* - size - region size
*/
.align 5
-ENTRY(feroceon_flush_kern_dcache_area)
+SYM_TYPED_FUNC_START(feroceon_flush_kern_dcache_area)
add r1, r0, r1
1: mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry
add r0, r0, #CACHE_DLINESIZE
@@ -246,9 +255,10 @@ ENTRY(feroceon_flush_kern_dcache_area)
mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
mcr p15, 0, r0, c7, c10, 4 @ drain WB
ret lr
+SYM_FUNC_END(feroceon_flush_kern_dcache_area)
.align 5
-ENTRY(feroceon_range_flush_kern_dcache_area)
+SYM_TYPED_FUNC_START(feroceon_range_flush_kern_dcache_area)
mrs r2, cpsr
add r1, r0, #PAGE_SZ - CACHE_DLINESIZE @ top addr is inclusive
orr r3, r2, #PSR_I_BIT
@@ -260,6 +270,7 @@ ENTRY(feroceon_range_flush_kern_dcache_area)
mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
mcr p15, 0, r0, c7, c10, 4 @ drain WB
ret lr
+SYM_FUNC_END(feroceon_range_flush_kern_dcache_area)
/*
* dma_inv_range(start, end)
@@ -346,7 +357,7 @@ feroceon_range_dma_clean_range:
* - end - virtual end address
*/
.align 5
-ENTRY(feroceon_dma_flush_range)
+SYM_TYPED_FUNC_START(feroceon_dma_flush_range)
bic r0, r0, #CACHE_DLINESIZE - 1
1: mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry
add r0, r0, #CACHE_DLINESIZE
@@ -354,9 +365,10 @@ ENTRY(feroceon_dma_flush_range)
blo 1b
mcr p15, 0, r0, c7, c10, 4 @ drain WB
ret lr
+SYM_FUNC_END(feroceon_dma_flush_range)
.align 5
-ENTRY(feroceon_range_dma_flush_range)
+SYM_TYPED_FUNC_START(feroceon_range_dma_flush_range)
mrs r2, cpsr
cmp r1, r0
subne r1, r1, #1 @ top address is inclusive
@@ -367,6 +379,7 @@ ENTRY(feroceon_range_dma_flush_range)
msr cpsr_c, r2 @ restore interrupts
mcr p15, 0, r0, c7, c10, 4 @ drain WB
ret lr
+SYM_FUNC_END(feroceon_range_dma_flush_range)
/*
* dma_map_area(start, size, dir)
@@ -374,13 +387,13 @@ ENTRY(feroceon_range_dma_flush_range)
* - size - size of region
* - dir - DMA direction
*/
-ENTRY(feroceon_dma_map_area)
+SYM_TYPED_FUNC_START(feroceon_dma_map_area)
add r1, r1, r0
cmp r2, #DMA_TO_DEVICE
beq feroceon_dma_clean_range
bcs feroceon_dma_inv_range
b feroceon_dma_flush_range
-ENDPROC(feroceon_dma_map_area)
+SYM_FUNC_END(feroceon_dma_map_area)
/*
* dma_map_area(start, size, dir)
@@ -388,13 +401,13 @@ ENDPROC(feroceon_dma_map_area)
* - size - size of region
* - dir - DMA direction
*/
-ENTRY(feroceon_range_dma_map_area)
+SYM_TYPED_FUNC_START(feroceon_range_dma_map_area)
add r1, r1, r0
cmp r2, #DMA_TO_DEVICE
beq feroceon_range_dma_clean_range
bcs feroceon_range_dma_inv_range
b feroceon_range_dma_flush_range
-ENDPROC(feroceon_range_dma_map_area)
+SYM_FUNC_END(feroceon_range_dma_map_area)
/*
* dma_unmap_area(start, size, dir)
@@ -402,39 +415,12 @@ ENDPROC(feroceon_range_dma_map_area)
* - size - size of region
* - dir - DMA direction
*/
-ENTRY(feroceon_dma_unmap_area)
+SYM_TYPED_FUNC_START(feroceon_dma_unmap_area)
ret lr
-ENDPROC(feroceon_dma_unmap_area)
-
- .globl feroceon_flush_kern_cache_louis
- .equ feroceon_flush_kern_cache_louis, feroceon_flush_kern_cache_all
-
- @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
- define_cache_functions feroceon
-
-.macro range_alias basename
- .globl feroceon_range_\basename
- .type feroceon_range_\basename , %function
- .equ feroceon_range_\basename , feroceon_\basename
-.endm
-
-/*
- * Most of the cache functions are unchanged for this case.
- * Export suitable alias symbols for the unchanged functions:
- */
- range_alias flush_icache_all
- range_alias flush_user_cache_all
- range_alias flush_kern_cache_all
- range_alias flush_kern_cache_louis
- range_alias flush_user_cache_range
- range_alias coherent_kern_range
- range_alias coherent_user_range
- range_alias dma_unmap_area
-
- define_cache_functions feroceon_range
+SYM_FUNC_END(feroceon_dma_unmap_area)
.align 5
-ENTRY(cpu_feroceon_dcache_clean_area)
+SYM_TYPED_FUNC_START(cpu_feroceon_dcache_clean_area)
#if defined(CONFIG_CACHE_FEROCEON_L2) && \
!defined(CONFIG_CACHE_FEROCEON_L2_WRITETHROUGH)
mov r2, r0
@@ -453,6 +439,7 @@ ENTRY(cpu_feroceon_dcache_clean_area)
#endif
mcr p15, 0, r0, c7, c10, 4 @ drain WB
ret lr
+SYM_FUNC_END(cpu_feroceon_dcache_clean_area)
/* =============================== PageTable ============================== */
@@ -464,7 +451,7 @@ ENTRY(cpu_feroceon_dcache_clean_area)
* pgd: new page tables
*/
.align 5
-ENTRY(cpu_feroceon_switch_mm)
+SYM_TYPED_FUNC_START(cpu_feroceon_switch_mm)
#ifdef CONFIG_MMU
/*
* Note: we wish to call __flush_whole_cache but we need to preserve
@@ -485,6 +472,7 @@ ENTRY(cpu_feroceon_switch_mm)
#else
ret lr
#endif
+SYM_FUNC_END(cpu_feroceon_switch_mm)
/*
* cpu_feroceon_set_pte_ext(ptep, pte, ext)
@@ -492,7 +480,7 @@ ENTRY(cpu_feroceon_switch_mm)
* Set a PTE and flush it out
*/
.align 5
-ENTRY(cpu_feroceon_set_pte_ext)
+SYM_TYPED_FUNC_START(cpu_feroceon_set_pte_ext)
#ifdef CONFIG_MMU
armv3_set_pte_ext wc_disable=0
mov r0, r0
@@ -504,21 +492,22 @@ ENTRY(cpu_feroceon_set_pte_ext)
mcr p15, 0, r0, c7, c10, 4 @ drain WB
#endif
ret lr
+SYM_FUNC_END(cpu_feroceon_set_pte_ext)
/* Suspend/resume support: taken from arch/arm/mm/proc-arm926.S */
.globl cpu_feroceon_suspend_size
.equ cpu_feroceon_suspend_size, 4 * 3
#ifdef CONFIG_ARM_CPU_SUSPEND
-ENTRY(cpu_feroceon_do_suspend)
+SYM_TYPED_FUNC_START(cpu_feroceon_do_suspend)
stmfd sp!, {r4 - r6, lr}
mrc p15, 0, r4, c13, c0, 0 @ PID
mrc p15, 0, r5, c3, c0, 0 @ Domain ID
mrc p15, 0, r6, c1, c0, 0 @ Control register
stmia r0, {r4 - r6}
ldmfd sp!, {r4 - r6, pc}
-ENDPROC(cpu_feroceon_do_suspend)
+SYM_FUNC_END(cpu_feroceon_do_suspend)
-ENTRY(cpu_feroceon_do_resume)
+SYM_TYPED_FUNC_START(cpu_feroceon_do_resume)
mov ip, #0
mcr p15, 0, ip, c8, c7, 0 @ invalidate I+D TLBs
mcr p15, 0, ip, c7, c7, 0 @ invalidate I+D caches
@@ -528,7 +517,7 @@ ENTRY(cpu_feroceon_do_resume)
mcr p15, 0, r1, c2, c0, 0 @ TTB address
mov r0, r6 @ control register
b cpu_resume_mmu
-ENDPROC(cpu_feroceon_do_resume)
+SYM_FUNC_END(cpu_feroceon_do_resume)
#endif
.type __feroceon_setup, #function
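Two idioms recur in the feroceon conversion above. Where one ENTRY label used to fall straight through into the next body (the old /* FALLTHROUGH */ comments), the patch either folds the first symbol into SYM_FUNC_ALIAS() when the two really are the same function, or keeps two typed symbols and inserts an explicit branch that is only assembled under CONFIG_CFI_CLANG, preserving the fallthrough when CFI is off. A C analogue of that split, with illustrative names and no-op bodies rather than the kernel's:

/*
 * C analogue of splitting a fallthrough entry point for CFI:
 * kern_range used to be a second label on user_range's first
 * instruction; with per-symbol type hashes it becomes a function of
 * its own that tail-calls the shared body. Illustrative names only.
 */
static int coherent_user_range(unsigned long start, unsigned long end)
{
	/* clean and invalidate each cache line in [start, end) */
	(void)start;
	(void)end;
	return 0;
}

static int coherent_kern_range(unsigned long start, unsigned long end)
{
	return coherent_user_range(start, end);	/* like "b ..._user_range" */
}

int main(void)
{
	return coherent_kern_range(0x1000, 0x2000);
}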
diff --git a/arch/arm/mm/proc-macros.S b/arch/arm/mm/proc-macros.S
index e43f6d716b4b..e388c4cc0c44 100644
--- a/arch/arm/mm/proc-macros.S
+++ b/arch/arm/mm/proc-macros.S
@@ -320,39 +320,6 @@ ENTRY(\name\()_processor_functions)
#endif
.endm
-.macro define_cache_functions name:req
- .align 2
- .type \name\()_cache_fns, #object
-ENTRY(\name\()_cache_fns)
- .long \name\()_flush_icache_all
- .long \name\()_flush_kern_cache_all
- .long \name\()_flush_kern_cache_louis
- .long \name\()_flush_user_cache_all
- .long \name\()_flush_user_cache_range
- .long \name\()_coherent_kern_range
- .long \name\()_coherent_user_range
- .long \name\()_flush_kern_dcache_area
- .long \name\()_dma_map_area
- .long \name\()_dma_unmap_area
- .long \name\()_dma_flush_range
- .size \name\()_cache_fns, . - \name\()_cache_fns
-.endm
-
-.macro define_tlb_functions name:req, flags_up:req, flags_smp
- .type \name\()_tlb_fns, #object
- .align 2
-ENTRY(\name\()_tlb_fns)
- .long \name\()_flush_user_tlb_range
- .long \name\()_flush_kern_tlb_range
- .ifnb \flags_smp
- ALT_SMP(.long \flags_smp )
- ALT_UP(.long \flags_up )
- .else
- .long \flags_up
- .endif
- .size \name\()_tlb_fns, . - \name\()_tlb_fns
-.endm
-
.macro globl_equ x, y
.globl \x
.equ \x, \y
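The two macros deleted above used to assemble the per-CPU cache and TLB ops tables out of bare .long directives, which leaves the entries untyped as far as CFI is concerned. Their replacements in this series are ordinary C structure initializers (the cache.c rewrite in the diffstat), where every slot is a typed function pointer whose KCFI hash matches the call sites. A simplified, self-contained sketch of that move; the struct and names below are stand-ins, not the kernel's struct cpu_cache_fns:

/*
 * Simplified sketch of replacing an asm-built ".long" table with a
 * designated C initializer: each slot is a typed function pointer,
 * so calls made through the table can be KCFI-checked. Stand-in
 * types and names only.
 */
#include <stdio.h>

struct cache_fns {
	void (*flush_icache_all)(void);
	void (*flush_kern_all)(void);
};

static void demo_flush_icache_all(void)
{
	puts("flush icache");
}

static void demo_flush_kern_all(void)
{
	puts("flush kern cache");
}

static const struct cache_fns demo_cache_fns = {
	.flush_icache_all	= demo_flush_icache_all,
	.flush_kern_all		= demo_flush_kern_all,
};

int main(void)
{
	demo_cache_fns.flush_icache_all();	/* indirect, CFI-checkable */
	demo_cache_fns.flush_kern_all();
	return 0;
}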
diff --git a/arch/arm/mm/proc-mohawk.S b/arch/arm/mm/proc-mohawk.S
index 1645ccaffe96..8e9f38da863a 100644
--- a/arch/arm/mm/proc-mohawk.S
+++ b/arch/arm/mm/proc-mohawk.S
@@ -9,6 +9,7 @@
#include <linux/linkage.h>
#include <linux/init.h>
+#include <linux/cfi_types.h>
#include <linux/pgtable.h>
#include <asm/assembler.h>
#include <asm/hwcap.h>
@@ -31,18 +32,20 @@
/*
* cpu_mohawk_proc_init()
*/
-ENTRY(cpu_mohawk_proc_init)
+SYM_TYPED_FUNC_START(cpu_mohawk_proc_init)
ret lr
+SYM_FUNC_END(cpu_mohawk_proc_init)
/*
* cpu_mohawk_proc_fin()
*/
-ENTRY(cpu_mohawk_proc_fin)
+SYM_TYPED_FUNC_START(cpu_mohawk_proc_fin)
mrc p15, 0, r0, c1, c0, 0 @ ctrl register
bic r0, r0, #0x1800 @ ...iz...........
bic r0, r0, #0x0006 @ .............ca.
mcr p15, 0, r0, c1, c0, 0 @ disable caches
ret lr
+SYM_FUNC_END(cpu_mohawk_proc_fin)
/*
* cpu_mohawk_reset(loc)
@@ -57,7 +60,7 @@ ENTRY(cpu_mohawk_proc_fin)
*/
.align 5
.pushsection .idmap.text, "ax"
-ENTRY(cpu_mohawk_reset)
+SYM_TYPED_FUNC_START(cpu_mohawk_reset)
mov ip, #0
mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches
mcr p15, 0, ip, c7, c10, 4 @ drain WB
@@ -67,7 +70,7 @@ ENTRY(cpu_mohawk_reset)
bic ip, ip, #0x1100 @ ...i...s........
mcr p15, 0, ip, c1, c0, 0 @ ctrl register
ret r0
-ENDPROC(cpu_mohawk_reset)
+SYM_FUNC_END(cpu_mohawk_reset)
.popsection
/*
@@ -76,22 +79,23 @@ ENDPROC(cpu_mohawk_reset)
* Called with IRQs disabled
*/
.align 5
-ENTRY(cpu_mohawk_do_idle)
+SYM_TYPED_FUNC_START(cpu_mohawk_do_idle)
mov r0, #0
mcr p15, 0, r0, c7, c10, 4 @ drain write buffer
mcr p15, 0, r0, c7, c0, 4 @ wait for interrupt
ret lr
+SYM_FUNC_END(cpu_mohawk_do_idle)
/*
* flush_icache_all()
*
* Unconditionally clean and invalidate the entire icache.
*/
-ENTRY(mohawk_flush_icache_all)
+SYM_TYPED_FUNC_START(mohawk_flush_icache_all)
mov r0, #0
mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
ret lr
-ENDPROC(mohawk_flush_icache_all)
+SYM_FUNC_END(mohawk_flush_icache_all)
/*
* flush_user_cache_all()
@@ -99,15 +103,14 @@ ENDPROC(mohawk_flush_icache_all)
* Clean and invalidate all cache entries in a particular
* address space.
*/
-ENTRY(mohawk_flush_user_cache_all)
- /* FALLTHROUGH */
+SYM_FUNC_ALIAS(mohawk_flush_user_cache_all, mohawk_flush_kern_cache_all)
/*
* flush_kern_cache_all()
*
* Clean and invalidate the entire cache.
*/
-ENTRY(mohawk_flush_kern_cache_all)
+SYM_TYPED_FUNC_START(mohawk_flush_kern_cache_all)
mov r2, #VM_EXEC
mov ip, #0
__flush_whole_cache:
@@ -116,6 +119,7 @@ __flush_whole_cache:
mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache
mcrne p15, 0, ip, c7, c10, 0 @ drain write buffer
ret lr
+SYM_FUNC_END(mohawk_flush_kern_cache_all)
/*
* flush_user_cache_range(start, end, flags)
@@ -129,7 +133,7 @@ __flush_whole_cache:
*
* (same as arm926)
*/
-ENTRY(mohawk_flush_user_cache_range)
+SYM_TYPED_FUNC_START(mohawk_flush_user_cache_range)
mov ip, #0
sub r3, r1, r0 @ calculate total size
cmp r3, #CACHE_DLIMIT
@@ -146,6 +150,7 @@ ENTRY(mohawk_flush_user_cache_range)
tst r2, #VM_EXEC
mcrne p15, 0, ip, c7, c10, 4 @ drain WB
ret lr
+SYM_FUNC_END(mohawk_flush_user_cache_range)
/*
* coherent_kern_range(start, end)
@@ -157,8 +162,11 @@ ENTRY(mohawk_flush_user_cache_range)
* - start - virtual start address
* - end - virtual end address
*/
-ENTRY(mohawk_coherent_kern_range)
- /* FALLTHROUGH */
+SYM_TYPED_FUNC_START(mohawk_coherent_kern_range)
+#ifdef CONFIG_CFI_CLANG /* Fallthrough if !CFI */
+ b mohawk_coherent_user_range
+#endif
+SYM_FUNC_END(mohawk_coherent_kern_range)
/*
* coherent_user_range(start, end)
@@ -172,7 +180,7 @@ ENTRY(mohawk_coherent_kern_range)
*
* (same as arm926)
*/
-ENTRY(mohawk_coherent_user_range)
+SYM_TYPED_FUNC_START(mohawk_coherent_user_range)
bic r0, r0, #CACHE_DLINESIZE - 1
1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
mcr p15, 0, r0, c7, c5, 1 @ invalidate I entry
@@ -182,6 +190,7 @@ ENTRY(mohawk_coherent_user_range)
mcr p15, 0, r0, c7, c10, 4 @ drain WB
mov r0, #0
ret lr
+SYM_FUNC_END(mohawk_coherent_user_range)
/*
* flush_kern_dcache_area(void *addr, size_t size)
@@ -192,7 +201,7 @@ ENTRY(mohawk_coherent_user_range)
* - addr - kernel address
* - size - region size
*/
-ENTRY(mohawk_flush_kern_dcache_area)
+SYM_TYPED_FUNC_START(mohawk_flush_kern_dcache_area)
add r1, r0, r1
1: mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry
add r0, r0, #CACHE_DLINESIZE
@@ -202,6 +211,7 @@ ENTRY(mohawk_flush_kern_dcache_area)
mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
mcr p15, 0, r0, c7, c10, 4 @ drain WB
ret lr
+SYM_FUNC_END(mohawk_flush_kern_dcache_area)
/*
* dma_inv_range(start, end)
@@ -256,7 +266,7 @@ mohawk_dma_clean_range:
* - start - virtual start address
* - end - virtual end address
*/
-ENTRY(mohawk_dma_flush_range)
+SYM_TYPED_FUNC_START(mohawk_dma_flush_range)
bic r0, r0, #CACHE_DLINESIZE - 1
1:
mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry
@@ -265,6 +275,7 @@ ENTRY(mohawk_dma_flush_range)
blo 1b
mcr p15, 0, r0, c7, c10, 4 @ drain WB
ret lr
+SYM_FUNC_END(mohawk_dma_flush_range)
/*
* dma_map_area(start, size, dir)
@@ -272,13 +283,13 @@ ENTRY(mohawk_dma_flush_range)
* - size - size of region
* - dir - DMA direction
*/
-ENTRY(mohawk_dma_map_area)
+SYM_TYPED_FUNC_START(mohawk_dma_map_area)
add r1, r1, r0
cmp r2, #DMA_TO_DEVICE
beq mohawk_dma_clean_range
bcs mohawk_dma_inv_range
b mohawk_dma_flush_range
-ENDPROC(mohawk_dma_map_area)
+SYM_FUNC_END(mohawk_dma_map_area)
/*
* dma_unmap_area(start, size, dir)
@@ -286,23 +297,18 @@ ENDPROC(mohawk_dma_map_area)
* - size - size of region
* - dir - DMA direction
*/
-ENTRY(mohawk_dma_unmap_area)
+SYM_TYPED_FUNC_START(mohawk_dma_unmap_area)
ret lr
-ENDPROC(mohawk_dma_unmap_area)
-
- .globl mohawk_flush_kern_cache_louis
- .equ mohawk_flush_kern_cache_louis, mohawk_flush_kern_cache_all
-
- @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
- define_cache_functions mohawk
+SYM_FUNC_END(mohawk_dma_unmap_area)
-ENTRY(cpu_mohawk_dcache_clean_area)
+SYM_TYPED_FUNC_START(cpu_mohawk_dcache_clean_area)
1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
add r0, r0, #CACHE_DLINESIZE
subs r1, r1, #CACHE_DLINESIZE
bhi 1b
mcr p15, 0, r0, c7, c10, 4 @ drain WB
ret lr
+SYM_FUNC_END(cpu_mohawk_dcache_clean_area)
/*
* cpu_mohawk_switch_mm(pgd)
@@ -312,7 +318,7 @@ ENTRY(cpu_mohawk_dcache_clean_area)
* pgd: new page tables
*/
.align 5
-ENTRY(cpu_mohawk_switch_mm)
+SYM_TYPED_FUNC_START(cpu_mohawk_switch_mm)
mov ip, #0
mcr p15, 0, ip, c7, c14, 0 @ clean & invalidate all D cache
mcr p15, 0, ip, c7, c5, 0 @ invalidate I cache
@@ -321,6 +327,7 @@ ENTRY(cpu_mohawk_switch_mm)
mcr p15, 0, r0, c2, c0, 0 @ load page table pointer
mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs
ret lr
+SYM_FUNC_END(cpu_mohawk_switch_mm)
/*
* cpu_mohawk_set_pte_ext(ptep, pte, ext)
@@ -328,7 +335,7 @@ ENTRY(cpu_mohawk_switch_mm)
* Set a PTE and flush it out
*/
.align 5
-ENTRY(cpu_mohawk_set_pte_ext)
+SYM_TYPED_FUNC_START(cpu_mohawk_set_pte_ext)
#ifdef CONFIG_MMU
armv3_set_pte_ext
mov r0, r0
@@ -336,11 +343,12 @@ ENTRY(cpu_mohawk_set_pte_ext)
mcr p15, 0, r0, c7, c10, 4 @ drain WB
ret lr
#endif
+SYM_FUNC_END(cpu_mohawk_set_pte_ext)
.globl cpu_mohawk_suspend_size
.equ cpu_mohawk_suspend_size, 4 * 6
#ifdef CONFIG_ARM_CPU_SUSPEND
-ENTRY(cpu_mohawk_do_suspend)
+SYM_TYPED_FUNC_START(cpu_mohawk_do_suspend)
stmfd sp!, {r4 - r9, lr}
mrc p14, 0, r4, c6, c0, 0 @ clock configuration, for turbo mode
mrc p15, 0, r5, c15, c1, 0 @ CP access reg
@@ -351,9 +359,9 @@ ENTRY(cpu_mohawk_do_suspend)
bic r4, r4, #2 @ clear frequency change bit
stmia r0, {r4 - r9} @ store cp regs
ldmia sp!, {r4 - r9, pc}
-ENDPROC(cpu_mohawk_do_suspend)
+SYM_FUNC_END(cpu_mohawk_do_suspend)
-ENTRY(cpu_mohawk_do_resume)
+SYM_TYPED_FUNC_START(cpu_mohawk_do_resume)
ldmia r0, {r4 - r9} @ load cp regs
mov ip, #0
mcr p15, 0, ip, c7, c7, 0 @ invalidate I & D caches, BTB
@@ -369,7 +377,7 @@ ENTRY(cpu_mohawk_do_resume)
mcr p15, 0, r8, c1, c0, 1 @ auxiliary control reg
mov r0, r9 @ control register
b cpu_resume_mmu
-ENDPROC(cpu_mohawk_do_resume)
+SYM_FUNC_END(cpu_mohawk_do_resume)
#endif
.type __mohawk_setup, #function
diff --git a/arch/arm/mm/proc-sa110.S b/arch/arm/mm/proc-sa110.S
index 4071f7a61cb6..3da76fab8ac3 100644
--- a/arch/arm/mm/proc-sa110.S
+++ b/arch/arm/mm/proc-sa110.S
@@ -12,6 +12,7 @@
*/
#include <linux/linkage.h>
#include <linux/init.h>
+#include <linux/cfi_types.h>
#include <linux/pgtable.h>
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
@@ -32,15 +33,16 @@
/*
* cpu_sa110_proc_init()
*/
-ENTRY(cpu_sa110_proc_init)
+SYM_TYPED_FUNC_START(cpu_sa110_proc_init)
mov r0, #0
mcr p15, 0, r0, c15, c1, 2 @ Enable clock switching
ret lr
+SYM_FUNC_END(cpu_sa110_proc_init)
/*
* cpu_sa110_proc_fin()
*/
-ENTRY(cpu_sa110_proc_fin)
+SYM_TYPED_FUNC_START(cpu_sa110_proc_fin)
mov r0, #0
mcr p15, 0, r0, c15, c2, 2 @ Disable clock switching
mrc p15, 0, r0, c1, c0, 0 @ ctrl register
@@ -48,6 +50,7 @@ ENTRY(cpu_sa110_proc_fin)
bic r0, r0, #0x000e @ ............wca.
mcr p15, 0, r0, c1, c0, 0 @ disable caches
ret lr
+SYM_FUNC_END(cpu_sa110_proc_fin)
/*
* cpu_sa110_reset(loc)
@@ -60,7 +63,7 @@ ENTRY(cpu_sa110_proc_fin)
*/
.align 5
.pushsection .idmap.text, "ax"
-ENTRY(cpu_sa110_reset)
+SYM_TYPED_FUNC_START(cpu_sa110_reset)
mov ip, #0
mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches
mcr p15, 0, ip, c7, c10, 4 @ drain WB
@@ -72,7 +75,7 @@ ENTRY(cpu_sa110_reset)
bic ip, ip, #0x1100 @ ...i...s........
mcr p15, 0, ip, c1, c0, 0 @ ctrl register
ret r0
-ENDPROC(cpu_sa110_reset)
+SYM_FUNC_END(cpu_sa110_reset)
.popsection
/*
@@ -88,7 +91,7 @@ ENDPROC(cpu_sa110_reset)
*/
.align 5
-ENTRY(cpu_sa110_do_idle)
+SYM_TYPED_FUNC_START(cpu_sa110_do_idle)
mcr p15, 0, ip, c15, c2, 2 @ disable clock switching
ldr r1, =UNCACHEABLE_ADDR @ load from uncacheable loc
ldr r1, [r1, #0] @ force switch to MCLK
@@ -101,6 +104,7 @@ ENTRY(cpu_sa110_do_idle)
mov r0, r0 @ safety
mcr p15, 0, r0, c15, c1, 2 @ enable clock switching
ret lr
+SYM_FUNC_END(cpu_sa110_do_idle)
/* ================================= CACHE ================================ */
@@ -113,12 +117,13 @@ ENTRY(cpu_sa110_do_idle)
* addr: cache-unaligned virtual address
*/
.align 5
-ENTRY(cpu_sa110_dcache_clean_area)
+SYM_TYPED_FUNC_START(cpu_sa110_dcache_clean_area)
1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
add r0, r0, #DCACHELINESIZE
subs r1, r1, #DCACHELINESIZE
bhi 1b
ret lr
+SYM_FUNC_END(cpu_sa110_dcache_clean_area)
/* =============================== PageTable ============================== */
@@ -130,7 +135,7 @@ ENTRY(cpu_sa110_dcache_clean_area)
* pgd: new page tables
*/
.align 5
-ENTRY(cpu_sa110_switch_mm)
+SYM_TYPED_FUNC_START(cpu_sa110_switch_mm)
#ifdef CONFIG_MMU
str lr, [sp, #-4]!
bl v4wb_flush_kern_cache_all @ clears IP
@@ -140,6 +145,7 @@ ENTRY(cpu_sa110_switch_mm)
#else
ret lr
#endif
+SYM_FUNC_END(cpu_sa110_switch_mm)
/*
* cpu_sa110_set_pte_ext(ptep, pte, ext)
@@ -147,7 +153,7 @@ ENTRY(cpu_sa110_switch_mm)
* Set a PTE and flush it out
*/
.align 5
-ENTRY(cpu_sa110_set_pte_ext)
+SYM_TYPED_FUNC_START(cpu_sa110_set_pte_ext)
#ifdef CONFIG_MMU
armv3_set_pte_ext wc_disable=0
mov r0, r0
@@ -155,6 +161,7 @@ ENTRY(cpu_sa110_set_pte_ext)
mcr p15, 0, r0, c7, c10, 4 @ drain WB
#endif
ret lr
+SYM_FUNC_END(cpu_sa110_set_pte_ext)
.type __sa110_setup, #function
__sa110_setup:
diff --git a/arch/arm/mm/proc-sa1100.S b/arch/arm/mm/proc-sa1100.S
index e723bd4119d3..7c496195e440 100644
--- a/arch/arm/mm/proc-sa1100.S
+++ b/arch/arm/mm/proc-sa1100.S
@@ -17,6 +17,7 @@
*/
#include <linux/linkage.h>
#include <linux/init.h>
+#include <linux/cfi_types.h>
#include <linux/pgtable.h>
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
@@ -36,11 +37,12 @@
/*
* cpu_sa1100_proc_init()
*/
-ENTRY(cpu_sa1100_proc_init)
+SYM_TYPED_FUNC_START(cpu_sa1100_proc_init)
mov r0, #0
mcr p15, 0, r0, c15, c1, 2 @ Enable clock switching
mcr p15, 0, r0, c9, c0, 5 @ Allow read-buffer operations from userland
ret lr
+SYM_FUNC_END(cpu_sa1100_proc_init)
/*
* cpu_sa1100_proc_fin()
@@ -49,13 +51,14 @@ ENTRY(cpu_sa1100_proc_init)
* - Disable interrupts
* - Clean and turn off caches.
*/
-ENTRY(cpu_sa1100_proc_fin)
+SYM_TYPED_FUNC_START(cpu_sa1100_proc_fin)
mcr p15, 0, ip, c15, c2, 2 @ Disable clock switching
mrc p15, 0, r0, c1, c0, 0 @ ctrl register
bic r0, r0, #0x1000 @ ...i............
bic r0, r0, #0x000e @ ............wca.
mcr p15, 0, r0, c1, c0, 0 @ disable caches
ret lr
+SYM_FUNC_END(cpu_sa1100_proc_fin)
/*
* cpu_sa1100_reset(loc)
@@ -68,7 +71,7 @@ ENTRY(cpu_sa1100_proc_fin)
*/
.align 5
.pushsection .idmap.text, "ax"
-ENTRY(cpu_sa1100_reset)
+SYM_TYPED_FUNC_START(cpu_sa1100_reset)
mov ip, #0
mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches
mcr p15, 0, ip, c7, c10, 4 @ drain WB
@@ -80,7 +83,7 @@ ENTRY(cpu_sa1100_reset)
bic ip, ip, #0x1100 @ ...i...s........
mcr p15, 0, ip, c1, c0, 0 @ ctrl register
ret r0
-ENDPROC(cpu_sa1100_reset)
+SYM_FUNC_END(cpu_sa1100_reset)
.popsection
/*
@@ -95,7 +98,7 @@ ENDPROC(cpu_sa1100_reset)
* 3 = switch to fast processor clock
*/
.align 5
-ENTRY(cpu_sa1100_do_idle)
+SYM_TYPED_FUNC_START(cpu_sa1100_do_idle)
mov r0, r0 @ 4 nop padding
mov r0, r0
mov r0, r0
@@ -111,6 +114,7 @@ ENTRY(cpu_sa1100_do_idle)
mov r0, r0 @ safety
mcr p15, 0, r0, c15, c1, 2 @ enable clock switching
ret lr
+SYM_FUNC_END(cpu_sa1100_do_idle)
/* ================================= CACHE ================================ */
@@ -123,12 +127,13 @@ ENTRY(cpu_sa1100_do_idle)
* addr: cache-unaligned virtual address
*/
.align 5
-ENTRY(cpu_sa1100_dcache_clean_area)
+SYM_TYPED_FUNC_START(cpu_sa1100_dcache_clean_area)
1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
add r0, r0, #DCACHELINESIZE
subs r1, r1, #DCACHELINESIZE
bhi 1b
ret lr
+SYM_FUNC_END(cpu_sa1100_dcache_clean_area)
/* =============================== PageTable ============================== */
@@ -140,7 +145,7 @@ ENTRY(cpu_sa1100_dcache_clean_area)
* pgd: new page tables
*/
.align 5
-ENTRY(cpu_sa1100_switch_mm)
+SYM_TYPED_FUNC_START(cpu_sa1100_switch_mm)
#ifdef CONFIG_MMU
str lr, [sp, #-4]!
bl v4wb_flush_kern_cache_all @ clears IP
@@ -151,6 +156,7 @@ ENTRY(cpu_sa1100_switch_mm)
#else
ret lr
#endif
+SYM_FUNC_END(cpu_sa1100_switch_mm)
/*
* cpu_sa1100_set_pte_ext(ptep, pte, ext)
@@ -158,7 +164,7 @@ ENTRY(cpu_sa1100_switch_mm)
* Set a PTE and flush it out
*/
.align 5
-ENTRY(cpu_sa1100_set_pte_ext)
+SYM_TYPED_FUNC_START(cpu_sa1100_set_pte_ext)
#ifdef CONFIG_MMU
armv3_set_pte_ext wc_disable=0
mov r0, r0
@@ -166,20 +172,21 @@ ENTRY(cpu_sa1100_set_pte_ext)
mcr p15, 0, r0, c7, c10, 4 @ drain WB
#endif
ret lr
+SYM_FUNC_END(cpu_sa1100_set_pte_ext)
.globl cpu_sa1100_suspend_size
.equ cpu_sa1100_suspend_size, 4 * 3
#ifdef CONFIG_ARM_CPU_SUSPEND
-ENTRY(cpu_sa1100_do_suspend)
+SYM_TYPED_FUNC_START(cpu_sa1100_do_suspend)
stmfd sp!, {r4 - r6, lr}
mrc p15, 0, r4, c3, c0, 0 @ domain ID
mrc p15, 0, r5, c13, c0, 0 @ PID
mrc p15, 0, r6, c1, c0, 0 @ control reg
stmia r0, {r4 - r6} @ store cp regs
ldmfd sp!, {r4 - r6, pc}
-ENDPROC(cpu_sa1100_do_suspend)
+SYM_FUNC_END(cpu_sa1100_do_suspend)
-ENTRY(cpu_sa1100_do_resume)
+SYM_TYPED_FUNC_START(cpu_sa1100_do_resume)
ldmia r0, {r4 - r6} @ load cp regs
mov ip, #0
mcr p15, 0, ip, c8, c7, 0 @ flush I+D TLBs
@@ -192,7 +199,7 @@ ENTRY(cpu_sa1100_do_resume)
mcr p15, 0, r5, c13, c0, 0 @ PID
mov r0, r6 @ control register
b cpu_resume_mmu
-ENDPROC(cpu_sa1100_do_resume)
+SYM_FUNC_END(cpu_sa1100_do_resume)
#endif
.type __sa1100_setup, #function
diff --git a/arch/arm/mm/proc-v6.S b/arch/arm/mm/proc-v6.S
index 203dff89ab1a..90a01f5950b9 100644
--- a/arch/arm/mm/proc-v6.S
+++ b/arch/arm/mm/proc-v6.S
@@ -8,6 +8,7 @@
* This is the "shell" of the ARMv6 processor support.
*/
#include <linux/init.h>
+#include <linux/cfi_types.h>
#include <linux/linkage.h>
#include <linux/pgtable.h>
#include <asm/assembler.h>
@@ -34,15 +35,17 @@
.arch armv6
-ENTRY(cpu_v6_proc_init)
+SYM_TYPED_FUNC_START(cpu_v6_proc_init)
ret lr
+SYM_FUNC_END(cpu_v6_proc_init)
-ENTRY(cpu_v6_proc_fin)
+SYM_TYPED_FUNC_START(cpu_v6_proc_fin)
mrc p15, 0, r0, c1, c0, 0 @ ctrl register
bic r0, r0, #0x1000 @ ...i............
bic r0, r0, #0x0006 @ .............ca.
mcr p15, 0, r0, c1, c0, 0 @ disable caches
ret lr
+SYM_FUNC_END(cpu_v6_proc_fin)
/*
* cpu_v6_reset(loc)
@@ -55,14 +58,14 @@ ENTRY(cpu_v6_proc_fin)
*/
.align 5
.pushsection .idmap.text, "ax"
-ENTRY(cpu_v6_reset)
+SYM_TYPED_FUNC_START(cpu_v6_reset)
mrc p15, 0, r1, c1, c0, 0 @ ctrl register
bic r1, r1, #0x1 @ ...............m
mcr p15, 0, r1, c1, c0, 0 @ disable MMU
mov r1, #0
mcr p15, 0, r1, c7, c5, 4 @ ISB
ret r0
-ENDPROC(cpu_v6_reset)
+SYM_FUNC_END(cpu_v6_reset)
.popsection
/*
@@ -72,18 +75,20 @@ ENDPROC(cpu_v6_reset)
*
* IRQs are already disabled.
*/
-ENTRY(cpu_v6_do_idle)
+SYM_TYPED_FUNC_START(cpu_v6_do_idle)
mov r1, #0
mcr p15, 0, r1, c7, c10, 4 @ DWB - WFI may enter a low-power mode
mcr p15, 0, r1, c7, c0, 4 @ wait for interrupt
ret lr
+SYM_FUNC_END(cpu_v6_do_idle)
-ENTRY(cpu_v6_dcache_clean_area)
+SYM_TYPED_FUNC_START(cpu_v6_dcache_clean_area)
1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
add r0, r0, #D_CACHE_LINE_SIZE
subs r1, r1, #D_CACHE_LINE_SIZE
bhi 1b
ret lr
+SYM_FUNC_END(cpu_v6_dcache_clean_area)
/*
* cpu_v6_switch_mm(pgd_phys, tsk)
@@ -95,7 +100,7 @@ ENTRY(cpu_v6_dcache_clean_area)
* It is assumed that:
* - we are not using split page tables
*/
-ENTRY(cpu_v6_switch_mm)
+SYM_TYPED_FUNC_START(cpu_v6_switch_mm)
#ifdef CONFIG_MMU
mov r2, #0
mmid r1, r1 @ get mm->context.id
@@ -113,6 +118,7 @@ ENTRY(cpu_v6_switch_mm)
mcr p15, 0, r1, c13, c0, 1 @ set context ID
#endif
ret lr
+SYM_FUNC_END(cpu_v6_switch_mm)
/*
* cpu_v6_set_pte_ext(ptep, pte, ext)
@@ -126,17 +132,18 @@ ENTRY(cpu_v6_switch_mm)
*/
armv6_mt_table cpu_v6
-ENTRY(cpu_v6_set_pte_ext)
+SYM_TYPED_FUNC_START(cpu_v6_set_pte_ext)
#ifdef CONFIG_MMU
armv6_set_pte_ext cpu_v6
#endif
ret lr
+SYM_FUNC_END(cpu_v6_set_pte_ext)
/* Suspend/resume support: taken from arch/arm/mach-s3c64xx/sleep.S */
.globl cpu_v6_suspend_size
.equ cpu_v6_suspend_size, 4 * 6
#ifdef CONFIG_ARM_CPU_SUSPEND
-ENTRY(cpu_v6_do_suspend)
+SYM_TYPED_FUNC_START(cpu_v6_do_suspend)
stmfd sp!, {r4 - r9, lr}
mrc p15, 0, r4, c13, c0, 0 @ FCSE/PID
#ifdef CONFIG_MMU
@@ -148,9 +155,9 @@ ENTRY(cpu_v6_do_suspend)
mrc p15, 0, r9, c1, c0, 0 @ control register
stmia r0, {r4 - r9}
ldmfd sp!, {r4- r9, pc}
-ENDPROC(cpu_v6_do_suspend)
+SYM_FUNC_END(cpu_v6_do_suspend)
-ENTRY(cpu_v6_do_resume)
+SYM_TYPED_FUNC_START(cpu_v6_do_resume)
mov ip, #0
mcr p15, 0, ip, c7, c14, 0 @ clean+invalidate D cache
mcr p15, 0, ip, c7, c5, 0 @ invalidate I cache
@@ -172,7 +179,7 @@ ENTRY(cpu_v6_do_resume)
mcr p15, 0, ip, c7, c5, 4 @ ISB
mov r0, r9 @ control register
b cpu_resume_mmu
-ENDPROC(cpu_v6_do_resume)
+SYM_FUNC_END(cpu_v6_do_resume)
#endif
string cpu_v6_name, "ARMv6-compatible processor"
diff --git a/arch/arm/mm/proc-v7-2level.S b/arch/arm/mm/proc-v7-2level.S
index 0a3083ad19c2..1007702fcaf3 100644
--- a/arch/arm/mm/proc-v7-2level.S
+++ b/arch/arm/mm/proc-v7-2level.S
@@ -40,7 +40,7 @@
* even on Cortex-A8 revisions not affected by 430973.
* If IBE is not set, the flush BTAC/BTB won't do anything.
*/
-ENTRY(cpu_v7_switch_mm)
+SYM_TYPED_FUNC_START(cpu_v7_switch_mm)
#ifdef CONFIG_MMU
mmid r1, r1 @ get mm->context.id
ALT_SMP(orr r0, r0, #TTB_FLAGS_SMP)
@@ -59,7 +59,7 @@ ENTRY(cpu_v7_switch_mm)
isb
#endif
bx lr
-ENDPROC(cpu_v7_switch_mm)
+SYM_FUNC_END(cpu_v7_switch_mm)
/*
* cpu_v7_set_pte_ext(ptep, pte)
@@ -71,7 +71,7 @@ ENDPROC(cpu_v7_switch_mm)
* - pte - PTE value to store
* - ext - value for extended PTE bits
*/
-ENTRY(cpu_v7_set_pte_ext)
+SYM_TYPED_FUNC_START(cpu_v7_set_pte_ext)
#ifdef CONFIG_MMU
str r1, [r0] @ linux version
@@ -106,7 +106,7 @@ ENTRY(cpu_v7_set_pte_ext)
ALT_UP (mcr p15, 0, r0, c7, c10, 1) @ flush_pte
#endif
bx lr
-ENDPROC(cpu_v7_set_pte_ext)
+SYM_FUNC_END(cpu_v7_set_pte_ext)
/*
* Memory region attributes with SCTLR.TRE=1
diff --git a/arch/arm/mm/proc-v7-3level.S b/arch/arm/mm/proc-v7-3level.S
index 131984462d0d..bdabc15cde56 100644
--- a/arch/arm/mm/proc-v7-3level.S
+++ b/arch/arm/mm/proc-v7-3level.S
@@ -42,7 +42,7 @@
* Set the translation table base pointer to be pgd_phys (physical address of
* the new TTB).
*/
-ENTRY(cpu_v7_switch_mm)
+SYM_TYPED_FUNC_START(cpu_v7_switch_mm)
#ifdef CONFIG_MMU
mmid r2, r2
asid r2, r2
@@ -51,7 +51,7 @@ ENTRY(cpu_v7_switch_mm)
isb
#endif
ret lr
-ENDPROC(cpu_v7_switch_mm)
+SYM_FUNC_END(cpu_v7_switch_mm)
#ifdef __ARMEB__
#define rl r3
@@ -68,7 +68,7 @@ ENDPROC(cpu_v7_switch_mm)
* - ptep - pointer to level 3 translation table entry
* - pte - PTE value to store (64-bit in r2 and r3)
*/
-ENTRY(cpu_v7_set_pte_ext)
+SYM_TYPED_FUNC_START(cpu_v7_set_pte_ext)
#ifdef CONFIG_MMU
tst rl, #L_PTE_VALID
beq 1f
@@ -87,7 +87,7 @@ ENTRY(cpu_v7_set_pte_ext)
ALT_UP (mcr p15, 0, r0, c7, c10, 1) @ flush_pte
#endif
ret lr
-ENDPROC(cpu_v7_set_pte_ext)
+SYM_FUNC_END(cpu_v7_set_pte_ext)
/*
* Memory region attributes for LPAE (defined in pgtable-3level.h):
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
index 193c7aeb6703..5fb9a6aecb00 100644
--- a/arch/arm/mm/proc-v7.S
+++ b/arch/arm/mm/proc-v7.S
@@ -7,6 +7,7 @@
* This is the "shell" of the ARMv7 processor support.
*/
#include <linux/arm-smccc.h>
+#include <linux/cfi_types.h>
#include <linux/init.h>
#include <linux/linkage.h>
#include <linux/pgtable.h>
@@ -26,17 +27,17 @@
.arch armv7-a
-ENTRY(cpu_v7_proc_init)
+SYM_TYPED_FUNC_START(cpu_v7_proc_init)
ret lr
-ENDPROC(cpu_v7_proc_init)
+SYM_FUNC_END(cpu_v7_proc_init)
-ENTRY(cpu_v7_proc_fin)
+SYM_TYPED_FUNC_START(cpu_v7_proc_fin)
mrc p15, 0, r0, c1, c0, 0 @ ctrl register
bic r0, r0, #0x1000 @ ...i............
bic r0, r0, #0x0006 @ .............ca.
mcr p15, 0, r0, c1, c0, 0 @ disable caches
ret lr
-ENDPROC(cpu_v7_proc_fin)
+SYM_FUNC_END(cpu_v7_proc_fin)
/*
* cpu_v7_reset(loc, hyp)
@@ -53,7 +54,7 @@ ENDPROC(cpu_v7_proc_fin)
*/
.align 5
.pushsection .idmap.text, "ax"
-ENTRY(cpu_v7_reset)
+SYM_TYPED_FUNC_START(cpu_v7_reset)
mrc p15, 0, r2, c1, c0, 0 @ ctrl register
bic r2, r2, #0x1 @ ...............m
THUMB( bic r2, r2, #1 << 30 ) @ SCTLR.TE (Thumb exceptions)
@@ -64,7 +65,7 @@ ENTRY(cpu_v7_reset)
bne __hyp_soft_restart
#endif
bx r0
-ENDPROC(cpu_v7_reset)
+SYM_FUNC_END(cpu_v7_reset)
.popsection
/*
@@ -74,13 +75,13 @@ ENDPROC(cpu_v7_reset)
*
* IRQs are already disabled.
*/
-ENTRY(cpu_v7_do_idle)
+SYM_TYPED_FUNC_START(cpu_v7_do_idle)
dsb @ WFI may enter a low-power mode
wfi
ret lr
-ENDPROC(cpu_v7_do_idle)
+SYM_FUNC_END(cpu_v7_do_idle)
-ENTRY(cpu_v7_dcache_clean_area)
+SYM_TYPED_FUNC_START(cpu_v7_dcache_clean_area)
ALT_SMP(W(nop)) @ MP extensions imply L1 PTW
ALT_UP_B(1f)
ret lr
@@ -91,38 +92,39 @@ ENTRY(cpu_v7_dcache_clean_area)
bhi 2b
dsb ishst
ret lr
-ENDPROC(cpu_v7_dcache_clean_area)
+SYM_FUNC_END(cpu_v7_dcache_clean_area)
#ifdef CONFIG_ARM_PSCI
.arch_extension sec
-ENTRY(cpu_v7_smc_switch_mm)
+SYM_TYPED_FUNC_START(cpu_v7_smc_switch_mm)
stmfd sp!, {r0 - r3}
movw r0, #:lower16:ARM_SMCCC_ARCH_WORKAROUND_1
movt r0, #:upper16:ARM_SMCCC_ARCH_WORKAROUND_1
smc #0
ldmfd sp!, {r0 - r3}
b cpu_v7_switch_mm
-ENDPROC(cpu_v7_smc_switch_mm)
+SYM_FUNC_END(cpu_v7_smc_switch_mm)
.arch_extension virt
-ENTRY(cpu_v7_hvc_switch_mm)
+SYM_TYPED_FUNC_START(cpu_v7_hvc_switch_mm)
stmfd sp!, {r0 - r3}
movw r0, #:lower16:ARM_SMCCC_ARCH_WORKAROUND_1
movt r0, #:upper16:ARM_SMCCC_ARCH_WORKAROUND_1
hvc #0
ldmfd sp!, {r0 - r3}
b cpu_v7_switch_mm
-ENDPROC(cpu_v7_hvc_switch_mm)
+SYM_FUNC_END(cpu_v7_hvc_switch_mm)
#endif
-ENTRY(cpu_v7_iciallu_switch_mm)
+
+SYM_TYPED_FUNC_START(cpu_v7_iciallu_switch_mm)
mov r3, #0
mcr p15, 0, r3, c7, c5, 0 @ ICIALLU
b cpu_v7_switch_mm
-ENDPROC(cpu_v7_iciallu_switch_mm)
-ENTRY(cpu_v7_bpiall_switch_mm)
+SYM_FUNC_END(cpu_v7_iciallu_switch_mm)
+SYM_TYPED_FUNC_START(cpu_v7_bpiall_switch_mm)
mov r3, #0
mcr p15, 0, r3, c7, c5, 6 @ flush BTAC/BTB
b cpu_v7_switch_mm
-ENDPROC(cpu_v7_bpiall_switch_mm)
+SYM_FUNC_END(cpu_v7_bpiall_switch_mm)
string cpu_v7_name, "ARMv7 Processor"
.align
@@ -131,7 +133,7 @@ ENDPROC(cpu_v7_bpiall_switch_mm)
.globl cpu_v7_suspend_size
.equ cpu_v7_suspend_size, 4 * 9
#ifdef CONFIG_ARM_CPU_SUSPEND
-ENTRY(cpu_v7_do_suspend)
+SYM_TYPED_FUNC_START(cpu_v7_do_suspend)
stmfd sp!, {r4 - r11, lr}
mrc p15, 0, r4, c13, c0, 0 @ FCSE/PID
mrc p15, 0, r5, c13, c0, 3 @ User r/o thread ID
@@ -150,9 +152,9 @@ ENTRY(cpu_v7_do_suspend)
mrc p15, 0, r10, c1, c0, 2 @ Co-processor access control
stmia r0, {r5 - r11}
ldmfd sp!, {r4 - r11, pc}
-ENDPROC(cpu_v7_do_suspend)
+SYM_FUNC_END(cpu_v7_do_suspend)
-ENTRY(cpu_v7_do_resume)
+SYM_TYPED_FUNC_START(cpu_v7_do_resume)
mov ip, #0
mcr p15, 0, ip, c7, c5, 0 @ invalidate I cache
mcr p15, 0, ip, c13, c0, 1 @ set reserved context ID
@@ -186,22 +188,22 @@ ENTRY(cpu_v7_do_resume)
dsb
mov r0, r8 @ control register
b cpu_resume_mmu
-ENDPROC(cpu_v7_do_resume)
+SYM_FUNC_END(cpu_v7_do_resume)
#endif
.globl cpu_ca9mp_suspend_size
.equ cpu_ca9mp_suspend_size, cpu_v7_suspend_size + 4 * 2
#ifdef CONFIG_ARM_CPU_SUSPEND
-ENTRY(cpu_ca9mp_do_suspend)
+SYM_TYPED_FUNC_START(cpu_ca9mp_do_suspend)
stmfd sp!, {r4 - r5}
mrc p15, 0, r4, c15, c0, 1 @ Diagnostic register
mrc p15, 0, r5, c15, c0, 0 @ Power register
stmia r0!, {r4 - r5}
ldmfd sp!, {r4 - r5}
b cpu_v7_do_suspend
-ENDPROC(cpu_ca9mp_do_suspend)
+SYM_FUNC_END(cpu_ca9mp_do_suspend)
-ENTRY(cpu_ca9mp_do_resume)
+SYM_TYPED_FUNC_START(cpu_ca9mp_do_resume)
ldmia r0!, {r4 - r5}
mrc p15, 0, r10, c15, c0, 1 @ Read Diagnostic register
teq r4, r10 @ Already restored?
@@ -210,7 +212,7 @@ ENTRY(cpu_ca9mp_do_resume)
teq r5, r10 @ Already restored?
mcrne p15, 0, r5, c15, c0, 0 @ No, so restore it
b cpu_v7_do_resume
-ENDPROC(cpu_ca9mp_do_resume)
+SYM_FUNC_END(cpu_ca9mp_do_resume)
#endif
#ifdef CONFIG_CPU_PJ4B
@@ -220,18 +222,18 @@ ENDPROC(cpu_ca9mp_do_resume)
globl_equ cpu_pj4b_proc_fin, cpu_v7_proc_fin
globl_equ cpu_pj4b_reset, cpu_v7_reset
#ifdef CONFIG_PJ4B_ERRATA_4742
-ENTRY(cpu_pj4b_do_idle)
+SYM_TYPED_FUNC_START(cpu_pj4b_do_idle)
dsb @ WFI may enter a low-power mode
wfi
dsb @barrier
ret lr
-ENDPROC(cpu_pj4b_do_idle)
+SYM_FUNC_END(cpu_pj4b_do_idle)
#else
globl_equ cpu_pj4b_do_idle, cpu_v7_do_idle
#endif
globl_equ cpu_pj4b_dcache_clean_area, cpu_v7_dcache_clean_area
#ifdef CONFIG_ARM_CPU_SUSPEND
-ENTRY(cpu_pj4b_do_suspend)
+SYM_TYPED_FUNC_START(cpu_pj4b_do_suspend)
stmfd sp!, {r6 - r10}
mrc p15, 1, r6, c15, c1, 0 @ save CP15 - extra features
mrc p15, 1, r7, c15, c2, 0 @ save CP15 - Aux Func Modes Ctrl 0
@@ -241,9 +243,9 @@ ENTRY(cpu_pj4b_do_suspend)
stmia r0!, {r6 - r10}
ldmfd sp!, {r6 - r10}
b cpu_v7_do_suspend
-ENDPROC(cpu_pj4b_do_suspend)
+SYM_FUNC_END(cpu_pj4b_do_suspend)
-ENTRY(cpu_pj4b_do_resume)
+SYM_TYPED_FUNC_START(cpu_pj4b_do_resume)
ldmia r0!, {r6 - r10}
mcr p15, 1, r6, c15, c1, 0 @ restore CP15 - extra features
mcr p15, 1, r7, c15, c2, 0 @ restore CP15 - Aux Func Modes Ctrl 0
@@ -251,7 +253,7 @@ ENTRY(cpu_pj4b_do_resume)
mcr p15, 1, r9, c15, c1, 1 @ restore CP15 - Aux Debug Modes Ctrl 1
mcr p15, 0, r10, c9, c14, 0 @ restore CP15 - PMC
b cpu_v7_do_resume
-ENDPROC(cpu_pj4b_do_resume)
+SYM_FUNC_END(cpu_pj4b_do_resume)
#endif
.globl cpu_pj4b_suspend_size
.equ cpu_pj4b_suspend_size, cpu_v7_suspend_size + 4 * 5
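Worth noting in the v7 conversion: the ca9mp and pj4b wrappers still end in plain direct branches (b cpu_v7_do_suspend, b cpu_v7_do_resume), and the pj4b globl_equ aliases survive as-is, since an .equ alias resolves to the same address and therefore the same type hash. KCFI instruments only indirect calls, so direct branches between typed functions need no extra machinery. In C terms, with illustrative stand-ins for the suspend pair:

/*
 * Direct calls are never KCFI-checked; only calls through function
 * pointers are. Illustrative stand-ins for cpu_v7_do_suspend and
 * cpu_ca9mp_do_suspend.
 */
static int v7_do_suspend(void)
{
	return 0;
}

static int ca9mp_do_suspend(void)
{
	/* save the extra Cortex-A9 MP registers first ... */
	return v7_do_suspend();	/* direct call: no check emitted */
}

int main(void)
{
	return ca9mp_do_suspend();
}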
diff --git a/arch/arm/mm/proc-v7m.S b/arch/arm/mm/proc-v7m.S
index d65a12f851a9..d4675603593b 100644
--- a/arch/arm/mm/proc-v7m.S
+++ b/arch/arm/mm/proc-v7m.S
@@ -8,18 +8,19 @@
* This is the "shell" of the ARMv7-M processor support.
*/
#include <linux/linkage.h>
+#include <linux/cfi_types.h>
#include <asm/assembler.h>
#include <asm/page.h>
#include <asm/v7m.h>
#include "proc-macros.S"
-ENTRY(cpu_v7m_proc_init)
+SYM_TYPED_FUNC_START(cpu_v7m_proc_init)
ret lr
-ENDPROC(cpu_v7m_proc_init)
+SYM_FUNC_END(cpu_v7m_proc_init)
-ENTRY(cpu_v7m_proc_fin)
+SYM_TYPED_FUNC_START(cpu_v7m_proc_fin)
ret lr
-ENDPROC(cpu_v7m_proc_fin)
+SYM_FUNC_END(cpu_v7m_proc_fin)
/*
* cpu_v7m_reset(loc)
@@ -31,9 +32,9 @@ ENDPROC(cpu_v7m_proc_fin)
* - loc - location to jump to for soft reset
*/
.align 5
-ENTRY(cpu_v7m_reset)
+SYM_TYPED_FUNC_START(cpu_v7m_reset)
ret r0
-ENDPROC(cpu_v7m_reset)
+SYM_FUNC_END(cpu_v7m_reset)
/*
* cpu_v7m_do_idle()
@@ -42,36 +43,36 @@ ENDPROC(cpu_v7m_reset)
*
* IRQs are already disabled.
*/
-ENTRY(cpu_v7m_do_idle)
+SYM_TYPED_FUNC_START(cpu_v7m_do_idle)
wfi
ret lr
-ENDPROC(cpu_v7m_do_idle)
+SYM_FUNC_END(cpu_v7m_do_idle)
-ENTRY(cpu_v7m_dcache_clean_area)
+SYM_TYPED_FUNC_START(cpu_v7m_dcache_clean_area)
ret lr
-ENDPROC(cpu_v7m_dcache_clean_area)
+SYM_FUNC_END(cpu_v7m_dcache_clean_area)
/*
* There is no MMU, so here is nothing to do.
*/
-ENTRY(cpu_v7m_switch_mm)
+SYM_TYPED_FUNC_START(cpu_v7m_switch_mm)
ret lr
-ENDPROC(cpu_v7m_switch_mm)
+SYM_FUNC_END(cpu_v7m_switch_mm)
.globl cpu_v7m_suspend_size
.equ cpu_v7m_suspend_size, 0
#ifdef CONFIG_ARM_CPU_SUSPEND
-ENTRY(cpu_v7m_do_suspend)
+SYM_TYPED_FUNC_START(cpu_v7m_do_suspend)
ret lr
-ENDPROC(cpu_v7m_do_suspend)
+SYM_FUNC_END(cpu_v7m_do_suspend)
-ENTRY(cpu_v7m_do_resume)
+SYM_TYPED_FUNC_START(cpu_v7m_do_resume)
ret lr
-ENDPROC(cpu_v7m_do_resume)
+SYM_FUNC_END(cpu_v7m_do_resume)
#endif
-ENTRY(cpu_cm7_dcache_clean_area)
+SYM_TYPED_FUNC_START(cpu_cm7_dcache_clean_area)
dcache_line_size r2, r3
movw r3, #:lower16:BASEADDR_V7M_SCB + V7M_SCB_DCCMVAC
movt r3, #:upper16:BASEADDR_V7M_SCB + V7M_SCB_DCCMVAC
@@ -82,16 +83,16 @@ ENTRY(cpu_cm7_dcache_clean_area)
bhi 1b
dsb
ret lr
-ENDPROC(cpu_cm7_dcache_clean_area)
+SYM_FUNC_END(cpu_cm7_dcache_clean_area)
-ENTRY(cpu_cm7_proc_fin)
+SYM_TYPED_FUNC_START(cpu_cm7_proc_fin)
movw r2, #:lower16:(BASEADDR_V7M_SCB + V7M_SCB_CCR)
movt r2, #:upper16:(BASEADDR_V7M_SCB + V7M_SCB_CCR)
ldr r0, [r2]
bic r0, r0, #(V7M_SCB_CCR_DC | V7M_SCB_CCR_IC)
str r0, [r2]
ret lr
-ENDPROC(cpu_cm7_proc_fin)
+SYM_FUNC_END(cpu_cm7_proc_fin)
.section ".init.text", "ax"
diff --git a/arch/arm/mm/proc-xsc3.S b/arch/arm/mm/proc-xsc3.S
index a17afe7e195a..14927b380452 100644
--- a/arch/arm/mm/proc-xsc3.S
+++ b/arch/arm/mm/proc-xsc3.S
@@ -23,6 +23,7 @@
#include <linux/linkage.h>
#include <linux/init.h>
+#include <linux/cfi_types.h>
#include <linux/pgtable.h>
#include <asm/assembler.h>
#include <asm/hwcap.h>
@@ -79,18 +80,20 @@
*
* Nothing too exciting at the moment
*/
-ENTRY(cpu_xsc3_proc_init)
+SYM_TYPED_FUNC_START(cpu_xsc3_proc_init)
ret lr
+SYM_FUNC_END(cpu_xsc3_proc_init)
/*
* cpu_xsc3_proc_fin()
*/
-ENTRY(cpu_xsc3_proc_fin)
+SYM_TYPED_FUNC_START(cpu_xsc3_proc_fin)
mrc p15, 0, r0, c1, c0, 0 @ ctrl register
bic r0, r0, #0x1800 @ ...IZ...........
bic r0, r0, #0x0006 @ .............CA.
mcr p15, 0, r0, c1, c0, 0 @ disable caches
ret lr
+SYM_FUNC_END(cpu_xsc3_proc_fin)
/*
* cpu_xsc3_reset(loc)
@@ -103,7 +106,7 @@ ENTRY(cpu_xsc3_proc_fin)
*/
.align 5
.pushsection .idmap.text, "ax"
-ENTRY(cpu_xsc3_reset)
+SYM_TYPED_FUNC_START(cpu_xsc3_reset)
mov r1, #PSR_F_BIT|PSR_I_BIT|SVC_MODE
msr cpsr_c, r1 @ reset CPSR
mrc p15, 0, r1, c1, c0, 0 @ ctrl register
@@ -117,7 +120,7 @@ ENTRY(cpu_xsc3_reset)
@ already containing those two last instructions to survive.
mcr p15, 0, ip, c8, c7, 0 @ invalidate I and D TLBs
ret r0
-ENDPROC(cpu_xsc3_reset)
+SYM_FUNC_END(cpu_xsc3_reset)
.popsection
/*
@@ -132,10 +135,11 @@ ENDPROC(cpu_xsc3_reset)
*/
.align 5
-ENTRY(cpu_xsc3_do_idle)
+SYM_TYPED_FUNC_START(cpu_xsc3_do_idle)
mov r0, #1
mcr p14, 0, r0, c7, c0, 0 @ go to idle
ret lr
+SYM_FUNC_END(cpu_xsc3_do_idle)
/* ================================= CACHE ================================ */
@@ -144,11 +148,11 @@ ENTRY(cpu_xsc3_do_idle)
*
* Unconditionally clean and invalidate the entire icache.
*/
-ENTRY(xsc3_flush_icache_all)
+SYM_TYPED_FUNC_START(xsc3_flush_icache_all)
mov r0, #0
mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
ret lr
-ENDPROC(xsc3_flush_icache_all)
+SYM_FUNC_END(xsc3_flush_icache_all)
/*
* flush_user_cache_all()
@@ -156,15 +160,14 @@ ENDPROC(xsc3_flush_icache_all)
* Invalidate all cache entries in a particular address
* space.
*/
-ENTRY(xsc3_flush_user_cache_all)
- /* FALLTHROUGH */
+SYM_FUNC_ALIAS(xsc3_flush_user_cache_all, xsc3_flush_kern_cache_all)
/*
* flush_kern_cache_all()
*
* Clean and invalidate the entire cache.
*/
-ENTRY(xsc3_flush_kern_cache_all)
+SYM_TYPED_FUNC_START(xsc3_flush_kern_cache_all)
mov r2, #VM_EXEC
mov ip, #0
__flush_whole_cache:
@@ -174,6 +177,7 @@ __flush_whole_cache:
mcrne p15, 0, ip, c7, c10, 4 @ data write barrier
mcrne p15, 0, ip, c7, c5, 4 @ prefetch flush
ret lr
+SYM_FUNC_END(xsc3_flush_kern_cache_all)
/*
* flush_user_cache_range(start, end, vm_flags)
@@ -186,7 +190,7 @@ __flush_whole_cache:
* - vma - vma_area_struct describing address space
*/
.align 5
-ENTRY(xsc3_flush_user_cache_range)
+SYM_TYPED_FUNC_START(xsc3_flush_user_cache_range)
mov ip, #0
sub r3, r1, r0 @ calculate total size
cmp r3, #MAX_AREA_SIZE
@@ -203,6 +207,7 @@ ENTRY(xsc3_flush_user_cache_range)
mcrne p15, 0, ip, c7, c10, 4 @ data write barrier
mcrne p15, 0, ip, c7, c5, 4 @ prefetch flush
ret lr
+SYM_FUNC_END(xsc3_flush_user_cache_range)
/*
* coherent_kern_range(start, end)
@@ -217,9 +222,13 @@ ENTRY(xsc3_flush_user_cache_range)
* Note: single I-cache line invalidation isn't used here since
* it also trashes the mini I-cache used by JTAG debuggers.
*/
-ENTRY(xsc3_coherent_kern_range)
-/* FALLTHROUGH */
-ENTRY(xsc3_coherent_user_range)
+SYM_TYPED_FUNC_START(xsc3_coherent_kern_range)
+#ifdef CONFIG_CFI_CLANG /* Fallthrough if !CFI */
+ b xsc3_coherent_user_range
+#endif
+SYM_FUNC_END(xsc3_coherent_kern_range)
+
+SYM_TYPED_FUNC_START(xsc3_coherent_user_range)
bic r0, r0, #CACHELINESIZE - 1
1: mcr p15, 0, r0, c7, c10, 1 @ clean L1 D line
add r0, r0, #CACHELINESIZE
@@ -230,6 +239,7 @@ ENTRY(xsc3_coherent_user_range)
mcr p15, 0, r0, c7, c10, 4 @ data write barrier
mcr p15, 0, r0, c7, c5, 4 @ prefetch flush
ret lr
+SYM_FUNC_END(xsc3_coherent_user_range)
/*
* flush_kern_dcache_area(void *addr, size_t size)
@@ -240,7 +250,7 @@ ENTRY(xsc3_coherent_user_range)
* - addr - kernel address
* - size - region size
*/
-ENTRY(xsc3_flush_kern_dcache_area)
+SYM_TYPED_FUNC_START(xsc3_flush_kern_dcache_area)
add r1, r0, r1
1: mcr p15, 0, r0, c7, c14, 1 @ clean/invalidate L1 D line
add r0, r0, #CACHELINESIZE
@@ -251,6 +261,7 @@ ENTRY(xsc3_flush_kern_dcache_area)
mcr p15, 0, r0, c7, c10, 4 @ data write barrier
mcr p15, 0, r0, c7, c5, 4 @ prefetch flush
ret lr
+SYM_FUNC_END(xsc3_flush_kern_dcache_area)
/*
* dma_inv_range(start, end)
@@ -301,7 +312,7 @@ xsc3_dma_clean_range:
* - start - virtual start address
* - end - virtual end address
*/
-ENTRY(xsc3_dma_flush_range)
+SYM_TYPED_FUNC_START(xsc3_dma_flush_range)
bic r0, r0, #CACHELINESIZE - 1
1: mcr p15, 0, r0, c7, c14, 1 @ clean/invalidate L1 D line
add r0, r0, #CACHELINESIZE
@@ -309,6 +320,7 @@ ENTRY(xsc3_dma_flush_range)
blo 1b
mcr p15, 0, r0, c7, c10, 4 @ data write barrier
ret lr
+SYM_FUNC_END(xsc3_dma_flush_range)
/*
* dma_map_area(start, size, dir)
@@ -316,13 +328,13 @@ ENTRY(xsc3_dma_flush_range)
* - size - size of region
* - dir - DMA direction
*/
-ENTRY(xsc3_dma_map_area)
+SYM_TYPED_FUNC_START(xsc3_dma_map_area)
add r1, r1, r0
cmp r2, #DMA_TO_DEVICE
beq xsc3_dma_clean_range
bcs xsc3_dma_inv_range
b xsc3_dma_flush_range
-ENDPROC(xsc3_dma_map_area)
+SYM_FUNC_END(xsc3_dma_map_area)
/*
* dma_unmap_area(start, size, dir)
@@ -330,22 +342,17 @@ ENDPROC(xsc3_dma_map_area)
* - size - size of region
* - dir - DMA direction
*/
-ENTRY(xsc3_dma_unmap_area)
+SYM_TYPED_FUNC_START(xsc3_dma_unmap_area)
ret lr
-ENDPROC(xsc3_dma_unmap_area)
-
- .globl xsc3_flush_kern_cache_louis
- .equ xsc3_flush_kern_cache_louis, xsc3_flush_kern_cache_all
-
- @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
- define_cache_functions xsc3
+SYM_FUNC_END(xsc3_dma_unmap_area)
-ENTRY(cpu_xsc3_dcache_clean_area)
+SYM_TYPED_FUNC_START(cpu_xsc3_dcache_clean_area)
1: mcr p15, 0, r0, c7, c10, 1 @ clean L1 D line
add r0, r0, #CACHELINESIZE
subs r1, r1, #CACHELINESIZE
bhi 1b
ret lr
+SYM_FUNC_END(cpu_xsc3_dcache_clean_area)
/* =============================== PageTable ============================== */
@@ -357,7 +364,7 @@ ENTRY(cpu_xsc3_dcache_clean_area)
* pgd: new page tables
*/
.align 5
-ENTRY(cpu_xsc3_switch_mm)
+SYM_TYPED_FUNC_START(cpu_xsc3_switch_mm)
clean_d_cache r1, r2
mcr p15, 0, ip, c7, c5, 0 @ invalidate L1 I cache and BTB
mcr p15, 0, ip, c7, c10, 4 @ data write barrier
@@ -366,6 +373,7 @@ ENTRY(cpu_xsc3_switch_mm)
mcr p15, 0, r0, c2, c0, 0 @ load page table pointer
mcr p15, 0, ip, c8, c7, 0 @ invalidate I and D TLBs
cpwait_ret lr, ip
+SYM_FUNC_END(cpu_xsc3_switch_mm)
/*
* cpu_xsc3_set_pte_ext(ptep, pte, ext)
@@ -391,7 +399,7 @@ cpu_xsc3_mt_table:
.long 0x00 @ unused
.align 5
-ENTRY(cpu_xsc3_set_pte_ext)
+SYM_TYPED_FUNC_START(cpu_xsc3_set_pte_ext)
xscale_set_pte_ext_prologue
tst r1, #L_PTE_SHARED @ shared?
@@ -404,6 +412,7 @@ ENTRY(cpu_xsc3_set_pte_ext)
xscale_set_pte_ext_epilogue
ret lr
+SYM_FUNC_END(cpu_xsc3_set_pte_ext)
.ltorg
.align
@@ -411,7 +420,7 @@ ENTRY(cpu_xsc3_set_pte_ext)
.globl cpu_xsc3_suspend_size
.equ cpu_xsc3_suspend_size, 4 * 6
#ifdef CONFIG_ARM_CPU_SUSPEND
-ENTRY(cpu_xsc3_do_suspend)
+SYM_TYPED_FUNC_START(cpu_xsc3_do_suspend)
stmfd sp!, {r4 - r9, lr}
mrc p14, 0, r4, c6, c0, 0 @ clock configuration, for turbo mode
mrc p15, 0, r5, c15, c1, 0 @ CP access reg
@@ -422,9 +431,9 @@ ENTRY(cpu_xsc3_do_suspend)
bic r4, r4, #2 @ clear frequency change bit
stmia r0, {r4 - r9} @ store cp regs
ldmia sp!, {r4 - r9, pc}
-ENDPROC(cpu_xsc3_do_suspend)
+SYM_FUNC_END(cpu_xsc3_do_suspend)
-ENTRY(cpu_xsc3_do_resume)
+SYM_TYPED_FUNC_START(cpu_xsc3_do_resume)
ldmia r0, {r4 - r9} @ load cp regs
mov ip, #0
mcr p15, 0, ip, c7, c7, 0 @ invalidate I & D caches, BTB
@@ -440,7 +449,7 @@ ENTRY(cpu_xsc3_do_resume)
mcr p15, 0, r8, c1, c0, 1 @ auxiliary control reg
mov r0, r9 @ control register
b cpu_resume_mmu
-ENDPROC(cpu_xsc3_do_resume)
+SYM_FUNC_END(cpu_xsc3_do_resume)
#endif
.type __xsc3_setup, #function
diff --git a/arch/arm/mm/proc-xscale.S b/arch/arm/mm/proc-xscale.S
index d82590aa71c0..d8462df8020b 100644
--- a/arch/arm/mm/proc-xscale.S
+++ b/arch/arm/mm/proc-xscale.S
@@ -19,6 +19,7 @@
#include <linux/linkage.h>
#include <linux/init.h>
+#include <linux/cfi_types.h>
#include <linux/pgtable.h>
#include <asm/assembler.h>
#include <asm/hwcap.h>
@@ -111,22 +112,24 @@ clean_addr: .word CLEAN_ADDR
*
* Nothing too exciting at the moment
*/
-ENTRY(cpu_xscale_proc_init)
+SYM_TYPED_FUNC_START(cpu_xscale_proc_init)
@ enable write buffer coalescing. Some bootloader disable it
mrc p15, 0, r1, c1, c0, 1
bic r1, r1, #1
mcr p15, 0, r1, c1, c0, 1
ret lr
+SYM_FUNC_END(cpu_xscale_proc_init)
/*
* cpu_xscale_proc_fin()
*/
-ENTRY(cpu_xscale_proc_fin)
+SYM_TYPED_FUNC_START(cpu_xscale_proc_fin)
mrc p15, 0, r0, c1, c0, 0 @ ctrl register
bic r0, r0, #0x1800 @ ...IZ...........
bic r0, r0, #0x0006 @ .............CA.
mcr p15, 0, r0, c1, c0, 0 @ disable caches
ret lr
+SYM_FUNC_END(cpu_xscale_proc_fin)
/*
* cpu_xscale_reset(loc)
@@ -141,7 +144,7 @@ ENTRY(cpu_xscale_proc_fin)
*/
.align 5
.pushsection .idmap.text, "ax"
-ENTRY(cpu_xscale_reset)
+SYM_TYPED_FUNC_START(cpu_xscale_reset)
mov r1, #PSR_F_BIT|PSR_I_BIT|SVC_MODE
msr cpsr_c, r1 @ reset CPSR
mcr p15, 0, r1, c10, c4, 1 @ unlock I-TLB
@@ -159,7 +162,7 @@ ENTRY(cpu_xscale_reset)
@ already containing those two last instructions to survive.
mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs
ret r0
-ENDPROC(cpu_xscale_reset)
+SYM_FUNC_END(cpu_xscale_reset)
.popsection
/*
@@ -174,10 +177,11 @@ ENDPROC(cpu_xscale_reset)
*/
.align 5
-ENTRY(cpu_xscale_do_idle)
+SYM_TYPED_FUNC_START(cpu_xscale_do_idle)
mov r0, #1
mcr p14, 0, r0, c7, c0, 0 @ Go to IDLE
ret lr
+SYM_FUNC_END(cpu_xscale_do_idle)
/* ================================= CACHE ================================ */
@@ -186,11 +190,11 @@ ENTRY(cpu_xscale_do_idle)
*
* Unconditionally clean and invalidate the entire icache.
*/
-ENTRY(xscale_flush_icache_all)
+SYM_TYPED_FUNC_START(xscale_flush_icache_all)
mov r0, #0
mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
ret lr
-ENDPROC(xscale_flush_icache_all)
+SYM_FUNC_END(xscale_flush_icache_all)
/*
* flush_user_cache_all()
@@ -198,15 +202,14 @@ ENDPROC(xscale_flush_icache_all)
* Invalidate all cache entries in a particular address
* space.
*/
-ENTRY(xscale_flush_user_cache_all)
- /* FALLTHROUGH */
+SYM_FUNC_ALIAS(xscale_flush_user_cache_all, xscale_flush_kern_cache_all)
/*
* flush_kern_cache_all()
*
* Clean and invalidate the entire cache.
*/
-ENTRY(xscale_flush_kern_cache_all)
+SYM_TYPED_FUNC_START(xscale_flush_kern_cache_all)
mov r2, #VM_EXEC
mov ip, #0
__flush_whole_cache:
@@ -215,6 +218,7 @@ __flush_whole_cache:
mcrne p15, 0, ip, c7, c5, 0 @ Invalidate I cache & BTB
mcrne p15, 0, ip, c7, c10, 4 @ Drain Write (& Fill) Buffer
ret lr
+SYM_FUNC_END(xscale_flush_kern_cache_all)
/*
* flush_user_cache_range(start, end, vm_flags)
@@ -227,7 +231,7 @@ __flush_whole_cache:
* - vma - vma_area_struct describing address space
*/
.align 5
-ENTRY(xscale_flush_user_cache_range)
+SYM_TYPED_FUNC_START(xscale_flush_user_cache_range)
mov ip, #0
sub r3, r1, r0 @ calculate total size
cmp r3, #MAX_AREA_SIZE
@@ -244,6 +248,7 @@ ENTRY(xscale_flush_user_cache_range)
mcrne p15, 0, ip, c7, c5, 6 @ Invalidate BTB
mcrne p15, 0, ip, c7, c10, 4 @ Drain Write (& Fill) Buffer
ret lr
+SYM_FUNC_END(xscale_flush_user_cache_range)
/*
* coherent_kern_range(start, end)
@@ -258,7 +263,7 @@ ENTRY(xscale_flush_user_cache_range)
* Note: single I-cache line invalidation isn't used here since
* it also trashes the mini I-cache used by JTAG debuggers.
*/
-ENTRY(xscale_coherent_kern_range)
+SYM_TYPED_FUNC_START(xscale_coherent_kern_range)
bic r0, r0, #CACHELINESIZE - 1
1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
add r0, r0, #CACHELINESIZE
@@ -268,6 +273,7 @@ ENTRY(xscale_coherent_kern_range)
mcr p15, 0, r0, c7, c5, 0 @ Invalidate I cache & BTB
mcr p15, 0, r0, c7, c10, 4 @ Drain Write (& Fill) Buffer
ret lr
+SYM_FUNC_END(xscale_coherent_kern_range)
/*
* coherent_user_range(start, end)
@@ -279,7 +285,7 @@ ENTRY(xscale_coherent_kern_range)
* - start - virtual start address
* - end - virtual end address
*/
-ENTRY(xscale_coherent_user_range)
+SYM_TYPED_FUNC_START(xscale_coherent_user_range)
bic r0, r0, #CACHELINESIZE - 1
1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
mcr p15, 0, r0, c7, c5, 1 @ Invalidate I cache entry
@@ -290,6 +296,7 @@ ENTRY(xscale_coherent_user_range)
mcr p15, 0, r0, c7, c5, 6 @ Invalidate BTB
mcr p15, 0, r0, c7, c10, 4 @ Drain Write (& Fill) Buffer
ret lr
+SYM_FUNC_END(xscale_coherent_user_range)
/*
* flush_kern_dcache_area(void *addr, size_t size)
@@ -300,7 +307,7 @@ ENTRY(xscale_coherent_user_range)
* - addr - kernel address
* - size - region size
*/
-ENTRY(xscale_flush_kern_dcache_area)
+SYM_TYPED_FUNC_START(xscale_flush_kern_dcache_area)
add r1, r0, r1
1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
mcr p15, 0, r0, c7, c6, 1 @ invalidate D entry
@@ -311,6 +318,7 @@ ENTRY(xscale_flush_kern_dcache_area)
mcr p15, 0, r0, c7, c5, 0 @ Invalidate I cache & BTB
mcr p15, 0, r0, c7, c10, 4 @ Drain Write (& Fill) Buffer
ret lr
+SYM_FUNC_END(xscale_flush_kern_dcache_area)
/*
* dma_inv_range(start, end)
@@ -361,7 +369,7 @@ xscale_dma_clean_range:
* - start - virtual start address
* - end - virtual end address
*/
-ENTRY(xscale_dma_flush_range)
+SYM_TYPED_FUNC_START(xscale_dma_flush_range)
bic r0, r0, #CACHELINESIZE - 1
1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
mcr p15, 0, r0, c7, c6, 1 @ invalidate D entry
@@ -370,6 +378,7 @@ ENTRY(xscale_dma_flush_range)
blo 1b
mcr p15, 0, r0, c7, c10, 4 @ Drain Write (& Fill) Buffer
ret lr
+SYM_FUNC_END(xscale_dma_flush_range)
/*
* dma_map_area(start, size, dir)
@@ -377,13 +386,27 @@ ENTRY(xscale_dma_flush_range)
* - size - size of region
* - dir - DMA direction
*/
-ENTRY(xscale_dma_map_area)
+SYM_TYPED_FUNC_START(xscale_dma_map_area)
add r1, r1, r0
cmp r2, #DMA_TO_DEVICE
beq xscale_dma_clean_range
bcs xscale_dma_inv_range
b xscale_dma_flush_range
-ENDPROC(xscale_dma_map_area)
+SYM_FUNC_END(xscale_dma_map_area)
+
+/*
+ * On stepping A0/A1 of the 80200, invalidating D-cache by line doesn't
+ * clear the dirty bits, which means that if we invalidate a dirty line,
+ * the dirty data can still be written back to external memory later on.
+ *
+ * The recommended workaround is to always do a clean D-cache line before
+ * doing an invalidate D-cache line, so on the affected processors,
+ * dma_inv_range() is implemented as dma_flush_range().
+ *
+ * See erratum #25 of "Intel 80200 Processor Specification Update",
+ * revision January 22, 2003, available at:
+ * http://www.intel.com/design/iio/specupdt/273415.htm
+ */
/*
* dma_map_area(start, size, dir)
@@ -391,12 +414,12 @@ ENDPROC(xscale_dma_map_area)
* - size - size of region
* - dir - DMA direction
*/
-ENTRY(xscale_80200_A0_A1_dma_map_area)
+SYM_TYPED_FUNC_START(xscale_80200_A0_A1_dma_map_area)
add r1, r1, r0
teq r2, #DMA_TO_DEVICE
beq xscale_dma_clean_range
b xscale_dma_flush_range
-ENDPROC(xscale_80200_A0_A1_dma_map_area)
+SYM_FUNC_END(xscale_80200_A0_A1_dma_map_area)
/*
* dma_unmap_area(start, size, dir)
@@ -404,59 +427,17 @@ ENDPROC(xscale_80200_A0_A1_dma_map_area)
* - size - size of region
* - dir - DMA direction
*/
-ENTRY(xscale_dma_unmap_area)
+SYM_TYPED_FUNC_START(xscale_dma_unmap_area)
ret lr
-ENDPROC(xscale_dma_unmap_area)
-
- .globl xscale_flush_kern_cache_louis
- .equ xscale_flush_kern_cache_louis, xscale_flush_kern_cache_all
+SYM_FUNC_END(xscale_dma_unmap_area)
- @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
- define_cache_functions xscale
-
-/*
- * On stepping A0/A1 of the 80200, invalidating D-cache by line doesn't
- * clear the dirty bits, which means that if we invalidate a dirty line,
- * the dirty data can still be written back to external memory later on.
- *
- * The recommended workaround is to always do a clean D-cache line before
- * doing an invalidate D-cache line, so on the affected processors,
- * dma_inv_range() is implemented as dma_flush_range().
- *
- * See erratum #25 of "Intel 80200 Processor Specification Update",
- * revision January 22, 2003, available at:
- * http://www.intel.com/design/iio/specupdt/273415.htm
- */
-.macro a0_alias basename
- .globl xscale_80200_A0_A1_\basename
- .type xscale_80200_A0_A1_\basename , %function
- .equ xscale_80200_A0_A1_\basename , xscale_\basename
-.endm
-
-/*
- * Most of the cache functions are unchanged for these processor revisions.
- * Export suitable alias symbols for the unchanged functions:
- */
- a0_alias flush_icache_all
- a0_alias flush_user_cache_all
- a0_alias flush_kern_cache_all
- a0_alias flush_kern_cache_louis
- a0_alias flush_user_cache_range
- a0_alias coherent_kern_range
- a0_alias coherent_user_range
- a0_alias flush_kern_dcache_area
- a0_alias dma_flush_range
- a0_alias dma_unmap_area
-
- @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
- define_cache_functions xscale_80200_A0_A1
-
-ENTRY(cpu_xscale_dcache_clean_area)
+SYM_TYPED_FUNC_START(cpu_xscale_dcache_clean_area)
1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
add r0, r0, #CACHELINESIZE
subs r1, r1, #CACHELINESIZE
bhi 1b
ret lr
+SYM_FUNC_END(cpu_xscale_dcache_clean_area)
/* =============================== PageTable ============================== */
@@ -468,13 +449,14 @@ ENTRY(cpu_xscale_dcache_clean_area)
* pgd: new page tables
*/
.align 5
-ENTRY(cpu_xscale_switch_mm)
+SYM_TYPED_FUNC_START(cpu_xscale_switch_mm)
clean_d_cache r1, r2
mcr p15, 0, ip, c7, c5, 0 @ Invalidate I cache & BTB
mcr p15, 0, ip, c7, c10, 4 @ Drain Write (& Fill) Buffer
mcr p15, 0, r0, c2, c0, 0 @ load page table pointer
mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs
cpwait_ret lr, ip
+SYM_FUNC_END(cpu_xscale_switch_mm)
/*
* cpu_xscale_set_pte_ext(ptep, pte, ext)
@@ -502,7 +484,7 @@ cpu_xscale_mt_table:
.long 0x00 @ unused
.align 5
-ENTRY(cpu_xscale_set_pte_ext)
+SYM_TYPED_FUNC_START(cpu_xscale_set_pte_ext)
xscale_set_pte_ext_prologue
@
@@ -520,6 +502,7 @@ ENTRY(cpu_xscale_set_pte_ext)
xscale_set_pte_ext_epilogue
ret lr
+SYM_FUNC_END(cpu_xscale_set_pte_ext)
.ltorg
.align
@@ -527,7 +510,7 @@ ENTRY(cpu_xscale_set_pte_ext)
.globl cpu_xscale_suspend_size
.equ cpu_xscale_suspend_size, 4 * 6
#ifdef CONFIG_ARM_CPU_SUSPEND
-ENTRY(cpu_xscale_do_suspend)
+SYM_TYPED_FUNC_START(cpu_xscale_do_suspend)
stmfd sp!, {r4 - r9, lr}
mrc p14, 0, r4, c6, c0, 0 @ clock configuration, for turbo mode
mrc p15, 0, r5, c15, c1, 0 @ CP access reg
@@ -538,9 +521,9 @@ ENTRY(cpu_xscale_do_suspend)
bic r4, r4, #2 @ clear frequency change bit
stmia r0, {r4 - r9} @ store cp regs
ldmfd sp!, {r4 - r9, pc}
-ENDPROC(cpu_xscale_do_suspend)
+SYM_FUNC_END(cpu_xscale_do_suspend)
-ENTRY(cpu_xscale_do_resume)
+SYM_TYPED_FUNC_START(cpu_xscale_do_resume)
ldmia r0, {r4 - r9} @ load cp regs
mov ip, #0
mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs
@@ -553,7 +536,7 @@ ENTRY(cpu_xscale_do_resume)
mcr p15, 0, r8, c1, c0, 1 @ auxiliary control reg
mov r0, r9 @ control register
b cpu_resume_mmu
-ENDPROC(cpu_xscale_do_resume)
+SYM_FUNC_END(cpu_xscale_do_resume)
#endif
.type __xscale_setup, #function
diff --git a/arch/arm/mm/proc.c b/arch/arm/mm/proc.c
new file mode 100644
index 000000000000..bdbbf65d1b36
--- /dev/null
+++ b/arch/arm/mm/proc.c
@@ -0,0 +1,500 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * This file defines C prototypes for the low-level processor assembly functions
+ * and creates a reference for CFI. This needs to be done for every assembly
+ * processor ("proc") function that is called from C but does not have a
+ * corresponding C implementation.
+ *
+ * Processors are listed in the order they appear in the Makefile.
+ *
+ * Only functions that are actually used on the target CPU are listed, in
+ * the order they are defined in struct processor.
+ */
+#include <asm/proc-fns.h>
+
+#ifdef CONFIG_CPU_ARM7TDMI
+void cpu_arm7tdmi_proc_init(void);
+__ADDRESSABLE(cpu_arm7tdmi_proc_init);
+void cpu_arm7tdmi_proc_fin(void);
+__ADDRESSABLE(cpu_arm7tdmi_proc_fin);
+void cpu_arm7tdmi_reset(void);
+__ADDRESSABLE(cpu_arm7tdmi_reset);
+int cpu_arm7tdmi_do_idle(void);
+__ADDRESSABLE(cpu_arm7tdmi_do_idle);
+void cpu_arm7tdmi_dcache_clean_area(void *addr, int size);
+__ADDRESSABLE(cpu_arm7tdmi_dcache_clean_area);
+void cpu_arm7tdmi_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm);
+__ADDRESSABLE(cpu_arm7tdmi_switch_mm);
+#endif
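The pattern above repeats for every processor: the C prototype gives the compiler the function's type, and __ADDRESSABLE() keeps a reference to the assembly symbol alive so CFI has typed metadata for the eventual indirect call. A minimal sketch of the idea, assuming the macro's usual form from include/linux/compiler.h (names below are illustrative, not the kernel's):

/*
 * Sketch of what __ADDRESSABLE() boils down to: stash the symbol's
 * address in a discardable static so the typed reference survives
 * without emitting an actual call.
 */
#define ADDRESSABLE_SKETCH(sym)						\
	static void *__attribute__((used, section(".discard.addressable"))) \
	__addressable_##sym = (void *)&sym;

void cpu_example_proc_init(void);	/* typed prototype for an asm symbol */
ADDRESSABLE_SKETCH(cpu_example_proc_init)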
+
+#ifdef CONFIG_CPU_ARM720T
+void cpu_arm720_proc_init(void);
+__ADDRESSABLE(cpu_arm720_proc_init);
+void cpu_arm720_proc_fin(void);
+__ADDRESSABLE(cpu_arm720_proc_fin);
+void cpu_arm720_reset(void);
+__ADDRESSABLE(cpu_arm720_reset);
+int cpu_arm720_do_idle(void);
+__ADDRESSABLE(cpu_arm720_do_idle);
+void cpu_arm720_dcache_clean_area(void *addr, int size);
+__ADDRESSABLE(cpu_arm720_dcache_clean_area);
+void cpu_arm720_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm);
+__ADDRESSABLE(cpu_arm720_switch_mm);
+void cpu_arm720_set_pte_ext(pte_t *ptep, pte_t pte, unsigned int ext);
+__ADDRESSABLE(cpu_arm720_set_pte_ext);
+#endif
+
+#ifdef CONFIG_CPU_ARM740T
+void cpu_arm740_proc_init(void);
+__ADDRESSABLE(cpu_arm740_proc_init);
+void cpu_arm740_proc_fin(void);
+__ADDRESSABLE(cpu_arm740_proc_fin);
+void cpu_arm740_reset(void);
+__ADDRESSABLE(cpu_arm740_reset);
+int cpu_arm740_do_idle(void);
+__ADDRESSABLE(cpu_arm740_do_idle);
+void cpu_arm740_dcache_clean_area(void *addr, int size);
+__ADDRESSABLE(cpu_arm740_dcache_clean_area);
+void cpu_arm740_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm);
+__ADDRESSABLE(cpu_arm740_switch_mm);
+#endif
+
+#ifdef CONFIG_CPU_ARM9TDMI
+void cpu_arm9tdmi_proc_init(void);
+__ADDRESSABLE(cpu_arm9tdmi_proc_init);
+void cpu_arm9tdmi_proc_fin(void);
+__ADDRESSABLE(cpu_arm9tdmi_proc_fin);
+void cpu_arm9tdmi_reset(void);
+__ADDRESSABLE(cpu_arm9tdmi_reset);
+int cpu_arm9tdmi_do_idle(void);
+__ADDRESSABLE(cpu_arm9tdmi_do_idle);
+void cpu_arm9tdmi_dcache_clean_area(void *addr, int size);
+__ADDRESSABLE(cpu_arm9tdmi_dcache_clean_area);
+void cpu_arm9tdmi_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm);
+__ADDRESSABLE(cpu_arm9tdmi_switch_mm);
+#endif
+
+#ifdef CONFIG_CPU_ARM920T
+void cpu_arm920_proc_init(void);
+__ADDRESSABLE(cpu_arm920_proc_init);
+void cpu_arm920_proc_fin(void);
+__ADDRESSABLE(cpu_arm920_proc_fin);
+void cpu_arm920_reset(void);
+__ADDRESSABLE(cpu_arm920_reset);
+int cpu_arm920_do_idle(void);
+__ADDRESSABLE(cpu_arm920_do_idle);
+void cpu_arm920_dcache_clean_area(void *addr, int size);
+__ADDRESSABLE(cpu_arm920_dcache_clean_area);
+void cpu_arm920_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm);
+__ADDRESSABLE(cpu_arm920_switch_mm);
+void cpu_arm920_set_pte_ext(pte_t *ptep, pte_t pte, unsigned int ext);
+__ADDRESSABLE(cpu_arm920_set_pte_ext);
+#ifdef CONFIG_ARM_CPU_SUSPEND
+void cpu_arm920_do_suspend(void *);
+__ADDRESSABLE(cpu_arm920_do_suspend);
+void cpu_arm920_do_resume(void *);
+__ADDRESSABLE(cpu_arm920_do_resume);
+#endif /* CONFIG_ARM_CPU_SUSPEND */
+#endif /* CONFIG_CPU_ARM920T */
+
+#ifdef CONFIG_CPU_ARM922T
+void cpu_arm922_proc_init(void);
+__ADDRESSABLE(cpu_arm922_proc_init);
+void cpu_arm922_proc_fin(void);
+__ADDRESSABLE(cpu_arm922_proc_fin);
+void cpu_arm922_reset(void);
+__ADDRESSABLE(cpu_arm922_reset);
+int cpu_arm922_do_idle(void);
+__ADDRESSABLE(cpu_arm922_do_idle);
+void cpu_arm922_dcache_clean_area(void *addr, int size);
+__ADDRESSABLE(cpu_arm922_dcache_clean_area);
+void cpu_arm922_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm);
+__ADDRESSABLE(cpu_arm922_switch_mm);
+void cpu_arm922_set_pte_ext(pte_t *ptep, pte_t pte, unsigned int ext);
+__ADDRESSABLE(cpu_arm922_set_pte_ext);
+#endif
+
+#ifdef CONFIG_CPU_ARM925T
+void cpu_arm925_proc_init(void);
+__ADDRESSABLE(cpu_arm925_proc_init);
+void cpu_arm925_proc_fin(void);
+__ADDRESSABLE(cpu_arm925_proc_fin);
+void cpu_arm925_reset(void);
+__ADDRESSABLE(cpu_arm925_reset);
+int cpu_arm925_do_idle(void);
+__ADDRESSABLE(cpu_arm925_do_idle);
+void cpu_arm925_dcache_clean_area(void *addr, int size);
+__ADDRESSABLE(cpu_arm925_dcache_clean_area);
+void cpu_arm925_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm);
+__ADDRESSABLE(cpu_arm925_switch_mm);
+void cpu_arm925_set_pte_ext(pte_t *ptep, pte_t pte, unsigned int ext);
+__ADDRESSABLE(cpu_arm925_set_pte_ext);
+#endif
+
+#ifdef CONFIG_CPU_ARM926T
+void cpu_arm926_proc_init(void);
+__ADDRESSABLE(cpu_arm926_proc_init);
+void cpu_arm926_proc_fin(void);
+__ADDRESSABLE(cpu_arm926_proc_fin);
+void cpu_arm926_reset(unsigned long addr, bool hvc);
+__ADDRESSABLE(cpu_arm926_reset);
+int cpu_arm926_do_idle(void);
+__ADDRESSABLE(cpu_arm926_do_idle);
+void cpu_arm926_dcache_clean_area(void *addr, int size);
+__ADDRESSABLE(cpu_arm926_dcache_clean_area);
+void cpu_arm926_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm);
+__ADDRESSABLE(cpu_arm926_switch_mm);
+void cpu_arm926_set_pte_ext(pte_t *ptep, pte_t pte, unsigned int ext);
+__ADDRESSABLE(cpu_arm926_set_pte_ext);
+#ifdef CONFIG_ARM_CPU_SUSPEND
+void cpu_arm926_do_suspend(void *);
+__ADDRESSABLE(cpu_arm926_do_suspend);
+void cpu_arm926_do_resume(void *);
+__ADDRESSABLE(cpu_arm926_do_resume);
+#endif /* CONFIG_ARM_CPU_SUSPEND */
+#endif /* CONFIG_CPU_ARM926T */
+
+#ifdef CONFIG_CPU_ARM940T
+void cpu_arm940_proc_init(void);
+__ADDRESSABLE(cpu_arm940_proc_init);
+void cpu_arm940_proc_fin(void);
+__ADDRESSABLE(cpu_arm940_proc_fin);
+void cpu_arm940_reset(void);
+__ADDRESSABLE(cpu_arm940_reset);
+int cpu_arm940_do_idle(void);
+__ADDRESSABLE(cpu_arm940_do_idle);
+void cpu_arm940_dcache_clean_area(void *addr, int size);
+__ADDRESSABLE(cpu_arm940_dcache_clean_area);
+void cpu_arm940_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm);
+__ADDRESSABLE(cpu_arm940_switch_mm);
+#endif
+
+#ifdef CONFIG_CPU_ARM946E
+void cpu_arm946_proc_init(void);
+__ADDRESSABLE(cpu_arm946_proc_init);
+void cpu_arm946_proc_fin(void);
+__ADDRESSABLE(cpu_arm946_proc_fin);
+void cpu_arm946_reset(void);
+__ADDRESSABLE(cpu_arm946_reset);
+int cpu_arm946_do_idle(void);
+__ADDRESSABLE(cpu_arm946_do_idle);
+void cpu_arm946_dcache_clean_area(void *addr, int size);
+__ADDRESSABLE(cpu_arm946_dcache_clean_area);
+void cpu_arm946_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm);
+__ADDRESSABLE(cpu_arm946_switch_mm);
+#endif
+
+#ifdef CONFIG_CPU_FA526
+void cpu_fa526_proc_init(void);
+__ADDRESSABLE(cpu_fa526_proc_init);
+void cpu_fa526_proc_fin(void);
+__ADDRESSABLE(cpu_fa526_proc_fin);
+void cpu_fa526_reset(unsigned long addr, bool hvc);
+__ADDRESSABLE(cpu_fa526_reset);
+int cpu_fa526_do_idle(void);
+__ADDRESSABLE(cpu_fa526_do_idle);
+void cpu_fa526_dcache_clean_area(void *addr, int size);
+__ADDRESSABLE(cpu_fa526_dcache_clean_area);
+void cpu_fa526_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm);
+__ADDRESSABLE(cpu_fa526_switch_mm);
+void cpu_fa526_set_pte_ext(pte_t *ptep, pte_t pte, unsigned int ext);
+__ADDRESSABLE(cpu_fa526_set_pte_ext);
+#endif
+
+#ifdef CONFIG_CPU_ARM1020
+void cpu_arm1020_proc_init(void);
+__ADDRESSABLE(cpu_arm1020_proc_init);
+void cpu_arm1020_proc_fin(void);
+__ADDRESSABLE(cpu_arm1020_proc_fin);
+void cpu_arm1020_reset(unsigned long addr, bool hvc);
+__ADDRESSABLE(cpu_arm1020_reset);
+int cpu_arm1020_do_idle(void);
+__ADDRESSABLE(cpu_arm1020_do_idle);
+void cpu_arm1020_dcache_clean_area(void *addr, int size);
+__ADDRESSABLE(cpu_arm1020_dcache_clean_area);
+void cpu_arm1020_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm);
+__ADDRESSABLE(cpu_arm1020_switch_mm);
+void cpu_arm1020_set_pte_ext(pte_t *ptep, pte_t pte, unsigned int ext);
+__ADDRESSABLE(cpu_arm1020_set_pte_ext);
+#endif
+
+#ifdef CONFIG_CPU_ARM1020E
+void cpu_arm1020e_proc_init(void);
+__ADDRESSABLE(cpu_arm1020e_proc_init);
+void cpu_arm1020e_proc_fin(void);
+__ADDRESSABLE(cpu_arm1020e_proc_fin);
+void cpu_arm1020e_reset(unsigned long addr, bool hvc);
+__ADDRESSABLE(cpu_arm1020e_reset);
+int cpu_arm1020e_do_idle(void);
+__ADDRESSABLE(cpu_arm1020e_do_idle);
+void cpu_arm1020e_dcache_clean_area(void *addr, int size);
+__ADDRESSABLE(cpu_arm1020e_dcache_clean_area);
+void cpu_arm1020e_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm);
+__ADDRESSABLE(cpu_arm1020e_switch_mm);
+void cpu_arm1020e_set_pte_ext(pte_t *ptep, pte_t pte, unsigned int ext);
+__ADDRESSABLE(cpu_arm1020e_set_pte_ext);
+#endif
+
+#ifdef CONFIG_CPU_ARM1022
+void cpu_arm1022_proc_init(void);
+__ADDRESSABLE(cpu_arm1022_proc_init);
+void cpu_arm1022_proc_fin(void);
+__ADDRESSABLE(cpu_arm1022_proc_fin);
+void cpu_arm1022_reset(unsigned long addr, bool hvc);
+__ADDRESSABLE(cpu_arm1022_reset);
+int cpu_arm1022_do_idle(void);
+__ADDRESSABLE(cpu_arm1022_do_idle);
+void cpu_arm1022_dcache_clean_area(void *addr, int size);
+__ADDRESSABLE(cpu_arm1022_dcache_clean_area);
+void cpu_arm1022_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm);
+__ADDRESSABLE(cpu_arm1022_switch_mm);
+void cpu_arm1022_set_pte_ext(pte_t *ptep, pte_t pte, unsigned int ext);
+__ADDRESSABLE(cpu_arm1022_set_pte_ext);
+#endif
+
+#ifdef CONFIG_CPU_ARM1026
+void cpu_arm1026_proc_init(void);
+__ADDRESSABLE(cpu_arm1026_proc_init);
+void cpu_arm1026_proc_fin(void);
+__ADDRESSABLE(cpu_arm1026_proc_fin);
+void cpu_arm1026_reset(unsigned long addr, bool hvc);
+__ADDRESSABLE(cpu_arm1026_reset);
+int cpu_arm1026_do_idle(void);
+__ADDRESSABLE(cpu_arm1026_do_idle);
+void cpu_arm1026_dcache_clean_area(void *addr, int size);
+__ADDRESSABLE(cpu_arm1026_dcache_clean_area);
+void cpu_arm1026_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm);
+__ADDRESSABLE(cpu_arm1026_switch_mm);
+void cpu_arm1026_set_pte_ext(pte_t *ptep, pte_t pte, unsigned int ext);
+__ADDRESSABLE(cpu_arm1026_set_pte_ext);
+#endif
+
+#ifdef CONFIG_CPU_SA110
+void cpu_sa110_proc_init(void);
+__ADDRESSABLE(cpu_sa110_proc_init);
+void cpu_sa110_proc_fin(void);
+__ADDRESSABLE(cpu_sa110_proc_fin);
+void cpu_sa110_reset(unsigned long addr, bool hvc);
+__ADDRESSABLE(cpu_sa110_reset);
+int cpu_sa110_do_idle(void);
+__ADDRESSABLE(cpu_sa110_do_idle);
+void cpu_sa110_dcache_clean_area(void *addr, int size);
+__ADDRESSABLE(cpu_sa110_dcache_clean_area);
+void cpu_sa110_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm);
+__ADDRESSABLE(cpu_sa110_switch_mm);
+void cpu_sa110_set_pte_ext(pte_t *ptep, pte_t pte, unsigned int ext);
+__ADDRESSABLE(cpu_sa110_set_pte_ext);
+#endif
+
+#ifdef CONFIG_CPU_SA1100
+void cpu_sa1100_proc_init(void);
+__ADDRESSABLE(cpu_sa1100_proc_init);
+void cpu_sa1100_proc_fin(void);
+__ADDRESSABLE(cpu_sa1100_proc_fin);
+void cpu_sa1100_reset(unsigned long addr, bool hvc);
+__ADDRESSABLE(cpu_sa1100_reset);
+int cpu_sa1100_do_idle(void);
+__ADDRESSABLE(cpu_sa1100_do_idle);
+void cpu_sa1100_dcache_clean_area(void *addr, int size);
+__ADDRESSABLE(cpu_sa1100_dcache_clean_area);
+void cpu_sa1100_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm);
+__ADDRESSABLE(cpu_sa1100_switch_mm);
+void cpu_sa1100_set_pte_ext(pte_t *ptep, pte_t pte, unsigned int ext);
+__ADDRESSABLE(cpu_sa1100_set_pte_ext);
+#ifdef CONFIG_ARM_CPU_SUSPEND
+void cpu_sa1100_do_suspend(void *);
+__ADDRESSABLE(cpu_sa1100_do_suspend);
+void cpu_sa1100_do_resume(void *);
+__ADDRESSABLE(cpu_sa1100_do_resume);
+#endif /* CONFIG_ARM_CPU_SUSPEND */
+#endif /* CONFIG_CPU_SA1100 */
+
+#ifdef CONFIG_CPU_XSCALE
+void cpu_xscale_proc_init(void);
+__ADDRESSABLE(cpu_xscale_proc_init);
+void cpu_xscale_proc_fin(void);
+__ADDRESSABLE(cpu_xscale_proc_fin);
+void cpu_xscale_reset(unsigned long addr, bool hvc);
+__ADDRESSABLE(cpu_xscale_reset);
+int cpu_xscale_do_idle(void);
+__ADDRESSABLE(cpu_xscale_do_idle);
+void cpu_xscale_dcache_clean_area(void *addr, int size);
+__ADDRESSABLE(cpu_xscale_dcache_clean_area);
+void cpu_xscale_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm);
+__ADDRESSABLE(cpu_xscale_switch_mm);
+void cpu_xscale_set_pte_ext(pte_t *ptep, pte_t pte, unsigned int ext);
+__ADDRESSABLE(cpu_xscale_set_pte_ext);
+#ifdef CONFIG_ARM_CPU_SUSPEND
+void cpu_xscale_do_suspend(void *);
+__ADDRESSABLE(cpu_xscale_do_suspend);
+void cpu_xscale_do_resume(void *);
+__ADDRESSABLE(cpu_xscale_do_resume);
+#endif /* CONFIG_ARM_CPU_SUSPEND */
+#endif /* CONFIG_CPU_XSCALE */
+
+#ifdef CONFIG_CPU_XSC3
+void cpu_xsc3_proc_init(void);
+__ADDRESSABLE(cpu_xsc3_proc_init);
+void cpu_xsc3_proc_fin(void);
+__ADDRESSABLE(cpu_xsc3_proc_fin);
+void cpu_xsc3_reset(unsigned long addr, bool hvc);
+__ADDRESSABLE(cpu_xsc3_reset);
+int cpu_xsc3_do_idle(void);
+__ADDRESSABLE(cpu_xsc3_do_idle);
+void cpu_xsc3_dcache_clean_area(void *addr, int size);
+__ADDRESSABLE(cpu_xsc3_dcache_clean_area);
+void cpu_xsc3_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm);
+__ADDRESSABLE(cpu_xsc3_switch_mm);
+void cpu_xsc3_set_pte_ext(pte_t *ptep, pte_t pte, unsigned int ext);
+__ADDRESSABLE(cpu_xsc3_set_pte_ext);
+#ifdef CONFIG_ARM_CPU_SUSPEND
+void cpu_xsc3_do_suspend(void *);
+__ADDRESSABLE(cpu_xsc3_do_suspend);
+void cpu_xsc3_do_resume(void *);
+__ADDRESSABLE(cpu_xsc3_do_resume);
+#endif /* CONFIG_ARM_CPU_SUSPEND */
+#endif /* CONFIG_CPU_XSC3 */
+
+#ifdef CONFIG_CPU_MOHAWK
+void cpu_mohawk_proc_init(void);
+__ADDRESSABLE(cpu_mohawk_proc_init);
+void cpu_mohawk_proc_fin(void);
+__ADDRESSABLE(cpu_mohawk_proc_fin);
+void cpu_mohawk_reset(unsigned long addr, bool hvc);
+__ADDRESSABLE(cpu_mohawk_reset);
+int cpu_mohawk_do_idle(void);
+__ADDRESSABLE(cpu_mohawk_do_idle);
+void cpu_mohawk_dcache_clean_area(void *addr, int size);
+__ADDRESSABLE(cpu_mohawk_dcache_clean_area);
+void cpu_mohawk_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm);
+__ADDRESSABLE(cpu_mohawk_switch_mm);
+void cpu_mohawk_set_pte_ext(pte_t *ptep, pte_t pte, unsigned int ext);
+__ADDRESSABLE(cpu_mohawk_set_pte_ext);
+#ifdef CONFIG_ARM_CPU_SUSPEND
+void cpu_mohawk_do_suspend(void *);
+__ADDRESSABLE(cpu_mohawk_do_suspend);
+void cpu_mohawk_do_resume(void *);
+__ADDRESSABLE(cpu_mohawk_do_resume);
+#endif /* CONFIG_ARM_CPU_SUSPEND */
+#endif /* CONFIG_CPU_MOHAWK */
+
+#ifdef CONFIG_CPU_FEROCEON
+void cpu_feroceon_proc_init(void);
+__ADDRESSABLE(cpu_feroceon_proc_init);
+void cpu_feroceon_proc_fin(void);
+__ADDRESSABLE(cpu_feroceon_proc_fin);
+void cpu_feroceon_reset(unsigned long addr, bool hvc);
+__ADDRESSABLE(cpu_feroceon_reset);
+int cpu_feroceon_do_idle(void);
+__ADDRESSABLE(cpu_feroceon_do_idle);
+void cpu_feroceon_dcache_clean_area(void *addr, int size);
+__ADDRESSABLE(cpu_feroceon_dcache_clean_area);
+void cpu_feroceon_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm);
+__ADDRESSABLE(cpu_feroceon_switch_mm);
+void cpu_feroceon_set_pte_ext(pte_t *ptep, pte_t pte, unsigned int ext);
+__ADDRESSABLE(cpu_feroceon_set_pte_ext);
+#ifdef CONFIG_ARM_CPU_SUSPEND
+void cpu_feroceon_do_suspend(void *);
+__ADDRESSABLE(cpu_feroceon_do_suspend);
+void cpu_feroceon_do_resume(void *);
+__ADDRESSABLE(cpu_feroceon_do_resume);
+#endif /* CONFIG_ARM_CPU_SUSPEND */
+#endif /* CONFIG_CPU_FEROCEON */
+
+#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K)
+void cpu_v6_proc_init(void);
+__ADDRESSABLE(cpu_v6_proc_init);
+void cpu_v6_proc_fin(void);
+__ADDRESSABLE(cpu_v6_proc_fin);
+void cpu_v6_reset(unsigned long addr, bool hvc);
+__ADDRESSABLE(cpu_v6_reset);
+int cpu_v6_do_idle(void);
+__ADDRESSABLE(cpu_v6_do_idle);
+void cpu_v6_dcache_clean_area(void *addr, int size);
+__ADDRESSABLE(cpu_v6_dcache_clean_area);
+void cpu_v6_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm);
+__ADDRESSABLE(cpu_v6_switch_mm);
+void cpu_v6_set_pte_ext(pte_t *ptep, pte_t pte, unsigned int ext);
+__ADDRESSABLE(cpu_v6_set_pte_ext);
+#ifdef CONFIG_ARM_CPU_SUSPEND
+void cpu_v6_do_suspend(void *);
+__ADDRESSABLE(cpu_v6_do_suspend);
+void cpu_v6_do_resume(void *);
+__ADDRESSABLE(cpu_v6_do_resume);
+#endif /* CONFIG_ARM_CPU_SUSPEND */
+#endif /* CONFIG_CPU_V6 || CONFIG_CPU_V6K */
+
+#ifdef CONFIG_CPU_V7
+void cpu_v7_proc_init(void);
+__ADDRESSABLE(cpu_v7_proc_init);
+void cpu_v7_proc_fin(void);
+__ADDRESSABLE(cpu_v7_proc_fin);
+void cpu_v7_reset(void);
+__ADDRESSABLE(cpu_v7_reset);
+int cpu_v7_do_idle(void);
+__ADDRESSABLE(cpu_v7_do_idle);
+#ifdef CONFIG_PJ4B_ERRATA_4742
+int cpu_pj4b_do_idle(void);
+__ADDRESSABLE(cpu_pj4b_do_idle);
+#endif
+void cpu_v7_dcache_clean_area(void *addr, int size);
+__ADDRESSABLE(cpu_v7_dcache_clean_area);
+void cpu_v7_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm);
+__ADDRESSABLE(cpu_v7_switch_mm);
+/* Special switch_mm() callbacks to work around bugs in v7 */
+void cpu_v7_iciallu_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm);
+__ADDRESSABLE(cpu_v7_iciallu_switch_mm);
+void cpu_v7_bpiall_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm);
+__ADDRESSABLE(cpu_v7_bpiall_switch_mm);
+#ifdef CONFIG_ARM_LPAE
+void cpu_v7_set_pte_ext(pte_t *ptep, pte_t pte);
+#else
+void cpu_v7_set_pte_ext(pte_t *ptep, pte_t pte, unsigned int ext);
+#endif
+__ADDRESSABLE(cpu_v7_set_pte_ext);
+#ifdef CONFIG_ARM_CPU_SUSPEND
+void cpu_v7_do_suspend(void *);
+__ADDRESSABLE(cpu_v7_do_suspend);
+void cpu_v7_do_resume(void *);
+__ADDRESSABLE(cpu_v7_do_resume);
+/* Special versions of suspend and resume for the CA9MP cores */
+void cpu_ca9mp_do_suspend(void *);
+__ADDRESSABLE(cpu_ca9mp_do_suspend);
+void cpu_ca9mp_do_resume(void *);
+__ADDRESSABLE(cpu_ca9mp_do_resume);
+/* Special versions of suspend and resume for the Marvell PJ4B cores */
+#ifdef CONFIG_CPU_PJ4B
+void cpu_pj4b_do_suspend(void *);
+__ADDRESSABLE(cpu_pj4b_do_suspend);
+void cpu_pj4b_do_resume(void *);
+__ADDRESSABLE(cpu_pj4b_do_resume);
+#endif /* CONFIG_CPU_PJ4B */
+#endif /* CONFIG_ARM_CPU_SUSPEND */
+#endif /* CONFIG_CPU_V7 */
+
+#ifdef CONFIG_CPU_V7M
+void cpu_v7m_proc_init(void);
+__ADDRESSABLE(cpu_v7m_proc_init);
+void cpu_v7m_proc_fin(void);
+__ADDRESSABLE(cpu_v7m_proc_fin);
+void cpu_v7m_reset(unsigned long addr, bool hvc);
+__ADDRESSABLE(cpu_v7m_reset);
+int cpu_v7m_do_idle(void);
+__ADDRESSABLE(cpu_v7m_do_idle);
+void cpu_v7m_dcache_clean_area(void *addr, int size);
+__ADDRESSABLE(cpu_v7m_dcache_clean_area);
+void cpu_v7m_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm);
+__ADDRESSABLE(cpu_v7m_switch_mm);
+void cpu_v7m_set_pte_ext(pte_t *ptep, pte_t pte, unsigned int ext);
+__ADDRESSABLE(cpu_v7m_set_pte_ext);
+#ifdef CONFIG_ARM_CPU_SUSPEND
+void cpu_v7m_do_suspend(void *);
+__ADDRESSABLE(cpu_v7m_do_suspend);
+void cpu_v7m_do_resume(void *);
+__ADDRESSABLE(cpu_v7m_do_resume);
+#endif /* CONFIG_ARM_CPU_SUSPEND */
+void cpu_cm7_proc_fin(void);
+__ADDRESSABLE(cpu_cm7_proc_fin);
+void cpu_cm7_dcache_clean_area(void *addr, int size);
+__ADDRESSABLE(cpu_cm7_dcache_clean_area);
+#endif /* CONFIG_CPU_V7M */
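Taken together, these declarations cover every entry point the multi-processor glue can reach. As a hedged sketch of the dispatch shape they serve (the struct below is illustrative; the real one is struct processor in <asm/proc-fns.h>):

#include <linux/mm_types.h>

struct processor_sketch {
	void (*switch_mm)(phys_addr_t pgd_phys, struct mm_struct *mm);
	void (*dcache_clean_area)(void *addr, int size);
	/* ... one pointer per proc call declared above ... */
};

static void sketch_switch(struct processor_sketch *proc,
			  phys_addr_t pgd_phys, struct mm_struct *mm)
{
	/* the indirect call kCFI instruments: the callee must carry a
	 * matching type hash, which the typed prototypes above provide */
	proc->switch_mm(pgd_phys, mm);
}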
diff --git a/arch/arm/mm/tlb-fa.S b/arch/arm/mm/tlb-fa.S
index def6161ec452..85a6fe766b21 100644
--- a/arch/arm/mm/tlb-fa.S
+++ b/arch/arm/mm/tlb-fa.S
@@ -15,6 +15,7 @@
*/
#include <linux/linkage.h>
#include <linux/init.h>
+#include <linux/cfi_types.h>
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
#include <asm/tlbflush.h>
@@ -31,7 +32,7 @@
* - mm - mm_struct describing address space
*/
.align 4
-ENTRY(fa_flush_user_tlb_range)
+SYM_TYPED_FUNC_START(fa_flush_user_tlb_range)
vma_vm_mm ip, r2
act_mm r3 @ get current->active_mm
eors r3, ip, r3 @ == mm ?
@@ -46,9 +47,10 @@ ENTRY(fa_flush_user_tlb_range)
blo 1b
mcr p15, 0, r3, c7, c10, 4 @ data write barrier
ret lr
+SYM_FUNC_END(fa_flush_user_tlb_range)
-ENTRY(fa_flush_kern_tlb_range)
+SYM_TYPED_FUNC_START(fa_flush_kern_tlb_range)
mov r3, #0
mcr p15, 0, r3, c7, c10, 4 @ drain WB
bic r0, r0, #0x0ff
@@ -60,8 +62,4 @@ ENTRY(fa_flush_kern_tlb_range)
mcr p15, 0, r3, c7, c10, 4 @ data write barrier
mcr p15, 0, r3, c7, c5, 4 @ prefetch flush (isb)
ret lr
-
- __INITDATA
-
- /* define struct cpu_tlb_fns (see <asm/tlbflush.h> and proc-macros.S) */
- define_tlb_functions fa, fa_tlb_flags
+SYM_FUNC_END(fa_flush_kern_tlb_range)
diff --git a/arch/arm/mm/tlb-v4.S b/arch/arm/mm/tlb-v4.S
index b962b4e75158..09ff69008d94 100644
--- a/arch/arm/mm/tlb-v4.S
+++ b/arch/arm/mm/tlb-v4.S
@@ -11,6 +11,7 @@
*/
#include <linux/linkage.h>
#include <linux/init.h>
+#include <linux/cfi_types.h>
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
#include <asm/tlbflush.h>
@@ -27,7 +28,7 @@
* - mm - mm_struct describing address space
*/
.align 5
-ENTRY(v4_flush_user_tlb_range)
+SYM_TYPED_FUNC_START(v4_flush_user_tlb_range)
vma_vm_mm ip, r2
act_mm r3 @ get current->active_mm
eors r3, ip, r3 @ == mm ?
@@ -40,6 +41,7 @@ ENTRY(v4_flush_user_tlb_range)
cmp r0, r1
blo 1b
ret lr
+SYM_FUNC_END(v4_flush_user_tlb_range)
/*
* v4_flush_kern_tlb_range(start, end)
@@ -50,10 +52,11 @@ ENTRY(v4_flush_user_tlb_range)
* - start - virtual address (may not be aligned)
* - end - virtual address (may not be aligned)
*/
+#ifdef CONFIG_CFI_CLANG
+SYM_TYPED_FUNC_START(v4_flush_kern_tlb_range)
+ b .v4_flush_kern_tlb_range
+SYM_FUNC_END(v4_flush_kern_tlb_range)
+#else
.globl v4_flush_kern_tlb_range
.equ v4_flush_kern_tlb_range, .v4_flush_kern_tlb_range
-
- __INITDATA
-
- /* define struct cpu_tlb_fns (see <asm/tlbflush.h> and proc-macros.S) */
- define_tlb_functions v4, v4_tlb_flags
+#endif
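The CONFIG_CFI_CLANG branch above is worth a note: kCFI validates an indirect call by checking a type hash emitted immediately before the callee's entry point, so a bare .equ alias into the shared body has nothing to check. The typed one-instruction trampoline fixes that. A rough C analogue, with function names assumed for illustration:

/* shared, untyped body -- stands in for the local .v4_... label */
static void v4_flush_kern_tlb_body(unsigned long start, unsigned long end)
{
	/* ... coprocessor TLB ops elided ... */
}

/* typed entry point; compiles to little more than "b <body>" */
void v4_flush_kern_tlb_range(unsigned long start, unsigned long end)
{
	v4_flush_kern_tlb_body(start, end);
}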
diff --git a/arch/arm/mm/tlb-v4wb.S b/arch/arm/mm/tlb-v4wb.S
index 9348bba7586a..04e46c359e75 100644
--- a/arch/arm/mm/tlb-v4wb.S
+++ b/arch/arm/mm/tlb-v4wb.S
@@ -11,6 +11,7 @@
*/
#include <linux/linkage.h>
#include <linux/init.h>
+#include <linux/cfi_types.h>
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
#include <asm/tlbflush.h>
@@ -27,7 +28,7 @@
* - mm - mm_struct describing address space
*/
.align 5
-ENTRY(v4wb_flush_user_tlb_range)
+SYM_TYPED_FUNC_START(v4wb_flush_user_tlb_range)
vma_vm_mm ip, r2
act_mm r3 @ get current->active_mm
eors r3, ip, r3 @ == mm ?
@@ -43,6 +44,7 @@ ENTRY(v4wb_flush_user_tlb_range)
cmp r0, r1
blo 1b
ret lr
+SYM_FUNC_END(v4wb_flush_user_tlb_range)
/*
 * v4wb_flush_kern_tlb_range(start, end)
@@ -53,7 +55,7 @@ ENTRY(v4wb_flush_user_tlb_range)
* - start - virtual address (may not be aligned)
* - end - virtual address (may not be aligned)
*/
-ENTRY(v4wb_flush_kern_tlb_range)
+SYM_TYPED_FUNC_START(v4wb_flush_kern_tlb_range)
mov r3, #0
mcr p15, 0, r3, c7, c10, 4 @ drain WB
bic r0, r0, #0x0ff
@@ -64,8 +66,4 @@ ENTRY(v4wb_flush_kern_tlb_range)
cmp r0, r1
blo 1b
ret lr
-
- __INITDATA
-
- /* define struct cpu_tlb_fns (see <asm/tlbflush.h> and proc-macros.S) */
- define_tlb_functions v4wb, v4wb_tlb_flags
+SYM_FUNC_END(v4wb_flush_kern_tlb_range)
diff --git a/arch/arm/mm/tlb-v4wbi.S b/arch/arm/mm/tlb-v4wbi.S
index d4f9040a4111..502dfe5628a3 100644
--- a/arch/arm/mm/tlb-v4wbi.S
+++ b/arch/arm/mm/tlb-v4wbi.S
@@ -11,6 +11,7 @@
*/
#include <linux/linkage.h>
#include <linux/init.h>
+#include <linux/cfi_types.h>
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
#include <asm/tlbflush.h>
@@ -26,7 +27,7 @@
* - mm - mm_struct describing address space
*/
.align 5
-ENTRY(v4wbi_flush_user_tlb_range)
+SYM_TYPED_FUNC_START(v4wbi_flush_user_tlb_range)
vma_vm_mm ip, r2
act_mm r3 @ get current->active_mm
eors r3, ip, r3 @ == mm ?
@@ -43,8 +44,9 @@ ENTRY(v4wbi_flush_user_tlb_range)
cmp r0, r1
blo 1b
ret lr
+SYM_FUNC_END(v4wbi_flush_user_tlb_range)
-ENTRY(v4wbi_flush_kern_tlb_range)
+SYM_TYPED_FUNC_START(v4wbi_flush_kern_tlb_range)
mov r3, #0
mcr p15, 0, r3, c7, c10, 4 @ drain WB
bic r0, r0, #0x0ff
@@ -55,8 +57,4 @@ ENTRY(v4wbi_flush_kern_tlb_range)
cmp r0, r1
blo 1b
ret lr
-
- __INITDATA
-
- /* define struct cpu_tlb_fns (see <asm/tlbflush.h> and proc-macros.S) */
- define_tlb_functions v4wbi, v4wbi_tlb_flags
+SYM_FUNC_END(v4wbi_flush_kern_tlb_range)
diff --git a/arch/arm/mm/tlb-v6.S b/arch/arm/mm/tlb-v6.S
index 1d91e49b2c2d..8256a67ac654 100644
--- a/arch/arm/mm/tlb-v6.S
+++ b/arch/arm/mm/tlb-v6.S
@@ -9,6 +9,7 @@
*/
#include <linux/init.h>
#include <linux/linkage.h>
+#include <linux/cfi_types.h>
#include <asm/asm-offsets.h>
#include <asm/assembler.h>
#include <asm/page.h>
@@ -32,7 +33,7 @@
* - the "Invalidate single entry" instruction will invalidate
* both the I and the D TLBs on Harvard-style TLBs
*/
-ENTRY(v6wbi_flush_user_tlb_range)
+SYM_TYPED_FUNC_START(v6wbi_flush_user_tlb_range)
vma_vm_mm r3, r2 @ get vma->vm_mm
mov ip, #0
mmid r3, r3 @ get vm_mm->context.id
@@ -56,6 +57,7 @@ ENTRY(v6wbi_flush_user_tlb_range)
blo 1b
mcr p15, 0, ip, c7, c10, 4 @ data synchronization barrier
ret lr
+SYM_FUNC_END(v6wbi_flush_user_tlb_range)
/*
* v6wbi_flush_kern_tlb_range(start,end)
@@ -65,7 +67,7 @@ ENTRY(v6wbi_flush_user_tlb_range)
* - start - start address (may not be aligned)
* - end - end address (exclusive, may not be aligned)
*/
-ENTRY(v6wbi_flush_kern_tlb_range)
+SYM_TYPED_FUNC_START(v6wbi_flush_kern_tlb_range)
mov r2, #0
mcr p15, 0, r2, c7, c10, 4 @ drain write buffer
mov r0, r0, lsr #PAGE_SHIFT @ align address
@@ -85,8 +87,4 @@ ENTRY(v6wbi_flush_kern_tlb_range)
mcr p15, 0, r2, c7, c10, 4 @ data synchronization barrier
mcr p15, 0, r2, c7, c5, 4 @ prefetch flush (isb)
ret lr
-
- __INIT
-
- /* define struct cpu_tlb_fns (see <asm/tlbflush.h> and proc-macros.S) */
- define_tlb_functions v6wbi, v6wbi_tlb_flags
+SYM_FUNC_END(v6wbi_flush_kern_tlb_range)
diff --git a/arch/arm/mm/tlb-v7.S b/arch/arm/mm/tlb-v7.S
index 35fd6d4f0d03..f1aa0764a2cc 100644
--- a/arch/arm/mm/tlb-v7.S
+++ b/arch/arm/mm/tlb-v7.S
@@ -10,6 +10,7 @@
*/
#include <linux/init.h>
#include <linux/linkage.h>
+#include <linux/cfi_types.h>
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
#include <asm/page.h>
@@ -31,7 +32,7 @@
* - the "Invalidate single entry" instruction will invalidate
* both the I and the D TLBs on Harvard-style TLBs
*/
-ENTRY(v7wbi_flush_user_tlb_range)
+SYM_TYPED_FUNC_START(v7wbi_flush_user_tlb_range)
vma_vm_mm r3, r2 @ get vma->vm_mm
mmid r3, r3 @ get vm_mm->context.id
dsb ish
@@ -57,7 +58,7 @@ ENTRY(v7wbi_flush_user_tlb_range)
blo 1b
dsb ish
ret lr
-ENDPROC(v7wbi_flush_user_tlb_range)
+SYM_FUNC_END(v7wbi_flush_user_tlb_range)
/*
* v7wbi_flush_kern_tlb_range(start,end)
@@ -67,7 +68,7 @@ ENDPROC(v7wbi_flush_user_tlb_range)
* - start - start address (may not be aligned)
* - end - end address (exclusive, may not be aligned)
*/
-ENTRY(v7wbi_flush_kern_tlb_range)
+SYM_TYPED_FUNC_START(v7wbi_flush_kern_tlb_range)
dsb ish
mov r0, r0, lsr #PAGE_SHIFT @ align address
mov r1, r1, lsr #PAGE_SHIFT
@@ -86,9 +87,4 @@ ENTRY(v7wbi_flush_kern_tlb_range)
dsb ish
isb
ret lr
-ENDPROC(v7wbi_flush_kern_tlb_range)
-
- __INIT
-
- /* define struct cpu_tlb_fns (see <asm/tlbflush.h> and proc-macros.S) */
- define_tlb_functions v7wbi, v7wbi_tlb_flags_up, flags_smp=v7wbi_tlb_flags_smp
+SYM_FUNC_END(v7wbi_flush_kern_tlb_range)
diff --git a/arch/arm/mm/tlb.c b/arch/arm/mm/tlb.c
new file mode 100644
index 000000000000..42359793120b
--- /dev/null
+++ b/arch/arm/mm/tlb.c
@@ -0,0 +1,84 @@
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright 2024 Google LLC
+// Author: Ard Biesheuvel <ardb@google.com>
+
+#include <linux/types.h>
+#include <asm/tlbflush.h>
+
+#ifdef CONFIG_CPU_TLB_V4WT
+void v4_flush_user_tlb_range(unsigned long, unsigned long, struct vm_area_struct *);
+void v4_flush_kern_tlb_range(unsigned long, unsigned long);
+
+struct cpu_tlb_fns v4_tlb_fns __initconst = {
+ .flush_user_range = v4_flush_user_tlb_range,
+ .flush_kern_range = v4_flush_kern_tlb_range,
+ .tlb_flags = v4_tlb_flags,
+};
+#endif
+
+#ifdef CONFIG_CPU_TLB_V4WB
+void v4wb_flush_user_tlb_range(unsigned long, unsigned long, struct vm_area_struct *);
+void v4wb_flush_kern_tlb_range(unsigned long, unsigned long);
+
+struct cpu_tlb_fns v4wb_tlb_fns __initconst = {
+ .flush_user_range = v4wb_flush_user_tlb_range,
+ .flush_kern_range = v4wb_flush_kern_tlb_range,
+ .tlb_flags = v4wb_tlb_flags,
+};
+#endif
+
+#if defined(CONFIG_CPU_TLB_V4WBI) || defined(CONFIG_CPU_TLB_FEROCEON)
+void v4wbi_flush_user_tlb_range(unsigned long, unsigned long, struct vm_area_struct *);
+void v4wbi_flush_kern_tlb_range(unsigned long, unsigned long);
+
+struct cpu_tlb_fns v4wbi_tlb_fns __initconst = {
+ .flush_user_range = v4wbi_flush_user_tlb_range,
+ .flush_kern_range = v4wbi_flush_kern_tlb_range,
+ .tlb_flags = v4wbi_tlb_flags,
+};
+#endif
+
+#ifdef CONFIG_CPU_TLB_V6
+void v6wbi_flush_user_tlb_range(unsigned long, unsigned long, struct vm_area_struct *);
+void v6wbi_flush_kern_tlb_range(unsigned long, unsigned long);
+
+struct cpu_tlb_fns v6wbi_tlb_fns __initconst = {
+ .flush_user_range = v6wbi_flush_user_tlb_range,
+ .flush_kern_range = v6wbi_flush_kern_tlb_range,
+ .tlb_flags = v6wbi_tlb_flags,
+};
+#endif
+
+#ifdef CONFIG_CPU_TLB_V7
+void v7wbi_flush_user_tlb_range(unsigned long, unsigned long, struct vm_area_struct *);
+void v7wbi_flush_kern_tlb_range(unsigned long, unsigned long);
+
+struct cpu_tlb_fns v7wbi_tlb_fns __initconst = {
+ .flush_user_range = v7wbi_flush_user_tlb_range,
+ .flush_kern_range = v7wbi_flush_kern_tlb_range,
+ .tlb_flags = IS_ENABLED(CONFIG_SMP) ? v7wbi_tlb_flags_smp
+ : v7wbi_tlb_flags_up,
+};
+
+#ifdef CONFIG_SMP_ON_UP
+/* This will be run-time patched so the offset better be right */
+static_assert(offsetof(struct cpu_tlb_fns, tlb_flags) == 8);
+
+asm(" .pushsection \".alt.smp.init\", \"a\" \n" \
+ " .align 2 \n" \
+ " .long v7wbi_tlb_fns + 8 - . \n" \
+ " .long " __stringify(v7wbi_tlb_flags_up) " \n" \
+ " .popsection \n");
+#endif
+#endif
+
+#ifdef CONFIG_CPU_TLB_FA
+void fa_flush_user_tlb_range(unsigned long, unsigned long, struct vm_area_struct *);
+void fa_flush_kern_tlb_range(unsigned long, unsigned long);
+
+struct cpu_tlb_fns fa_tlb_fns __initconst = {
+ .flush_user_range = fa_flush_user_tlb_range,
+ .flush_kern_range = fa_flush_kern_tlb_range,
+ .tlb_flags = fa_tlb_flags,
+};
+#endif
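The tables above replace the assembly-side define_tlb_functions expansions with ordinary C initialisers, which CFI can type-check. Note the SMP_ON_UP stanza: it emits an .alt.smp.init record pointing 8 bytes into v7wbi_tlb_fns -- the tlb_flags member, per the static_assert -- so boot-time patching can downgrade the flags on uniprocessor hardware. A hedged sketch of how the tables are consumed on multi-TLB builds (names follow <asm/tlbflush.h> but are assumptions here):

extern struct cpu_tlb_fns cpu_tlb;	/* filled from a *_tlb_fns table at boot */

static inline void sketch_flush_kern(unsigned long start, unsigned long end)
{
	cpu_tlb.flush_kern_range(start, end);	/* indirect, CFI-checked */
}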
diff --git a/drivers/amba/bus.c b/drivers/amba/bus.c
index a24c152bfaac..aba3aa95b224 100644
--- a/drivers/amba/bus.c
+++ b/drivers/amba/bus.c
@@ -488,28 +488,31 @@ static int __init amba_stub_drv_init(void)
* waiting on amba_match(). So, register a stub driver to make sure
* amba_match() is called even if no amba driver has been registered.
*/
- return amba_driver_register(&amba_proxy_drv);
+ return __amba_driver_register(&amba_proxy_drv, NULL);
}
late_initcall_sync(amba_stub_drv_init);
/**
- * amba_driver_register - register an AMBA device driver
+ * __amba_driver_register - register an AMBA device driver
* @drv: amba device driver structure
+ * @owner: owning module/driver
*
* Register an AMBA device driver with the Linux device model
* core. If devices pre-exist, the drivers probe function will
* be called.
*/
-int amba_driver_register(struct amba_driver *drv)
+int __amba_driver_register(struct amba_driver *drv,
+ struct module *owner)
{
if (!drv->probe)
return -EINVAL;
+ drv->drv.owner = owner;
drv->drv.bus = &amba_bustype;
return driver_register(&drv->drv);
}
-EXPORT_SYMBOL(amba_driver_register);
+EXPORT_SYMBOL(__amba_driver_register);
/**
* amba_driver_unregister - remove an AMBA device driver
diff --git a/drivers/char/hw_random/nomadik-rng.c b/drivers/char/hw_random/nomadik-rng.c
index a2009fc4ad3c..f2a2aa7a531c 100644
--- a/drivers/char/hw_random/nomadik-rng.c
+++ b/drivers/char/hw_random/nomadik-rng.c
@@ -78,7 +78,6 @@ MODULE_DEVICE_TABLE(amba, nmk_rng_ids);
static struct amba_driver nmk_rng_driver = {
.drv = {
- .owner = THIS_MODULE,
.name = "rng",
},
.probe = nmk_rng_probe,
diff --git a/drivers/clk/clkdev.c b/drivers/clk/clkdev.c
index 9cd80522ca2d..6a77d7e201a9 100644
--- a/drivers/clk/clkdev.c
+++ b/drivers/clk/clkdev.c
@@ -158,23 +158,54 @@ vclkdev_alloc(struct clk_hw *hw, const char *con_id, const char *dev_fmt,
va_list ap)
{
struct clk_lookup_alloc *cla;
+ struct va_format vaf;
+ const char *failure;
+ va_list ap_copy;
+ size_t max_size;
+ ssize_t res;
cla = kzalloc(sizeof(*cla), GFP_KERNEL);
if (!cla)
return NULL;
+ va_copy(ap_copy, ap);
+
cla->cl.clk_hw = hw;
if (con_id) {
- strscpy(cla->con_id, con_id, sizeof(cla->con_id));
+ res = strscpy(cla->con_id, con_id, sizeof(cla->con_id));
+ if (res < 0) {
+ max_size = sizeof(cla->con_id);
+ failure = "connection";
+ goto fail;
+ }
cla->cl.con_id = cla->con_id;
}
if (dev_fmt) {
- vscnprintf(cla->dev_id, sizeof(cla->dev_id), dev_fmt, ap);
+ res = vsnprintf(cla->dev_id, sizeof(cla->dev_id), dev_fmt, ap);
+ if (res >= sizeof(cla->dev_id)) {
+ max_size = sizeof(cla->dev_id);
+ failure = "device";
+ goto fail;
+ }
cla->cl.dev_id = cla->dev_id;
}
+ va_end(ap_copy);
+
return &cla->cl;
+
+fail:
+ if (dev_fmt)
+ vaf.fmt = dev_fmt;
+ else
+ vaf.fmt = "null-device";
+ vaf.va = &ap_copy;
+ pr_err("%pV:%s: %s ID is greater than %zu\n",
+ &vaf, con_id, failure, max_size);
+ va_end(ap_copy);
+ kfree(cla);
+ return NULL;
}
static struct clk_lookup *
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index ad8e3da1b2cd..60c4de8dac1d 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -3262,7 +3262,6 @@ MODULE_DEVICE_TABLE(amba, pl330_ids);
static struct amba_driver pl330_driver = {
.drv = {
- .owner = THIS_MODULE,
.name = "dma-pl330",
.pm = &pl330_pm,
},
diff --git a/drivers/hwtracing/coresight/coresight-catu.c b/drivers/hwtracing/coresight/coresight-catu.c
index 3949ded0d4fa..375bd0d89b0c 100644
--- a/drivers/hwtracing/coresight/coresight-catu.c
+++ b/drivers/hwtracing/coresight/coresight-catu.c
@@ -590,7 +590,6 @@ MODULE_DEVICE_TABLE(amba, catu_ids);
static struct amba_driver catu_driver = {
.drv = {
.name = "coresight-catu",
- .owner = THIS_MODULE,
.suppress_bind_attrs = true,
},
.probe = catu_probe,
diff --git a/drivers/hwtracing/coresight/coresight-cti-core.c b/drivers/hwtracing/coresight/coresight-cti-core.c
index e805617020d0..d2b5a5718c29 100644
--- a/drivers/hwtracing/coresight/coresight-cti-core.c
+++ b/drivers/hwtracing/coresight/coresight-cti-core.c
@@ -982,7 +982,6 @@ MODULE_DEVICE_TABLE(amba, cti_ids);
static struct amba_driver cti_driver = {
.drv = {
.name = "coresight-cti",
- .owner = THIS_MODULE,
.suppress_bind_attrs = true,
},
.probe = cti_probe,
diff --git a/drivers/hwtracing/coresight/coresight-etb10.c b/drivers/hwtracing/coresight/coresight-etb10.c
index 3aab182b562f..7edd3f1d0d46 100644
--- a/drivers/hwtracing/coresight/coresight-etb10.c
+++ b/drivers/hwtracing/coresight/coresight-etb10.c
@@ -844,7 +844,6 @@ MODULE_DEVICE_TABLE(amba, etb_ids);
static struct amba_driver etb_driver = {
.drv = {
.name = "coresight-etb10",
- .owner = THIS_MODULE,
.pm = &etb_dev_pm_ops,
.suppress_bind_attrs = true,
diff --git a/drivers/hwtracing/coresight/coresight-etm3x-core.c b/drivers/hwtracing/coresight/coresight-etm3x-core.c
index 9d5c1391ffb1..8b362605d242 100644
--- a/drivers/hwtracing/coresight/coresight-etm3x-core.c
+++ b/drivers/hwtracing/coresight/coresight-etm3x-core.c
@@ -1008,7 +1008,6 @@ MODULE_DEVICE_TABLE(amba, etm_ids);
static struct amba_driver etm_driver = {
.drv = {
.name = "coresight-etm3x",
- .owner = THIS_MODULE,
.pm = &etm_dev_pm_ops,
.suppress_bind_attrs = true,
},
diff --git a/drivers/hwtracing/coresight/coresight-etm4x-core.c b/drivers/hwtracing/coresight/coresight-etm4x-core.c
index c2ca4a02dfce..e6cd9705596c 100644
--- a/drivers/hwtracing/coresight/coresight-etm4x-core.c
+++ b/drivers/hwtracing/coresight/coresight-etm4x-core.c
@@ -2344,7 +2344,6 @@ MODULE_DEVICE_TABLE(amba, etm4_ids);
static struct amba_driver etm4x_amba_driver = {
.drv = {
.name = "coresight-etm4x",
- .owner = THIS_MODULE,
.suppress_bind_attrs = true,
},
.probe = etm4_probe_amba,
diff --git a/drivers/hwtracing/coresight/coresight-funnel.c b/drivers/hwtracing/coresight/coresight-funnel.c
index ef1a0abfee4e..5ab1f592917a 100644
--- a/drivers/hwtracing/coresight/coresight-funnel.c
+++ b/drivers/hwtracing/coresight/coresight-funnel.c
@@ -399,7 +399,6 @@ MODULE_DEVICE_TABLE(amba, dynamic_funnel_ids);
static struct amba_driver dynamic_funnel_driver = {
.drv = {
.name = "coresight-dynamic-funnel",
- .owner = THIS_MODULE,
.pm = &funnel_dev_pm_ops,
.suppress_bind_attrs = true,
},
diff --git a/drivers/hwtracing/coresight/coresight-replicator.c b/drivers/hwtracing/coresight/coresight-replicator.c
index 73452d9dc13b..2bb9ba66e3c0 100644
--- a/drivers/hwtracing/coresight/coresight-replicator.c
+++ b/drivers/hwtracing/coresight/coresight-replicator.c
@@ -406,7 +406,6 @@ static struct amba_driver dynamic_replicator_driver = {
.drv = {
.name = "coresight-dynamic-replicator",
.pm = &replicator_dev_pm_ops,
- .owner = THIS_MODULE,
.suppress_bind_attrs = true,
},
.probe = dynamic_replicator_probe,
diff --git a/drivers/hwtracing/coresight/coresight-stm.c b/drivers/hwtracing/coresight/coresight-stm.c
index 974d37e5f94c..15b52358965c 100644
--- a/drivers/hwtracing/coresight/coresight-stm.c
+++ b/drivers/hwtracing/coresight/coresight-stm.c
@@ -954,7 +954,6 @@ MODULE_DEVICE_TABLE(amba, stm_ids);
static struct amba_driver stm_driver = {
.drv = {
.name = "coresight-stm",
- .owner = THIS_MODULE,
.pm = &stm_dev_pm_ops,
.suppress_bind_attrs = true,
},
diff --git a/drivers/hwtracing/coresight/coresight-tmc-core.c b/drivers/hwtracing/coresight/coresight-tmc-core.c
index 72005b0c633e..0d251cae814f 100644
--- a/drivers/hwtracing/coresight/coresight-tmc-core.c
+++ b/drivers/hwtracing/coresight/coresight-tmc-core.c
@@ -602,7 +602,6 @@ MODULE_DEVICE_TABLE(amba, tmc_ids);
static struct amba_driver tmc_driver = {
.drv = {
.name = "coresight-tmc",
- .owner = THIS_MODULE,
.suppress_bind_attrs = true,
},
.probe = tmc_probe,
diff --git a/drivers/hwtracing/coresight/coresight-tpda.c b/drivers/hwtracing/coresight/coresight-tpda.c
index 7739bc7adc44..bfca103f9f84 100644
--- a/drivers/hwtracing/coresight/coresight-tpda.c
+++ b/drivers/hwtracing/coresight/coresight-tpda.c
@@ -333,7 +333,6 @@ static struct amba_id tpda_ids[] = {
static struct amba_driver tpda_driver = {
.drv = {
.name = "coresight-tpda",
- .owner = THIS_MODULE,
.suppress_bind_attrs = true,
},
.probe = tpda_probe,
diff --git a/drivers/hwtracing/coresight/coresight-tpdm.c b/drivers/hwtracing/coresight/coresight-tpdm.c
index a9708ab0d488..0726f8842552 100644
--- a/drivers/hwtracing/coresight/coresight-tpdm.c
+++ b/drivers/hwtracing/coresight/coresight-tpdm.c
@@ -1310,7 +1310,6 @@ static struct amba_id tpdm_ids[] = {
static struct amba_driver tpdm_driver = {
.drv = {
.name = "coresight-tpdm",
- .owner = THIS_MODULE,
.suppress_bind_attrs = true,
},
.probe = tpdm_probe,
diff --git a/drivers/hwtracing/coresight/coresight-tpiu.c b/drivers/hwtracing/coresight/coresight-tpiu.c
index 29024f880fda..7dc9ea564bca 100644
--- a/drivers/hwtracing/coresight/coresight-tpiu.c
+++ b/drivers/hwtracing/coresight/coresight-tpiu.c
@@ -236,7 +236,6 @@ MODULE_DEVICE_TABLE(amba, tpiu_ids);
static struct amba_driver tpiu_driver = {
.drv = {
.name = "coresight-tpiu",
- .owner = THIS_MODULE,
.pm = &tpiu_dev_pm_ops,
.suppress_bind_attrs = true,
},
diff --git a/drivers/i2c/busses/i2c-nomadik.c b/drivers/i2c/busses/i2c-nomadik.c
index 4f41a3c7824d..17fb313565b8 100644
--- a/drivers/i2c/busses/i2c-nomadik.c
+++ b/drivers/i2c/busses/i2c-nomadik.c
@@ -1194,7 +1194,6 @@ MODULE_DEVICE_TABLE(amba, nmk_i2c_ids);
static struct amba_driver nmk_i2c_driver = {
.drv = {
- .owner = THIS_MODULE,
.name = DRIVER_NAME,
.pm = pm_ptr(&nmk_i2c_pm),
},
diff --git a/drivers/input/serio/ambakmi.c b/drivers/input/serio/ambakmi.c
index 8fbfa448be4a..496bb7a312d2 100644
--- a/drivers/input/serio/ambakmi.c
+++ b/drivers/input/serio/ambakmi.c
@@ -195,7 +195,6 @@ MODULE_DEVICE_TABLE(amba, amba_kmi_idtable);
static struct amba_driver ambakmi_driver = {
.drv = {
.name = "kmi-pl050",
- .owner = THIS_MODULE,
.pm = pm_sleep_ptr(&amba_kmi_dev_pm_ops),
},
.id_table = amba_kmi_idtable,
diff --git a/drivers/memory/pl353-smc.c b/drivers/memory/pl353-smc.c
index 48540817e046..56e51737c81f 100644
--- a/drivers/memory/pl353-smc.c
+++ b/drivers/memory/pl353-smc.c
@@ -154,7 +154,6 @@ MODULE_DEVICE_TABLE(amba, pl353_ids);
static struct amba_driver pl353_smc_driver = {
.drv = {
- .owner = THIS_MODULE,
.name = "pl353-smc",
.pm = &pl353_smc_dev_pm_ops,
},
diff --git a/drivers/vfio/platform/vfio_amba.c b/drivers/vfio/platform/vfio_amba.c
index 485c6f9161a9..ff8ff8480968 100644
--- a/drivers/vfio/platform/vfio_amba.c
+++ b/drivers/vfio/platform/vfio_amba.c
@@ -134,7 +134,6 @@ static struct amba_driver vfio_amba_driver = {
.id_table = vfio_amba_ids,
.drv = {
.name = "vfio-amba",
- .owner = THIS_MODULE,
},
.driver_managed_dma = true,
};
diff --git a/include/linux/amba/bus.h b/include/linux/amba/bus.h
index c60a6a14638c..958a55bcc708 100644
--- a/include/linux/amba/bus.h
+++ b/include/linux/amba/bus.h
@@ -112,11 +112,18 @@ extern struct bus_type amba_bustype;
#define amba_get_drvdata(d) dev_get_drvdata(&d->dev)
#define amba_set_drvdata(d,p) dev_set_drvdata(&d->dev, p)
+/*
+ * use a macro to avoid include chaining to get THIS_MODULE
+ */
+#define amba_driver_register(drv) \
+ __amba_driver_register(drv, THIS_MODULE)
+
#ifdef CONFIG_ARM_AMBA
-int amba_driver_register(struct amba_driver *);
+int __amba_driver_register(struct amba_driver *, struct module *);
void amba_driver_unregister(struct amba_driver *);
#else
-static inline int amba_driver_register(struct amba_driver *drv)
+static inline int __amba_driver_register(struct amba_driver *drv,
+ struct module *owner)
{
return -EINVAL;
}
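The net effect on drivers, sketched with invented names: the boilerplate .owner initialiser disappears from each static amba_driver (as in the hunks above), and the registration macro supplies THIS_MODULE on the driver's behalf.

static struct amba_driver sketch_driver = {
	.drv = {
		.name = "sketch-amba",	/* no .owner = THIS_MODULE needed */
	},
	.probe	  = sketch_probe,	/* assumed driver callbacks */
	.remove	  = sketch_remove,
	.id_table = sketch_ids,
};
module_amba_driver(sketch_driver);	/* registers via amba_driver_register() */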