Diffstat (limited to 'arch/arm64/kernel')
-rw-r--r--  arch/arm64/kernel/asm-offsets.c           |   7
-rw-r--r--  arch/arm64/kernel/cpu_errata.c            |  48
-rw-r--r--  arch/arm64/kernel/cpufeature.c            |   2
-rw-r--r--  arch/arm64/kernel/entry.S                 |   4
-rw-r--r--  arch/arm64/kernel/fpsimd.c                | 179
-rw-r--r--  arch/arm64/kernel/hibernate.c             |   3
-rw-r--r--  arch/arm64/kernel/kaslr.c                 |   6
-rw-r--r--  arch/arm64/kernel/module.c                |  44
-rw-r--r--  arch/arm64/kernel/paravirt.c              |   9
-rw-r--r--  arch/arm64/kernel/perf_event.c            |  50
-rw-r--r--  arch/arm64/kernel/probes/decode-insn.c    |  10
-rw-r--r--  arch/arm64/kernel/probes/decode-insn.h    |  10
-rw-r--r--  arch/arm64/kernel/probes/kprobes.c        |  11
-rw-r--r--  arch/arm64/kernel/probes/simulate-insn.c  |  10
-rw-r--r--  arch/arm64/kernel/probes/simulate-insn.h  |  10
-rw-r--r--  arch/arm64/kernel/psci.c                  |   9
-rw-r--r--  arch/arm64/kernel/signal.c                |   5
-rw-r--r--  arch/arm64/kernel/smccc-call.S            |  11
-rw-r--r--  arch/arm64/kernel/sys.c                   |  16
-rw-r--r--  arch/arm64/kernel/sys32.c                 |   7
-rw-r--r--  arch/arm64/kernel/syscall.c               |  31
-rw-r--r--  arch/arm64/kernel/traps.c                 |   9
-rw-r--r--  arch/arm64/kernel/vdso/Makefile           |   4
23 files changed, 315 insertions, 180 deletions
diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c
index e10e2a5d9ddc..947e39896e28 100644
--- a/arch/arm64/kernel/asm-offsets.c
+++ b/arch/arm64/kernel/asm-offsets.c
@@ -125,9 +125,16 @@ int main(void)
DEFINE(VCPU_CONTEXT, offsetof(struct kvm_vcpu, arch.ctxt));
DEFINE(VCPU_FAULT_DISR, offsetof(struct kvm_vcpu, arch.fault.disr_el1));
DEFINE(VCPU_WORKAROUND_FLAGS, offsetof(struct kvm_vcpu, arch.workaround_flags));
+ DEFINE(VCPU_HCR_EL2, offsetof(struct kvm_vcpu, arch.hcr_el2));
DEFINE(CPU_GP_REGS, offsetof(struct kvm_cpu_context, gp_regs));
+ DEFINE(CPU_APIAKEYLO_EL1, offsetof(struct kvm_cpu_context, sys_regs[APIAKEYLO_EL1]));
+ DEFINE(CPU_APIBKEYLO_EL1, offsetof(struct kvm_cpu_context, sys_regs[APIBKEYLO_EL1]));
+ DEFINE(CPU_APDAKEYLO_EL1, offsetof(struct kvm_cpu_context, sys_regs[APDAKEYLO_EL1]));
+ DEFINE(CPU_APDBKEYLO_EL1, offsetof(struct kvm_cpu_context, sys_regs[APDBKEYLO_EL1]));
+ DEFINE(CPU_APGAKEYLO_EL1, offsetof(struct kvm_cpu_context, sys_regs[APGAKEYLO_EL1]));
DEFINE(CPU_USER_PT_REGS, offsetof(struct kvm_regs, regs));
DEFINE(HOST_CONTEXT_VCPU, offsetof(struct kvm_cpu_context, __hyp_running_vcpu));
+ DEFINE(HOST_DATA_CONTEXT, offsetof(struct kvm_host_data, host_ctxt));
#endif
#ifdef CONFIG_CPU_PM
DEFINE(CPU_CTX_SP, offsetof(struct cpu_suspend_ctx, sp));
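
[Note on the mechanism behind the new DEFINE() entries: asm-offsets.c never runs. It is compiled to assembly only, and each DEFINE() plants the computed offsetof() value as a marker string that the build scrapes into generated/asm-offsets.h, so the hyp assembly can address fields such as the pointer-auth keys by constant offset. A minimal standalone sketch of the technique — the struct and symbol name here are illustrative, and the DEFINE() form is modelled on include/linux/kbuild.h:

    #include <stddef.h>

    struct ctx { unsigned long regs[4]; unsigned long key_lo; };

    /* Plant "->SYM value" into the generated assembly; a build script then
     * rewrites each marker as "#define SYM value" in asm-offsets.h. */
    #define DEFINE(sym, val) \
            asm volatile("\n.ascii \"->" #sym " %0 " #val "\"" : : "i" (val))

    int main(void)
    {
            DEFINE(CTX_KEY_LO, offsetof(struct ctx, key_lo));
            return 0;
    }

Assembly can then write, e.g., ldr x1, [x0, #CTX_KEY_LO] without hardcoding the structure layout.]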
diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
index e88d4e7bdfc7..d61beedba101 100644
--- a/arch/arm64/kernel/cpu_errata.c
+++ b/arch/arm64/kernel/cpu_errata.c
@@ -502,6 +502,22 @@ static const struct midr_range arm64_ssb_cpus[] = {
{},
};
+#ifdef CONFIG_ARM64_ERRATUM_1463225
+DEFINE_PER_CPU(int, __in_cortex_a76_erratum_1463225_wa);
+
+static bool
+has_cortex_a76_erratum_1463225(const struct arm64_cpu_capabilities *entry,
+ int scope)
+{
+ u32 midr = read_cpuid_id();
+ /* Cortex-A76 r0p0 - r3p1 */
+ struct midr_range range = MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 3, 1);
+
+ WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
+ return is_midr_in_range(midr, &range) && is_kernel_in_hyp_mode();
+}
+#endif
+
static void __maybe_unused
cpu_enable_cache_maint_trap(const struct arm64_cpu_capabilities *__unused)
{
@@ -682,12 +698,16 @@ static const struct midr_range workaround_clean_cache[] = {
};
#endif
-#ifdef CONFIG_ARM64_ERRATUM_1188873
-static const struct midr_range erratum_1188873_list[] = {
- /* Cortex-A76 r0p0 to r2p0 */
- MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 2, 0),
- /* Neoverse-N1 r0p0 to r2p0 */
- MIDR_RANGE(MIDR_NEOVERSE_N1, 0, 0, 2, 0),
+#ifdef CONFIG_ARM64_ERRATUM_1418040
+/*
+ * - 1188873 affects r0p0 to r2p0
+ * - 1418040 affects r0p0 to r3p1
+ */
+static const struct midr_range erratum_1418040_list[] = {
+ /* Cortex-A76 r0p0 to r3p1 */
+ MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 3, 1),
+ /* Neoverse-N1 r0p0 to r3p1 */
+ MIDR_RANGE(MIDR_NEOVERSE_N1, 0, 0, 3, 1),
{},
};
#endif
@@ -809,11 +829,11 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
.matches = has_ssbd_mitigation,
.midr_range_list = arm64_ssb_cpus,
},
-#ifdef CONFIG_ARM64_ERRATUM_1188873
+#ifdef CONFIG_ARM64_ERRATUM_1418040
{
- .desc = "ARM erratum 1188873",
- .capability = ARM64_WORKAROUND_1188873,
- ERRATA_MIDR_RANGE_LIST(erratum_1188873_list),
+ .desc = "ARM erratum 1418040",
+ .capability = ARM64_WORKAROUND_1418040,
+ ERRATA_MIDR_RANGE_LIST(erratum_1418040_list),
},
#endif
#ifdef CONFIG_ARM64_ERRATUM_1165522
@@ -824,6 +844,14 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
ERRATA_MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 2, 0),
},
#endif
+#ifdef CONFIG_ARM64_ERRATUM_1463225
+ {
+ .desc = "ARM erratum 1463225",
+ .capability = ARM64_WORKAROUND_1463225,
+ .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
+ .matches = has_cortex_a76_erratum_1463225,
+ },
+#endif
{
}
};
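
[Note on how entries like the 1463225 one are matched: each capability's .matches hook runs per CPU, and a MIDR range check compares the implementer/part fields exactly while treating (variant, revision) as an ordered pair. A standalone sketch of the idea — simplified field handling; the kernel's real helpers are MIDR_RANGE() and is_midr_in_range() in asm/cputype.h:

    #include <stdbool.h>
    #include <stdint.h>

    /* MIDR_EL1 layout: [31:24] implementer, [23:20] variant,
     * [19:16] architecture, [15:4] part number, [3:0] revision. */
    static inline unsigned variant(uint32_t midr)  { return (midr >> 20) & 0xf; }
    static inline unsigned revision(uint32_t midr) { return midr & 0xf; }

    /* True if midr matches `model` in the implementer/arch/part fields and
     * its (variant, revision) lies in [v_min.r_min, v_max.r_max]. */
    static bool midr_in_range(uint32_t midr, uint32_t model,
                              unsigned v_min, unsigned r_min,
                              unsigned v_max, unsigned r_max)
    {
            if ((midr & 0xff0ffff0u) != (model & 0xff0ffff0u))
                    return false;
            unsigned vr = (variant(midr) << 4) | revision(midr);
            return vr >= ((v_min << 4) | r_min) && vr <= ((v_max << 4) | r_max);
    }

"Cortex-A76 r0p0 - r3p1" is thus the pair range 0x00..0x31 against the A76 model bits.]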
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index 2b807f129e60..ca27e08e3d8a 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -1913,7 +1913,7 @@ static void verify_sve_features(void)
unsigned int len = zcr & ZCR_ELx_LEN_MASK;
if (len < safe_len || sve_verify_vq_map()) {
- pr_crit("CPU%d: SVE: required vector length(s) missing\n",
+ pr_crit("CPU%d: SVE: vector length support mismatch\n",
smp_processor_id());
cpu_die_early();
}
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index 1a7811b7e3c4..cd0c7af8e4a8 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -336,8 +336,8 @@ alternative_if ARM64_WORKAROUND_845719
alternative_else_nop_endif
#endif
3:
-#ifdef CONFIG_ARM64_ERRATUM_1188873
-alternative_if_not ARM64_WORKAROUND_1188873
+#ifdef CONFIG_ARM64_ERRATUM_1418040
+alternative_if_not ARM64_WORKAROUND_1418040
b 4f
alternative_else_nop_endif
/*
diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c
index 735cf1f8b109..a38bf74bcca8 100644
--- a/arch/arm64/kernel/fpsimd.c
+++ b/arch/arm64/kernel/fpsimd.c
@@ -18,6 +18,7 @@
*/
#include <linux/bitmap.h>
+#include <linux/bitops.h>
#include <linux/bottom_half.h>
#include <linux/bug.h>
#include <linux/cache.h>
@@ -48,6 +49,7 @@
#include <asm/sigcontext.h>
#include <asm/sysreg.h>
#include <asm/traps.h>
+#include <asm/virt.h>
#define FPEXC_IOF (1 << 0)
#define FPEXC_DZF (1 << 1)
@@ -119,6 +121,8 @@
*/
struct fpsimd_last_state_struct {
struct user_fpsimd_state *st;
+ void *sve_state;
+ unsigned int sve_vl;
};
static DEFINE_PER_CPU(struct fpsimd_last_state_struct, fpsimd_last_state);
@@ -130,14 +134,23 @@ static int sve_default_vl = -1;
/* Maximum supported vector length across all CPUs (initially poisoned) */
int __ro_after_init sve_max_vl = SVE_VL_MIN;
-/* Set of available vector lengths, as vq_to_bit(vq): */
-static __ro_after_init DECLARE_BITMAP(sve_vq_map, SVE_VQ_MAX);
+int __ro_after_init sve_max_virtualisable_vl = SVE_VL_MIN;
+
+/*
+ * Set of available vector lengths,
+ * where length vq is encoded as bit __vq_to_bit(vq):
+ */
+__ro_after_init DECLARE_BITMAP(sve_vq_map, SVE_VQ_MAX);
+/* Set of vector lengths present on at least one cpu: */
+static __ro_after_init DECLARE_BITMAP(sve_vq_partial_map, SVE_VQ_MAX);
+
static void __percpu *efi_sve_state;
#else /* ! CONFIG_ARM64_SVE */
/* Dummy declaration for code that will be optimised out: */
extern __ro_after_init DECLARE_BITMAP(sve_vq_map, SVE_VQ_MAX);
+extern __ro_after_init DECLARE_BITMAP(sve_vq_partial_map, SVE_VQ_MAX);
extern void __percpu *efi_sve_state;
#endif /* ! CONFIG_ARM64_SVE */
@@ -235,14 +248,15 @@ static void task_fpsimd_load(void)
*/
void fpsimd_save(void)
{
- struct user_fpsimd_state *st = __this_cpu_read(fpsimd_last_state.st);
+ struct fpsimd_last_state_struct const *last =
+ this_cpu_ptr(&fpsimd_last_state);
/* set by fpsimd_bind_task_to_cpu() or fpsimd_bind_state_to_cpu() */
WARN_ON(!in_softirq() && !irqs_disabled());
if (!test_thread_flag(TIF_FOREIGN_FPSTATE)) {
if (system_supports_sve() && test_thread_flag(TIF_SVE)) {
- if (WARN_ON(sve_get_vl() != current->thread.sve_vl)) {
+ if (WARN_ON(sve_get_vl() != last->sve_vl)) {
/*
* Can't save the user regs, so current would
* re-enter user with corrupt state.
@@ -252,32 +266,15 @@ void fpsimd_save(void)
return;
}
- sve_save_state(sve_pffr(&current->thread), &st->fpsr);
+ sve_save_state((char *)last->sve_state +
+ sve_ffr_offset(last->sve_vl),
+ &last->st->fpsr);
} else
- fpsimd_save_state(st);
+ fpsimd_save_state(last->st);
}
}
/*
- * Helpers to translate bit indices in sve_vq_map to VQ values (and
- * vice versa). This allows find_next_bit() to be used to find the
- * _maximum_ VQ not exceeding a certain value.
- */
-
-static unsigned int vq_to_bit(unsigned int vq)
-{
- return SVE_VQ_MAX - vq;
-}
-
-static unsigned int bit_to_vq(unsigned int bit)
-{
- if (WARN_ON(bit >= SVE_VQ_MAX))
- bit = SVE_VQ_MAX - 1;
-
- return SVE_VQ_MAX - bit;
-}
-
-/*
* All vector length selection from userspace comes through here.
* We're on a slow path, so some sanity-checks are included.
* If things go wrong there's a bug somewhere, but try to fall back to a
@@ -298,8 +295,8 @@ static unsigned int find_supported_vector_length(unsigned int vl)
vl = max_vl;
bit = find_next_bit(sve_vq_map, SVE_VQ_MAX,
- vq_to_bit(sve_vq_from_vl(vl)));
- return sve_vl_from_vq(bit_to_vq(bit));
+ __vq_to_bit(sve_vq_from_vl(vl)));
+ return sve_vl_from_vq(__bit_to_vq(bit));
}
#ifdef CONFIG_SYSCTL
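
[Note: the vq_to_bit()/bit_to_vq() helpers deleted above are not gone — the __vq_to_bit()/__bit_to_vq() spellings used here suggest they were moved to a shared header, presumably so KVM code can use them too. The encoding, reproduced from the removed lines, is deliberately reversed:

    /* VQ 1..SVE_VQ_MAX maps to bit SVE_VQ_MAX-1..0: a bigger VQ is a lower bit. */
    static unsigned int __vq_to_bit(unsigned int vq)
    {
            return SVE_VQ_MAX - vq;
    }

    static unsigned int __bit_to_vq(unsigned int bit)
    {
            return SVE_VQ_MAX - bit;
    }

Because of the reversal, find_next_bit() scanning upward through bit indices walks downward through vector lengths, so find_supported_vector_length() lands on the largest supported VQ not exceeding the request, and find_last_bit() (as used in sve_verify_vq_map() below) finds the smallest VQ in a set.]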
@@ -550,7 +547,6 @@ int sve_set_vector_length(struct task_struct *task,
local_bh_disable();
fpsimd_save();
- set_thread_flag(TIF_FOREIGN_FPSTATE);
}
fpsimd_flush_task_state(task);
@@ -624,12 +620,6 @@ int sve_get_current_vl(void)
return sve_prctl_status(0);
}
-/*
- * Bitmap for temporary storage of the per-CPU set of supported vector lengths
- * during secondary boot.
- */
-static DECLARE_BITMAP(sve_secondary_vq_map, SVE_VQ_MAX);
-
static void sve_probe_vqs(DECLARE_BITMAP(map, SVE_VQ_MAX))
{
unsigned int vq, vl;
@@ -644,40 +634,82 @@ static void sve_probe_vqs(DECLARE_BITMAP(map, SVE_VQ_MAX))
write_sysreg_s(zcr | (vq - 1), SYS_ZCR_EL1); /* self-syncing */
vl = sve_get_vl();
vq = sve_vq_from_vl(vl); /* skip intervening lengths */
- set_bit(vq_to_bit(vq), map);
+ set_bit(__vq_to_bit(vq), map);
}
}
+/*
+ * Initialise the set of known supported VQs for the boot CPU.
+ * This is called during kernel boot, before secondary CPUs are brought up.
+ */
void __init sve_init_vq_map(void)
{
sve_probe_vqs(sve_vq_map);
+ bitmap_copy(sve_vq_partial_map, sve_vq_map, SVE_VQ_MAX);
}
/*
* If we haven't committed to the set of supported VQs yet, filter out
* those not supported by the current CPU.
+ * This function is called during the bring-up of early secondary CPUs only.
*/
void sve_update_vq_map(void)
{
- sve_probe_vqs(sve_secondary_vq_map);
- bitmap_and(sve_vq_map, sve_vq_map, sve_secondary_vq_map, SVE_VQ_MAX);
+ DECLARE_BITMAP(tmp_map, SVE_VQ_MAX);
+
+ sve_probe_vqs(tmp_map);
+ bitmap_and(sve_vq_map, sve_vq_map, tmp_map, SVE_VQ_MAX);
+ bitmap_or(sve_vq_partial_map, sve_vq_partial_map, tmp_map, SVE_VQ_MAX);
}
-/* Check whether the current CPU supports all VQs in the committed set */
+/*
+ * Check whether the current CPU supports all VQs in the committed set.
+ * This function is called during the bring-up of late secondary CPUs only.
+ */
int sve_verify_vq_map(void)
{
- int ret = 0;
+ DECLARE_BITMAP(tmp_map, SVE_VQ_MAX);
+ unsigned long b;
- sve_probe_vqs(sve_secondary_vq_map);
- bitmap_andnot(sve_secondary_vq_map, sve_vq_map, sve_secondary_vq_map,
- SVE_VQ_MAX);
- if (!bitmap_empty(sve_secondary_vq_map, SVE_VQ_MAX)) {
+ sve_probe_vqs(tmp_map);
+
+ bitmap_complement(tmp_map, tmp_map, SVE_VQ_MAX);
+ if (bitmap_intersects(tmp_map, sve_vq_map, SVE_VQ_MAX)) {
pr_warn("SVE: cpu%d: Required vector length(s) missing\n",
smp_processor_id());
- ret = -EINVAL;
+ return -EINVAL;
}
- return ret;
+ if (!IS_ENABLED(CONFIG_KVM) || !is_hyp_mode_available())
+ return 0;
+
+ /*
+ * For KVM, it is necessary to ensure that this CPU doesn't
+ * support any vector length that guests may have probed as
+ * unsupported.
+ */
+
+ /* Recover the set of supported VQs: */
+ bitmap_complement(tmp_map, tmp_map, SVE_VQ_MAX);
+ /* Find VQs supported that are not globally supported: */
+ bitmap_andnot(tmp_map, tmp_map, sve_vq_map, SVE_VQ_MAX);
+
+ /* Find the lowest such VQ, if any: */
+ b = find_last_bit(tmp_map, SVE_VQ_MAX);
+ if (b >= SVE_VQ_MAX)
+ return 0; /* no mismatches */
+
+ /*
+ * Mismatches above sve_max_virtualisable_vl are fine, since
+ * no guest is allowed to configure ZCR_EL2.LEN to exceed this:
+ */
+ if (sve_vl_from_vq(__bit_to_vq(b)) <= sve_max_virtualisable_vl) {
+ pr_warn("SVE: cpu%d: Unsupported vector length(s) present\n",
+ smp_processor_id());
+ return -EINVAL;
+ }
+
+ return 0;
}
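
[To make the two bitmap tests above concrete, here is a self-contained rendering of the set algebra with hypothetical VQ sets — plain masks stand in for the kernel's DECLARE_BITMAP() maps, and the reversed bit order is ignored since only the algebra matters:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint8_t committed = 0x0b;   /* system-wide guaranteed VQs */
            uint8_t this_cpu  = 0x03;   /* VQs probed on this late CPU */

            /* Test 1: a required VQ is missing -> kick the CPU out. */
            if (~this_cpu & committed)
                    printf("missing required VQs: 0x%x\n",
                           (unsigned)(~this_cpu & committed) & 0xff);

            /* Test 2 (KVM only): VQs present here but not guaranteed
             * everywhere; harmful only at or below sve_max_virtualisable_vl. */
            uint8_t extra = this_cpu & ~committed;
            if (extra)
                    printf("locally supported, unvirtualisable: 0x%x\n", extra);
            return 0;
    }

Here the late CPU lacks a committed VQ (bit 0x08), so the first test rejects it before the KVM check is reached.]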
static void __init sve_efi_setup(void)
@@ -744,6 +776,8 @@ u64 read_zcr_features(void)
void __init sve_setup(void)
{
u64 zcr;
+ DECLARE_BITMAP(tmp_map, SVE_VQ_MAX);
+ unsigned long b;
if (!system_supports_sve())
return;
@@ -753,8 +787,8 @@ void __init sve_setup(void)
* so sve_vq_map must have at least SVE_VQ_MIN set.
* If something went wrong, at least try to patch it up:
*/
- if (WARN_ON(!test_bit(vq_to_bit(SVE_VQ_MIN), sve_vq_map)))
- set_bit(vq_to_bit(SVE_VQ_MIN), sve_vq_map);
+ if (WARN_ON(!test_bit(__vq_to_bit(SVE_VQ_MIN), sve_vq_map)))
+ set_bit(__vq_to_bit(SVE_VQ_MIN), sve_vq_map);
zcr = read_sanitised_ftr_reg(SYS_ZCR_EL1);
sve_max_vl = sve_vl_from_vq((zcr & ZCR_ELx_LEN_MASK) + 1);
@@ -772,11 +806,31 @@ void __init sve_setup(void)
*/
sve_default_vl = find_supported_vector_length(64);
+ bitmap_andnot(tmp_map, sve_vq_partial_map, sve_vq_map,
+ SVE_VQ_MAX);
+
+ b = find_last_bit(tmp_map, SVE_VQ_MAX);
+ if (b >= SVE_VQ_MAX)
+ /* No non-virtualisable VLs found */
+ sve_max_virtualisable_vl = SVE_VQ_MAX;
+ else if (WARN_ON(b == SVE_VQ_MAX - 1))
+ /* No virtualisable VLs? This is architecturally forbidden. */
+ sve_max_virtualisable_vl = SVE_VQ_MIN;
+ else /* b + 1 < SVE_VQ_MAX */
+ sve_max_virtualisable_vl = sve_vl_from_vq(__bit_to_vq(b + 1));
+
+ if (sve_max_virtualisable_vl > sve_max_vl)
+ sve_max_virtualisable_vl = sve_max_vl;
+
pr_info("SVE: maximum available vector length %u bytes per vector\n",
sve_max_vl);
pr_info("SVE: default vector length %u bytes per vector\n",
sve_default_vl);
+ /* KVM decides whether to support mismatched systems. Just warn here: */
+ if (sve_max_virtualisable_vl < sve_max_vl)
+ pr_warn("SVE: unvirtualisable vector lengths present\n");
+
sve_efi_setup();
}
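
[A worked (hypothetical) example of the computation above: if the boot CPU supports VQs {1,2,4,8} but an early secondary supports only {1,2,8}, then sve_vq_map ends up as {1,2,8} and sve_vq_partial_map as {1,2,4,8}, so tmp_map = {4}. Under the reversed bit encoding, find_last_bit() returns the bit of the smallest VQ in tmp_map, here VQ 4, and b + 1 denotes the next VQ down, VQ 3, giving sve_max_virtualisable_vl = 48 bytes. Guests are thus capped below the first vector length whose support varies across CPUs, and since 48 < sve_max_vl (128 bytes here, as every CPU supports VQ 8), the "unvirtualisable vector lengths present" warning fires.]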
@@ -816,12 +870,11 @@ asmlinkage void do_sve_acc(unsigned int esr, struct pt_regs *regs)
local_bh_disable();
fpsimd_save();
- fpsimd_to_sve(current);
/* Force ret_to_user to reload the registers: */
fpsimd_flush_task_state(current);
- set_thread_flag(TIF_FOREIGN_FPSTATE);
+ fpsimd_to_sve(current);
if (test_and_set_thread_flag(TIF_SVE))
WARN_ON(1); /* SVE access shouldn't have trapped */
@@ -894,9 +947,9 @@ void fpsimd_flush_thread(void)
local_bh_disable();
+ fpsimd_flush_task_state(current);
memset(&current->thread.uw.fpsimd_state, 0,
sizeof(current->thread.uw.fpsimd_state));
- fpsimd_flush_task_state(current);
if (system_supports_sve()) {
clear_thread_flag(TIF_SVE);
@@ -933,8 +986,6 @@ void fpsimd_flush_thread(void)
current->thread.sve_vl_onexec = 0;
}
- set_thread_flag(TIF_FOREIGN_FPSTATE);
-
local_bh_enable();
}
@@ -974,6 +1025,8 @@ void fpsimd_bind_task_to_cpu(void)
this_cpu_ptr(&fpsimd_last_state);
last->st = &current->thread.uw.fpsimd_state;
+ last->sve_state = current->thread.sve_state;
+ last->sve_vl = current->thread.sve_vl;
current->thread.fpsimd_cpu = smp_processor_id();
if (system_supports_sve()) {
@@ -987,7 +1040,8 @@ void fpsimd_bind_task_to_cpu(void)
}
}
-void fpsimd_bind_state_to_cpu(struct user_fpsimd_state *st)
+void fpsimd_bind_state_to_cpu(struct user_fpsimd_state *st, void *sve_state,
+ unsigned int sve_vl)
{
struct fpsimd_last_state_struct *last =
this_cpu_ptr(&fpsimd_last_state);
@@ -995,6 +1049,8 @@ void fpsimd_bind_state_to_cpu(struct user_fpsimd_state *st)
WARN_ON(!in_softirq() && !irqs_disabled());
last->st = st;
+ last->sve_state = sve_state;
+ last->sve_vl = sve_vl;
}
/*
@@ -1043,12 +1099,29 @@ void fpsimd_update_current_state(struct user_fpsimd_state const *state)
/*
* Invalidate live CPU copies of task t's FPSIMD state
+ *
+ * This function may be called with preemption enabled. The barrier()
+ * ensures that the assignment to fpsimd_cpu is visible to any
+ * preemption/softirq that could race with set_tsk_thread_flag(), so
+ * that TIF_FOREIGN_FPSTATE cannot be spuriously re-cleared.
+ *
+ * The final barrier ensures that TIF_FOREIGN_FPSTATE is seen set by any
+ * subsequent code.
*/
void fpsimd_flush_task_state(struct task_struct *t)
{
t->thread.fpsimd_cpu = NR_CPUS;
+
+ barrier();
+ set_tsk_thread_flag(t, TIF_FOREIGN_FPSTATE);
+
+ barrier();
}
+/*
+ * Invalidate any task's FPSIMD state that is present on this cpu.
+ * This function must be called with softirqs disabled.
+ */
void fpsimd_flush_cpu_state(void)
{
__this_cpu_write(fpsimd_last_state.st, NULL);
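
[The reason several hunks above could drop their manual set_thread_flag(TIF_FOREIGN_FPSTATE) calls is that fpsimd_flush_task_state() now sets the flag itself, with barrier()s ordering it against the fpsimd_cpu update. For context, the consumer on the context-switch path — paraphrased from fpsimd_thread_switch() in this same file, not part of this hunk — recomputes the flag from exactly the state these functions manage:

    /* Paraphrase, not verbatim: next's FPSIMD regs are live in hardware only
     * if this CPU last bound them AND next last ran its FPSIMD here. */
    wrong_task = __this_cpu_read(fpsimd_last_state.st) !=
                            &next->thread.uw.fpsimd_state;
    wrong_cpu = next->thread.fpsimd_cpu != smp_processor_id();

    update_tsk_thread_flag(next, TIF_FOREIGN_FPSTATE,
                           wrong_task || wrong_cpu);

Invalidating either side (fpsimd_cpu, or the per-CPU st pointer) therefore forces a reload on the next return to user.]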
diff --git a/arch/arm64/kernel/hibernate.c b/arch/arm64/kernel/hibernate.c
index 9859e1178e6b..9341fcc6e809 100644
--- a/arch/arm64/kernel/hibernate.c
+++ b/arch/arm64/kernel/hibernate.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*:
* Hibernate support specific for ARM64
*
@@ -11,8 +12,6 @@
* https://patchwork.kernel.org/patch/96442/
*
* Copyright (C) 2006 Rafael J. Wysocki <rjw@sisk.pl>
- *
- * License terms: GNU General Public License (GPL) version 2
*/
#define pr_fmt(x) "hibernate: " x
#include <linux/cpu.h>
diff --git a/arch/arm64/kernel/kaslr.c b/arch/arm64/kernel/kaslr.c
index b09b6f75f759..06941c1fe418 100644
--- a/arch/arm64/kernel/kaslr.c
+++ b/arch/arm64/kernel/kaslr.c
@@ -145,15 +145,15 @@ u64 __init kaslr_early_init(u64 dt_phys)
if (IS_ENABLED(CONFIG_RANDOMIZE_MODULE_REGION_FULL)) {
/*
- * Randomize the module region over a 4 GB window covering the
+ * Randomize the module region over a 2 GB window covering the
* kernel. This reduces the risk of modules leaking information
* about the address of the kernel itself, but results in
* branches between modules and the core kernel that are
* resolved via PLTs. (Branches between modules will be
* resolved normally.)
*/
- module_range = SZ_4G - (u64)(_end - _stext);
- module_alloc_base = max((u64)_end + offset - SZ_4G,
+ module_range = SZ_2G - (u64)(_end - _stext);
+ module_alloc_base = max((u64)_end + offset - SZ_2G,
(u64)MODULES_VADDR);
} else {
/*
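
[The 4 GB to 2 GB shrink lines up with the signed interpretation of 32-bit place-relative references discussed in the module.c hunk below: an s32 spans [-2^31, 2^31), i.e. roughly +/-2 GB, so keeping the module region within a 2 GB window of the kernel image guarantees that any 32-bit PC-relative reference between the two (for example a relative __ksymtab entry) satisfies |target - place| < 2^31 and stays representable.]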
diff --git a/arch/arm64/kernel/module.c b/arch/arm64/kernel/module.c
index f713e2fc4d75..dd080837e6a9 100644
--- a/arch/arm64/kernel/module.c
+++ b/arch/arm64/kernel/module.c
@@ -56,7 +56,7 @@ void *module_alloc(unsigned long size)
* can simply omit this fallback in that case.
*/
p = __vmalloc_node_range(size, MODULE_ALIGN, module_alloc_base,
- module_alloc_base + SZ_4G, GFP_KERNEL,
+ module_alloc_base + SZ_2G, GFP_KERNEL,
PAGE_KERNEL_EXEC, 0, NUMA_NO_NODE,
__builtin_return_address(0));
@@ -96,16 +96,50 @@ static int reloc_data(enum aarch64_reloc_op op, void *place, u64 val, int len)
{
s64 sval = do_reloc(op, place, val);
+ /*
+ * The ELF psABI for AArch64 documents the 16-bit and 32-bit place
+ * relative and absolute relocations as having a range of [-2^15, 2^16)
+ * or [-2^31, 2^32), respectively. However, in order to be able to
+ * detect overflows reliably, we have to choose whether we interpret
+ * such quantities as signed or as unsigned, and stick with it.
+ * The way we organize our address space requires a signed
+ * interpretation of 32-bit relative references, so let's use that
+ * for all R_AARCH64_PRELxx relocations. This means our upper
+ * bound for overflow detection should be Sxx_MAX rather than Uxx_MAX.
+ */
+
switch (len) {
case 16:
*(s16 *)place = sval;
- if (sval < S16_MIN || sval > U16_MAX)
- return -ERANGE;
+ switch (op) {
+ case RELOC_OP_ABS:
+ if (sval < 0 || sval > U16_MAX)
+ return -ERANGE;
+ break;
+ case RELOC_OP_PREL:
+ if (sval < S16_MIN || sval > S16_MAX)
+ return -ERANGE;
+ break;
+ default:
+ pr_err("Invalid 16-bit data relocation (%d)\n", op);
+ return 0;
+ }
break;
case 32:
*(s32 *)place = sval;
- if (sval < S32_MIN || sval > U32_MAX)
- return -ERANGE;
+ switch (op) {
+ case RELOC_OP_ABS:
+ if (sval < 0 || sval > U32_MAX)
+ return -ERANGE;
+ break;
+ case RELOC_OP_PREL:
+ if (sval < S32_MIN || sval > S32_MAX)
+ return -ERANGE;
+ break;
+ default:
+ pr_err("Invalid 32-bit data relocation (%d)\n", op);
+ return 0;
+ }
break;
case 64:
*(s64 *)place = sval;
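
[The distinction the new switch enforces is easiest to see with a value that is legal in one interpretation and not the other. A standalone sketch with a hypothetical relocation result, mirroring the bounds used above:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            int64_t sval = 40000;  /* hypothetical relocation result */

            /* 16-bit absolute (R_AARCH64_ABS16): unsigned interpretation;
             * 40000 <= UINT16_MAX, so it fits. */
            printf("ABS16:  %s\n",
                   (sval < 0 || sval > UINT16_MAX) ? "overflow" : "ok");

            /* 16-bit place-relative (R_AARCH64_PREL16): signed interpretation;
             * 40000 > INT16_MAX, so the same value must now be rejected. */
            printf("PREL16: %s\n",
                   (sval < INT16_MIN || sval > INT16_MAX) ? "overflow" : "ok");
            return 0;
    }

The old code applied the union of both ranges (S16_MIN..U16_MAX) to every relocation, so it would have silently accepted 40000 as a PREL16 and truncated it.]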
diff --git a/arch/arm64/kernel/paravirt.c b/arch/arm64/kernel/paravirt.c
index 75c158b0353f..4cfed91fe256 100644
--- a/arch/arm64/kernel/paravirt.c
+++ b/arch/arm64/kernel/paravirt.c
@@ -1,12 +1,5 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*
* Copyright (C) 2013 Citrix Systems
*
diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c
index 6164d389eed6..348d12eec566 100644
--- a/arch/arm64/kernel/perf_event.c
+++ b/arch/arm64/kernel/perf_event.c
@@ -26,6 +26,7 @@
#include <linux/acpi.h>
#include <linux/clocksource.h>
+#include <linux/kvm_host.h>
#include <linux/of.h>
#include <linux/perf/arm_pmu.h>
#include <linux/platform_device.h>
@@ -528,12 +529,21 @@ static inline int armv8pmu_enable_counter(int idx)
static inline void armv8pmu_enable_event_counter(struct perf_event *event)
{
+ struct perf_event_attr *attr = &event->attr;
int idx = event->hw.idx;
+ u32 counter_bits = BIT(ARMV8_IDX_TO_COUNTER(idx));
- armv8pmu_enable_counter(idx);
if (armv8pmu_event_is_chained(event))
- armv8pmu_enable_counter(idx - 1);
- isb();
+ counter_bits |= BIT(ARMV8_IDX_TO_COUNTER(idx - 1));
+
+ kvm_set_pmu_events(counter_bits, attr);
+
+ /* We rely on the hypervisor switch code to enable guest counters */
+ if (!kvm_pmu_counter_deferred(attr)) {
+ armv8pmu_enable_counter(idx);
+ if (armv8pmu_event_is_chained(event))
+ armv8pmu_enable_counter(idx - 1);
+ }
}
static inline int armv8pmu_disable_counter(int idx)
@@ -546,11 +556,21 @@ static inline int armv8pmu_disable_counter(int idx)
static inline void armv8pmu_disable_event_counter(struct perf_event *event)
{
struct hw_perf_event *hwc = &event->hw;
+ struct perf_event_attr *attr = &event->attr;
int idx = hwc->idx;
+ u32 counter_bits = BIT(ARMV8_IDX_TO_COUNTER(idx));
if (armv8pmu_event_is_chained(event))
- armv8pmu_disable_counter(idx - 1);
- armv8pmu_disable_counter(idx);
+ counter_bits |= BIT(ARMV8_IDX_TO_COUNTER(idx - 1));
+
+ kvm_clr_pmu_events(counter_bits);
+
+ /* We rely on the hypervisor switch code to disable guest counters */
+ if (!kvm_pmu_counter_deferred(attr)) {
+ if (armv8pmu_event_is_chained(event))
+ armv8pmu_disable_counter(idx - 1);
+ armv8pmu_disable_counter(idx);
+ }
}
static inline int armv8pmu_enable_intens(int idx)
@@ -827,14 +847,23 @@ static int armv8pmu_set_event_filter(struct hw_perf_event *event,
* with other architectures (x86 and Power).
*/
if (is_kernel_in_hyp_mode()) {
- if (!attr->exclude_kernel)
+ if (!attr->exclude_kernel && !attr->exclude_host)
config_base |= ARMV8_PMU_INCLUDE_EL2;
- } else {
- if (attr->exclude_kernel)
+ if (attr->exclude_guest)
config_base |= ARMV8_PMU_EXCLUDE_EL1;
- if (!attr->exclude_hv)
+ if (attr->exclude_host)
+ config_base |= ARMV8_PMU_EXCLUDE_EL0;
+ } else {
+ if (!attr->exclude_hv && !attr->exclude_host)
config_base |= ARMV8_PMU_INCLUDE_EL2;
}
+
+ /*
+ * Filter out !VHE kernels and guest kernels
+ */
+ if (attr->exclude_kernel)
+ config_base |= ARMV8_PMU_EXCLUDE_EL1;
+
if (attr->exclude_user)
config_base |= ARMV8_PMU_EXCLUDE_EL0;
@@ -864,6 +893,9 @@ static void armv8pmu_reset(void *info)
armv8pmu_disable_intens(idx);
}
+ /* Clear the counters we flip at guest entry/exit */
+ kvm_clr_pmu_events(U32_MAX);
+
/*
* Initialize & Reset PMNC. Request overflow interrupt for
* 64 bit cycle counter but cheat in armv8pmu_write_counter().
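
[From userspace, the deferred-enable path above is driven by the perf_event_attr exclusion bits. A minimal sketch of opening a guest-only cycle counter on CPU 0 — Linux-only, needs privileges, and the guest-side counting assumes the KVM world-switch changes that accompany this series:

    #include <linux/perf_event.h>
    #include <string.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    int main(void)
    {
            struct perf_event_attr attr;

            memset(&attr, 0, sizeof(attr));
            attr.size = sizeof(attr);
            attr.type = PERF_TYPE_HARDWARE;
            attr.config = PERF_COUNT_HW_CPU_CYCLES;
            attr.exclude_host = 1;  /* count only while a guest runs: the PMU
                                     * driver defers the enable to guest entry */

            /* pid = -1, cpu = 0: all tasks on CPU 0 (privileged). */
            int fd = syscall(__NR_perf_event_open, &attr, -1, 0, -1, 0);
            return fd < 0;
    }

With exclude_host set, kvm_pmu_counter_deferred() is true and armv8pmu_enable_event_counter() leaves the hardware enable to the hypervisor switch code, as the comment above notes.]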
diff --git a/arch/arm64/kernel/probes/decode-insn.c b/arch/arm64/kernel/probes/decode-insn.c
index 6bf6657a5a52..b78fac9e546c 100644
--- a/arch/arm64/kernel/probes/decode-insn.c
+++ b/arch/arm64/kernel/probes/decode-insn.c
@@ -1,16 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* arch/arm64/kernel/probes/decode-insn.c
*
* Copyright (C) 2013 Linaro Limited.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
*/
#include <linux/kernel.h>
diff --git a/arch/arm64/kernel/probes/decode-insn.h b/arch/arm64/kernel/probes/decode-insn.h
index 192ab007bacb..8b758c5a2062 100644
--- a/arch/arm64/kernel/probes/decode-insn.h
+++ b/arch/arm64/kernel/probes/decode-insn.h
@@ -1,16 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* arch/arm64/kernel/probes/decode-insn.h
*
* Copyright (C) 2013 Linaro Limited.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
*/
#ifndef _ARM_KERNEL_KPROBES_ARM64_H
diff --git a/arch/arm64/kernel/probes/kprobes.c b/arch/arm64/kernel/probes/kprobes.c
index 2509fcb6d404..88ce502c8e6f 100644
--- a/arch/arm64/kernel/probes/kprobes.c
+++ b/arch/arm64/kernel/probes/kprobes.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* arch/arm64/kernel/probes/kprobes.c
*
@@ -5,16 +6,6 @@
*
* Copyright (C) 2013 Linaro Limited.
* Author: Sandeepa Prabhu <sandeepa.prabhu@linaro.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
*/
#include <linux/kasan.h>
#include <linux/kernel.h>
diff --git a/arch/arm64/kernel/probes/simulate-insn.c b/arch/arm64/kernel/probes/simulate-insn.c
index be05868418ee..25f67ec59635 100644
--- a/arch/arm64/kernel/probes/simulate-insn.c
+++ b/arch/arm64/kernel/probes/simulate-insn.c
@@ -1,16 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* arch/arm64/kernel/probes/simulate-insn.c
*
* Copyright (C) 2013 Linaro Limited.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
*/
#include <linux/bitops.h>
diff --git a/arch/arm64/kernel/probes/simulate-insn.h b/arch/arm64/kernel/probes/simulate-insn.h
index 050bde683c2d..e065dc92218e 100644
--- a/arch/arm64/kernel/probes/simulate-insn.h
+++ b/arch/arm64/kernel/probes/simulate-insn.h
@@ -1,16 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* arch/arm64/kernel/probes/simulate-insn.h
*
* Copyright (C) 2013 Linaro Limited
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
*/
#ifndef _ARM_KERNEL_KPROBES_SIMULATE_INSN_H
diff --git a/arch/arm64/kernel/psci.c b/arch/arm64/kernel/psci.c
index 8cdaf25e99cd..85ee7d07889e 100644
--- a/arch/arm64/kernel/psci.c
+++ b/arch/arm64/kernel/psci.c
@@ -1,12 +1,5 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*
* Copyright (C) 2013 ARM Limited
*
diff --git a/arch/arm64/kernel/signal.c b/arch/arm64/kernel/signal.c
index 867a7cea70e5..a9b0485df074 100644
--- a/arch/arm64/kernel/signal.c
+++ b/arch/arm64/kernel/signal.c
@@ -296,11 +296,6 @@ static int restore_sve_fpsimd_context(struct user_ctxs *user)
*/
fpsimd_flush_task_state(current);
- barrier();
- /* From now, fpsimd_thread_switch() won't clear TIF_FOREIGN_FPSTATE */
-
- set_thread_flag(TIF_FOREIGN_FPSTATE);
- barrier();
/* From now, fpsimd_thread_switch() won't touch thread.sve_state */
sve_alloc(current);
diff --git a/arch/arm64/kernel/smccc-call.S b/arch/arm64/kernel/smccc-call.S
index 184332286a81..54655273d1e0 100644
--- a/arch/arm64/kernel/smccc-call.S
+++ b/arch/arm64/kernel/smccc-call.S
@@ -1,15 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2015, Linaro Limited
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License Version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
*/
#include <linux/linkage.h>
#include <linux/arm-smccc.h>
diff --git a/arch/arm64/kernel/sys.c b/arch/arm64/kernel/sys.c
index 6f91e8116514..fe20c461582a 100644
--- a/arch/arm64/kernel/sys.c
+++ b/arch/arm64/kernel/sys.c
@@ -47,22 +47,26 @@ SYSCALL_DEFINE1(arm64_personality, unsigned int, personality)
return ksys_personality(personality);
}
+asmlinkage long sys_ni_syscall(void);
+
+asmlinkage long __arm64_sys_ni_syscall(const struct pt_regs *__unused)
+{
+ return sys_ni_syscall();
+}
+
/*
* Wrappers to pass the pt_regs argument.
*/
-#define sys_personality sys_arm64_personality
-
-asmlinkage long sys_ni_syscall(const struct pt_regs *);
-#define __arm64_sys_ni_syscall sys_ni_syscall
+#define __arm64_sys_personality __arm64_sys_arm64_personality
#undef __SYSCALL
#define __SYSCALL(nr, sym) asmlinkage long __arm64_##sym(const struct pt_regs *);
#include <asm/unistd.h>
#undef __SYSCALL
-#define __SYSCALL(nr, sym) [nr] = (syscall_fn_t)__arm64_##sym,
+#define __SYSCALL(nr, sym) [nr] = __arm64_##sym,
const syscall_fn_t sys_call_table[__NR_syscalls] = {
- [0 ... __NR_syscalls - 1] = (syscall_fn_t)sys_ni_syscall,
+ [0 ... __NR_syscalls - 1] = __arm64_sys_ni_syscall,
#include <asm/unistd.h>
};
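
[The point of replacing the casts with a real __arm64_sys_ni_syscall() wrapper is that every table entry now has the same prototype, so the (syscall_fn_t) casts — which could hide signature mismatches — go away. A reduced, self-contained model of the table-building technique, with a hypothetical syscall list; the [0 ... N-1] range designator is a GCC/Clang extension the kernel relies on:

    #include <stdio.h>

    struct pt_regs { long regs[31]; };
    typedef long (*syscall_fn_t)(const struct pt_regs *);

    static long __arm64_sys_ni_syscall(const struct pt_regs *regs)
    {
            (void)regs;
            return -38;  /* -ENOSYS */
    }

    static long __arm64_sys_getpid(const struct pt_regs *regs)
    {
            (void)regs;
            return 42;
    }

    /* Stand-in for <asm/unistd.h>, which the kernel includes once per
     * definition of __SYSCALL. */
    #define SYSCALL_LIST __SYSCALL(0, sys_getpid)

    #define __NR_syscalls 4

    #define __SYSCALL(nr, sym) [nr] = __arm64_##sym,
    static const syscall_fn_t sys_call_table[__NR_syscalls] = {
            /* Default every slot first; a later designated initializer for
             * the same index overrides an earlier one. */
            [0 ... __NR_syscalls - 1] = __arm64_sys_ni_syscall,
            SYSCALL_LIST
    };
    #undef __SYSCALL

    int main(void)
    {
            struct pt_regs regs = { {0} };
            printf("%ld %ld\n", sys_call_table[0](&regs),
                   sys_call_table[3](&regs));  /* prints: 42 -38 */
            return 0;
    }
]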
diff --git a/arch/arm64/kernel/sys32.c b/arch/arm64/kernel/sys32.c
index 0f8bcb7de700..3c80a40c1c9d 100644
--- a/arch/arm64/kernel/sys32.c
+++ b/arch/arm64/kernel/sys32.c
@@ -133,17 +133,14 @@ COMPAT_SYSCALL_DEFINE6(aarch32_fallocate, int, fd, int, mode,
return ksys_fallocate(fd, mode, arg_u64(offset), arg_u64(len));
}
-asmlinkage long sys_ni_syscall(const struct pt_regs *);
-#define __arm64_sys_ni_syscall sys_ni_syscall
-
#undef __SYSCALL
#define __SYSCALL(nr, sym) asmlinkage long __arm64_##sym(const struct pt_regs *);
#include <asm/unistd32.h>
#undef __SYSCALL
-#define __SYSCALL(nr, sym) [nr] = (syscall_fn_t)__arm64_##sym,
+#define __SYSCALL(nr, sym) [nr] = __arm64_##sym,
const syscall_fn_t compat_sys_call_table[__NR_compat_syscalls] = {
- [0 ... __NR_compat_syscalls - 1] = (syscall_fn_t)sys_ni_syscall,
+ [0 ... __NR_compat_syscalls - 1] = __arm64_sys_ni_syscall,
#include <asm/unistd32.h>
};
diff --git a/arch/arm64/kernel/syscall.c b/arch/arm64/kernel/syscall.c
index 5610ac01c1ec..871c739f060a 100644
--- a/arch/arm64/kernel/syscall.c
+++ b/arch/arm64/kernel/syscall.c
@@ -8,6 +8,7 @@
#include <linux/syscalls.h>
#include <asm/daifflags.h>
+#include <asm/debug-monitors.h>
#include <asm/fpsimd.h>
#include <asm/syscall.h>
#include <asm/thread_info.h>
@@ -60,6 +61,35 @@ static inline bool has_syscall_work(unsigned long flags)
int syscall_trace_enter(struct pt_regs *regs);
void syscall_trace_exit(struct pt_regs *regs);
+#ifdef CONFIG_ARM64_ERRATUM_1463225
+DECLARE_PER_CPU(int, __in_cortex_a76_erratum_1463225_wa);
+
+static void cortex_a76_erratum_1463225_svc_handler(void)
+{
+ u32 reg, val;
+
+ if (!unlikely(test_thread_flag(TIF_SINGLESTEP)))
+ return;
+
+ if (!unlikely(this_cpu_has_cap(ARM64_WORKAROUND_1463225)))
+ return;
+
+ __this_cpu_write(__in_cortex_a76_erratum_1463225_wa, 1);
+ reg = read_sysreg(mdscr_el1);
+ val = reg | DBG_MDSCR_SS | DBG_MDSCR_KDE;
+ write_sysreg(val, mdscr_el1);
+ asm volatile("msr daifclr, #8");
+ isb();
+
+ /* We will have taken a single-step exception by this point */
+
+ write_sysreg(reg, mdscr_el1);
+ __this_cpu_write(__in_cortex_a76_erratum_1463225_wa, 0);
+}
+#else
+static void cortex_a76_erratum_1463225_svc_handler(void) { }
+#endif /* CONFIG_ARM64_ERRATUM_1463225 */
+
static void el0_svc_common(struct pt_regs *regs, int scno, int sc_nr,
const syscall_fn_t syscall_table[])
{
@@ -68,6 +98,7 @@ static void el0_svc_common(struct pt_regs *regs, int scno, int sc_nr,
regs->orig_x0 = regs->regs[0];
regs->syscallno = scno;
+ cortex_a76_erratum_1463225_svc_handler();
local_daif_restore(DAIF_PROCCTX);
user_exit();
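
[To close the loop on the workaround: the SVC handler above deliberately takes a single-step exception from kernel context (MDSCR_EL1.SS and KDE set, then "msr daifclr, #8" unmasks debug) so the erratum's stale step is consumed at a known point. The per-CPU flag lets the debug-exception path tell this dummy step from a real one; the actual consumer lives in the fault/debug code outside this directory, but its shape is roughly as follows — names and details hypothetical:

    /* Hypothetical sketch of the single-step consumer: */
    static int erratum_1463225_step_handler(struct pt_regs *regs)
    {
            if (user_mode(regs))
                    return 0;       /* userspace steps are handled normally */

            if (!__this_cpu_read(__in_cortex_a76_erratum_1463225_wa))
                    return 0;       /* a genuine kernel single-step */

            /* This is the dummy step taken by
             * cortex_a76_erratum_1463225_svc_handler(): swallow it and
             * return there, where MDSCR_EL1 is restored and the flag
             * cleared. */
            return 1;
    }
]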
diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
index ade32046f3fe..177c0f6ebabf 100644
--- a/arch/arm64/kernel/traps.c
+++ b/arch/arm64/kernel/traps.c
@@ -168,7 +168,6 @@ void show_stack(struct task_struct *tsk, unsigned long *sp)
static int __die(const char *str, int err, struct pt_regs *regs)
{
- struct task_struct *tsk = current;
static int die_counter;
int ret;
@@ -181,9 +180,6 @@ static int __die(const char *str, int err, struct pt_regs *regs)
return ret;
print_modules();
- pr_emerg("Process %.*s (pid: %d, stack limit = 0x%p)\n",
- TASK_COMM_LEN, tsk->comm, task_pid_nr(tsk),
- end_of_stack(tsk));
show_regs(regs);
if (!user_mode(regs))
@@ -256,7 +252,10 @@ void arm64_force_sig_fault(int signo, int code, void __user *addr,
const char *str)
{
arm64_show_signal(signo, str);
- force_sig_fault(signo, code, addr, current);
+ if (signo == SIGKILL)
+ force_sig(SIGKILL, current);
+ else
+ force_sig_fault(signo, code, addr, current);
}
void arm64_force_sig_mceerr(int code, void __user *addr, short lsb,
diff --git a/arch/arm64/kernel/vdso/Makefile b/arch/arm64/kernel/vdso/Makefile
index 744b9dbaba03..fa230ff09aa1 100644
--- a/arch/arm64/kernel/vdso/Makefile
+++ b/arch/arm64/kernel/vdso/Makefile
@@ -12,8 +12,8 @@ obj-vdso := gettimeofday.o note.o sigreturn.o
targets := $(obj-vdso) vdso.so vdso.so.dbg
obj-vdso := $(addprefix $(obj)/, $(obj-vdso))
-ldflags-y := -shared -nostdlib -soname=linux-vdso.so.1 \
- $(call ld-option, --hash-style=sysv) -n -T
+ldflags-y := -shared -nostdlib -soname=linux-vdso.so.1 --hash-style=sysv \
+ --build-id -n -T
# Disable gcov profiling for VDSO code
GCOV_PROFILE := n