-rw-r--r--	arch/x86/kvm/vmx/pmu_intel.c |  5 -----
-rw-r--r--	arch/x86/kvm/x86.c           | 24 +++++++++++-------------
2 files changed, 11 insertions(+), 18 deletions(-)
diff --git a/arch/x86/kvm/vmx/pmu_intel.c b/arch/x86/kvm/vmx/pmu_intel.c
index 422f0a6562ac..3b324ce0b142 100644
--- a/arch/x86/kvm/vmx/pmu_intel.c
+++ b/arch/x86/kvm/vmx/pmu_intel.c
@@ -536,8 +536,6 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
 	pmu->pebs_enable_mask = ~0ull;
 	pmu->pebs_data_cfg_mask = ~0ull;
 
-	vcpu->arch.ia32_misc_enable_msr |= MSR_IA32_MISC_ENABLE_PMU_RO_MASK;
-
 	entry = kvm_find_cpuid_entry(vcpu, 0xa, 0);
 	if (!entry || !vcpu->kvm->arch.enable_pmu)
 		return;
@@ -548,8 +546,6 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
 	if (!pmu->version)
 		return;
 
-	vcpu->arch.ia32_misc_enable_msr |= MSR_IA32_MISC_ENABLE_EMON;
-
 	pmu->nr_arch_gp_counters = min_t(int, eax.split.num_counters,
 					 kvm_pmu_cap.num_counters_gp);
 	eax.split.bit_width = min_t(int, eax.split.bit_width,
@@ -611,7 +607,6 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
 	bitmap_set(pmu->all_valid_pmc_idx, INTEL_PMC_IDX_FIXED_VLBR, 1);
 
 	if (vcpu->arch.perf_capabilities & PERF_CAP_PEBS_FORMAT) {
-		vcpu->arch.ia32_misc_enable_msr &= ~MSR_IA32_MISC_ENABLE_PEBS_UNAVAIL;
 		if (vcpu->arch.perf_capabilities & PERF_CAP_PEBS_BASELINE) {
 			pmu->pebs_enable_mask = counter_mask;
 			pmu->reserved_bits &= ~ICL_EVENTSEL_ADAPTIVE;
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 00e23dc518e0..67bdd7e20534 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -3550,21 +3550,17 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 		break;
 	case MSR_IA32_MISC_ENABLE: {
 		u64 old_val = vcpu->arch.ia32_misc_enable_msr;
-		u64 pmu_mask = MSR_IA32_MISC_ENABLE_PMU_RO_MASK |
-			       MSR_IA32_MISC_ENABLE_EMON;
 
-		/* RO bits */
-		if (!msr_info->host_initiated &&
-		    ((old_val ^ data) & MSR_IA32_MISC_ENABLE_PMU_RO_MASK))
-			return 1;
+		if (!msr_info->host_initiated) {
+			/* RO bits */
+			if ((old_val ^ data) & MSR_IA32_MISC_ENABLE_PMU_RO_MASK)
+				return 1;
+
+			/* R bits, i.e. writes are ignored, but don't fault. */
+			data = data & ~MSR_IA32_MISC_ENABLE_EMON;
+			data |= old_val & MSR_IA32_MISC_ENABLE_EMON;
+		}
 
-		/*
-		 * For a dummy user space, the order of setting vPMU capabilities and
-		 * initialising MSR_IA32_MISC_ENABLE is not strictly guaranteed, so to
-		 * avoid inconsistent functionality we keep the vPMU bits unchanged here.
-		 */
-		data &= ~pmu_mask;
-		data |= old_val & pmu_mask;
 		if (!kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_MISC_ENABLE_NO_MWAIT) &&
 		    ((old_val ^ data) & MSR_IA32_MISC_ENABLE_MWAIT)) {
 			if (!guest_cpuid_has(vcpu, X86_FEATURE_XMM3))
@@ -11573,6 +11569,8 @@ void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
 		vcpu->arch.smbase = 0x30000;
 
 		vcpu->arch.msr_misc_features_enables = 0;
+		vcpu->arch.ia32_misc_enable_msr = MSR_IA32_MISC_ENABLE_PEBS_UNAVAIL |
+						  MSR_IA32_MISC_ENABLE_BTS_UNAVAIL;
 
 		__kvm_set_xcr(vcpu, 0, XFEATURE_MASK_FP);
 		__kvm_set_msr(vcpu, MSR_IA32_XSS, 0, true);
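
The diff above boils down to two behaviors plus a new reset value: guest writes to MSR_IA32_MISC_ENABLE may not change the read-only PMU bits (the BTS/PEBS "unavailable" bits), writes to the EMON bit are silently ignored rather than faulting, host-initiated writes are taken verbatim, and the MSR now starts life in kvm_vcpu_reset() with both "unavailable" bits set instead of being patched up in intel_pmu_refresh(). What follows is a minimal userspace C sketch of that masking pattern, for illustration only: the write_misc_enable() helper and the bit macros are assumptions of the sketch, not KVM code (the real MSR_IA32_MISC_ENABLE_* definitions live in arch/x86/include/asm/msr-index.h).

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative bit layout only, loosely mirroring the architectural MSR bits. */
#define EMON		(1ull << 7)	/* perfmon available; ignored on guest writes */
#define BTS_UNAVAIL	(1ull << 11)	/* read-only to the guest */
#define PEBS_UNAVAIL	(1ull << 12)	/* read-only to the guest */
#define PMU_RO_MASK	(BTS_UNAVAIL | PEBS_UNAVAIL)

/* Returns false ("fault") if a guest write tries to flip an RO bit. */
static bool write_misc_enable(uint64_t *msr, uint64_t data, bool host_initiated)
{
	uint64_t old_val = *msr;

	if (!host_initiated) {
		/* RO bits: any attempted change is rejected. */
		if ((old_val ^ data) & PMU_RO_MASK)
			return false;

		/* R bits: the write is ignored, the old value is preserved. */
		data &= ~EMON;
		data |= old_val & EMON;
	}

	*msr = data;
	return true;
}

int main(void)
{
	/* Reset value with the "unavailable" bits set, as in kvm_vcpu_reset() above. */
	uint64_t msr = PEBS_UNAVAIL | BTS_UNAVAIL;

	/* Guest tries to clear an RO bit: rejected, MSR unchanged. */
	printf("guest clears PEBS_UNAVAIL: %s\n",
	       write_misc_enable(&msr, msr & ~PEBS_UNAVAIL, false) ? "ok" : "#GP");

	/* Guest sets EMON: accepted, but the EMON bit itself is silently dropped. */
	write_misc_enable(&msr, msr | EMON, false);
	printf("EMON after guest write: %d\n", !!(msr & EMON));

	/* Host-initiated write is taken verbatim. */
	write_misc_enable(&msr, EMON, true);
	printf("MSR after host write: 0x%llx\n", (unsigned long long)msr);
	return 0;
}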