diff mbox series

[RFC,19/22] KVM: x86: Allow host and guest access to IA32_[AM]PERF

Message ID 20241121185315.3416855-20-mizhang@google.com
State New
Headers show
Series KVM: x86: Virtualize IA32_APERF and IA32_MPERF MSRs | expand

Commit Message

Mingwei Zhang Nov. 21, 2024, 6:53 p.m. UTC
Implement MSR read/write handlers for IA32_APERF and IA32_MPERF to
support both host and guest access:

- Host userspace access via KVM_[GS]ET_MSRS only reads/writes the
  snapshot values in vcpu->arch.aperfmperf
- Guest writes update both the hardware MSRs (via set_guest_[am]perf)
  and the snapshots
- For host-initiated writes of IA32_MPERF, record the current TSC to
  establish a new baseline for background cycle accumulation
- Guest reads don't reach these handlers, since the guest accesses the
  MSRs directly in hardware

Add both MSRs to msrs_to_save_base[] to ensure they are properly
serialized during vCPU state save/restore operations.

Signed-off-by: Mingwei Zhang <mizhang@google.com>
Co-developed-by: Jim Mattson <jmattson@google.com>
Signed-off-by: Jim Mattson <jmattson@google.com>
---
 arch/x86/kvm/x86.c | 42 ++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 42 insertions(+)
diff mbox series

Patch

diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index cd1f1ae86f83f..4394ecb291401 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -334,6 +334,7 @@  static const u32 msrs_to_save_base[] = {
 	MSR_IA32_UMWAIT_CONTROL,
 
 	MSR_IA32_XFD, MSR_IA32_XFD_ERR,
+	MSR_IA32_APERF, MSR_IA32_MPERF,
 };
 
 static const u32 msrs_to_save_pmu[] = {
@@ -4151,6 +4152,26 @@  int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 			return 1;
 		vcpu->arch.msr_misc_features_enables = data;
 		break;
+	case MSR_IA32_APERF:
+		if ((data || !msr_info->host_initiated) &&
+		    !guest_can_use(vcpu, X86_FEATURE_APERFMPERF))
+			return 1;
+
+		vcpu->arch.aperfmperf.guest_aperf = data;
+		if (unlikely(!msr_info->host_initiated))
+			set_guest_aperf(data);
+		break;
+	case MSR_IA32_MPERF:
+		if ((data || !msr_info->host_initiated) &&
+		    !guest_can_use(vcpu, X86_FEATURE_APERFMPERF))
+			return 1;
+
+		vcpu->arch.aperfmperf.guest_mperf = data;
+		if (likely(msr_info->host_initiated))
+			vcpu->arch.aperfmperf.host_tsc = rdtsc();
+		else
+			set_guest_mperf(data);
+		break;
 #ifdef CONFIG_X86_64
 	case MSR_IA32_XFD:
 		if (!msr_info->host_initiated &&
@@ -4524,6 +4545,22 @@  int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 		msr_info->data = vcpu->arch.guest_fpu.xfd_err;
 		break;
 #endif
+	case MSR_IA32_APERF:
+		/* Guest read access should never reach here. */
+		if (!msr_info->host_initiated)
+			return 1;
+
+		msr_info->data = vcpu->arch.aperfmperf.guest_aperf;
+		break;
+	case MSR_IA32_MPERF:
+		/* Guest read access should never reach here. */
+		if (!msr_info->host_initiated)
+			return 1;
+
+		if (vcpu->arch.mp_state != KVM_MP_STATE_HALTED)
+			kvm_accumulate_background_guest_mperf(vcpu);
+		msr_info->data = vcpu->arch.aperfmperf.guest_mperf;
+		break;
 	default:
 		if (kvm_pmu_is_valid_msr(vcpu, msr_info->index))
 			return kvm_pmu_get_msr(vcpu, msr_info);
@@ -7535,6 +7572,11 @@  static void kvm_probe_msr_to_save(u32 msr_index)
 		if (!(kvm_get_arch_capabilities() & ARCH_CAP_TSX_CTRL_MSR))
 			return;
 		break;
+	case MSR_IA32_APERF:
+	case MSR_IA32_MPERF:
+		if (!kvm_cpu_cap_has(KVM_X86_FEATURE_APERFMPERF))
+			return;
+		break;
 	default:
 		break;
 	}