@@ -1714,9 +1714,10 @@ bool kvm_vcpu_pmu_is_partitioned(struct kvm_vcpu *vcpu);
struct kvm_pmu_events *kvm_get_pmu_events(void);
void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu);
void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu);
-bool kvm_pmu_overflow_status(struct kvm_vcpu *vcpu);
+bool kvm_pmu_emul_overflow_status(struct kvm_vcpu *vcpu);
void kvm_pmu_load(struct kvm_vcpu *vcpu);
void kvm_pmu_put(struct kvm_vcpu *vcpu);
+bool kvm_pmu_part_overflow_status(struct kvm_vcpu *vcpu);
/*
* Updates the vcpu's view of the pmu events for this cpu.
@@ -320,7 +320,7 @@ void kvm_pmu_reprogram_counter_mask(struct kvm_vcpu *vcpu, u64 val)
* counter where the values of the global enable control, PMOVSSET_EL0[n], and
* PMINTENSET_EL1[n] are all 1.
*/
-bool kvm_pmu_overflow_status(struct kvm_vcpu *vcpu)
+bool kvm_pmu_emul_overflow_status(struct kvm_vcpu *vcpu)
{
u64 reg = __vcpu_sys_reg(vcpu, PMOVSSET_EL0);
@@ -457,7 +457,7 @@ static void kvm_pmu_perf_overflow(struct perf_event *perf_event,
kvm_pmu_counter_increment(vcpu, BIT(idx + 1),
ARMV8_PMUV3_PERFCTR_CHAIN);
- if (kvm_pmu_overflow_status(vcpu)) {
+ if (kvm_pmu_emul_overflow_status(vcpu)) {
kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
if (!in_nmi())
@@ -252,7 +252,7 @@ void kvm_pmu_load(struct kvm_vcpu *vcpu)
write_pmcr(val);
/*
- * Loading these registers is tricky because of
+ * Loading these registers is more intricate because of
* 1. Applying only the bits for guest counters (indicated by mask)
* 2. Setting and clearing are different registers
*/
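
To make the comment above concrete: the SET/CLR register pairs are write-one-to-set and write-one-to-clear, so restoring a saved value for only the guest-owned counters takes two masked writes. A minimal sketch (not part of this patch), using the write_pmcntenset()/write_pmcntenclr() accessors and an assumed guest counter mask:

	/*
	 * Illustrative sketch only: restore a saved counter-enable value
	 * for the guest-owned counters, leaving host-owned bits untouched.
	 */
	static void example_load_pmcnten(u64 saved, u64 guest_mask)
	{
		/* Enable the guest counters whose saved bit is 1 ... */
		write_pmcntenset(saved & guest_mask);
		/* ... and disable the guest counters whose saved bit is 0. */
		write_pmcntenclr(~saved & guest_mask);
	}
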
@@ -336,3 +336,23 @@ void kvm_pmu_handle_guest_irq(u64 govf)
__vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= govf;
}
+
+/**
+ * kvm_pmu_part_overflow_status() - Determine if any guest counters have overflowed
+ * @vcpu: Pointer to struct kvm_vcpu
+ *
+ * Determine if any guest counters have overflowed and therefore an
+ * IRQ needs to be injected into the guest.
+ *
+ * Return: True if there was an overflow, false otherwise
+ */
+bool kvm_pmu_part_overflow_status(struct kvm_vcpu *vcpu)
+{
+ struct arm_pmu *pmu = vcpu->kvm->arch.arm_pmu;
+ u64 mask = kvm_pmu_guest_counter_mask(pmu);
+ u64 pmovs = __vcpu_sys_reg(vcpu, PMOVSSET_EL0);
+ u64 pmint = read_pmintenset();
+ u64 pmcr = read_pmcr();
+
+ return (pmcr & ARMV8_PMU_PMCR_E) && (mask & pmovs & pmint);
+}
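
The check above relies on kvm_pmu_guest_counter_mask() to restrict PMOVSSET_EL0 and PMINTENSET_EL1 to the counters the guest owns; that helper is introduced elsewhere in the series. A rough sketch, assuming the guest owns the event counters below the partition boundary (a hypothetical hpmn field) plus the cycle counter:

	/*
	 * Hypothetical sketch, not from this patch: the real helper and
	 * the hpmn field name may differ.
	 */
	static u64 example_guest_counter_mask(struct arm_pmu *pmu)
	{
		u8 hpmn = pmu->hpmn;	/* assumed: first host-reserved counter */

		return (hpmn ? GENMASK(hpmn - 1, 0) : 0) |
		       BIT(ARMV8_PMU_CYCLE_IDX);
	}
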
@@ -425,7 +425,11 @@ static void kvm_pmu_update_state(struct kvm_vcpu *vcpu)
struct kvm_pmu *pmu = &vcpu->arch.pmu;
bool overflow;
- overflow = kvm_pmu_overflow_status(vcpu);
+ if (kvm_vcpu_pmu_is_partitioned(vcpu))
+ overflow = kvm_pmu_part_overflow_status(vcpu);
+ else
+ overflow = kvm_pmu_emul_overflow_status(vcpu);
+
if (pmu->irq_level == overflow)
return;
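
For context, the remainder of kvm_pmu_update_state() (unchanged by this patch) propagates the computed value to the guest's PMU interrupt line, roughly as below; the exact kvm_vgic_inject_irq() arguments vary between kernel versions, so treat this as a sketch rather than the literal upstream code:

	pmu->irq_level = overflow;

	if (likely(irqchip_in_kernel(vcpu->kvm))) {
		int ret = kvm_vgic_inject_irq(vcpu->kvm, vcpu, pmu->irq_num,
					      overflow, pmu);
		WARN_ON(ret);
	}
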
@@ -694,6 +698,7 @@ int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
return -EBUSY;
kvm_debug("Set kvm ARM PMU irq: %d\n", irq);
+
vcpu->arch.pmu.irq_num = irq;
return 0;
}
When we re-enter the VM after handling a PMU interrupt, determine
whether any of the guest counters overflowed and, if so, inject an
interrupt into the guest.

Signed-off-by: Colton Lewis <coltonlewis@google.com>
---
 arch/arm64/include/asm/kvm_host.h |  3 ++-
 arch/arm64/kvm/pmu-emul.c         |  4 ++--
 arch/arm64/kvm/pmu-part.c         | 22 +++++++++++++++++++++-
 arch/arm64/kvm/pmu.c              |  7 ++++++-
 4 files changed, 31 insertions(+), 5 deletions(-)