|
@@ -21,6 +21,7 @@
|
|
|
#include <linux/perf_event.h>
|
|
|
#include <asm/kvm_emulate.h>
|
|
|
#include <kvm/arm_pmu.h>
|
|
|
+#include <kvm/arm_vgic.h>
|
|
|
|
|
|
/**
|
|
|
* kvm_pmu_get_counter_value - get PMU counter value
|
|
@@ -180,6 +181,71 @@ void kvm_pmu_overflow_set(struct kvm_vcpu *vcpu, u64 val)
|
|
|
kvm_vcpu_kick(vcpu);
|
|
|
}
|
|
|
|
|
|
+static void kvm_pmu_update_state(struct kvm_vcpu *vcpu)
|
|
|
+{
|
|
|
+ struct kvm_pmu *pmu = &vcpu->arch.pmu;
|
|
|
+ bool overflow;
|
|
|
+
|
|
|
+ if (!kvm_arm_pmu_v3_ready(vcpu))
|
|
|
+ return;
|
|
|
+
|
|
|
+ overflow = !!kvm_pmu_overflow_status(vcpu);
|
|
|
+ if (pmu->irq_level != overflow) {
|
|
|
+ pmu->irq_level = overflow;
|
|
|
+ kvm_vgic_inject_irq(vcpu->kvm, vcpu->vcpu_id,
|
|
|
+ pmu->irq_num, overflow);
|
|
|
+ }
|
|
|
+}
|
|
|
+
|
|
|
/**
 * kvm_pmu_flush_hwstate - flush pmu state to cpu
 * @vcpu: The vcpu pointer
 *
 * Check if the PMU has overflowed while we were running in the host, and inject
 * an interrupt if that was the case.
 *
 * Called on the entry path; delegates to kvm_pmu_update_state(), which only
 * touches the vgic when the computed overflow level actually changed.
 */
void kvm_pmu_flush_hwstate(struct kvm_vcpu *vcpu)
{
	kvm_pmu_update_state(vcpu);
}
|
|
|
+
|
|
|
/**
 * kvm_pmu_sync_hwstate - sync pmu state from cpu
 * @vcpu: The vcpu pointer
 *
 * Check if the PMU has overflowed while we were running in the guest, and
 * inject an interrupt if that was the case.
 *
 * Called on the exit path; same helper as kvm_pmu_flush_hwstate(), so the
 * interrupt level is updated symmetrically on both sides of a guest run.
 */
void kvm_pmu_sync_hwstate(struct kvm_vcpu *vcpu)
{
	kvm_pmu_update_state(vcpu);
}
|
|
|
+
|
|
|
/*
 * kvm_pmc_to_vcpu - recover the vcpu that owns a perf-monitor counter
 *
 * A struct kvm_pmc is an element of the pmc[] array embedded in struct
 * kvm_pmu, which itself is embedded in struct kvm_vcpu_arch, which is
 * embedded in struct kvm_vcpu.  Walk those containments back out.
 */
static inline struct kvm_vcpu *kvm_pmc_to_vcpu(struct kvm_pmc *pmc)
{
	struct kvm_pmu *pmu;
	struct kvm_vcpu_arch *vcpu_arch;

	/*
	 * pmc->idx is this counter's index within pmu->pmc[], so stepping
	 * back idx elements lands on pmc[0] — assumes idx always matches
	 * the counter's array position.
	 */
	pmc -= pmc->idx;
	pmu = container_of(pmc, struct kvm_pmu, pmc[0]);
	vcpu_arch = container_of(pmu, struct kvm_vcpu_arch, pmu);
	return container_of(vcpu_arch, struct kvm_vcpu, arch);
}
|
|
|
+
|
|
|
+/**
|
|
|
+ * When perf event overflows, call kvm_pmu_overflow_set to set overflow status.
|
|
|
+ */
|
|
|
+static void kvm_pmu_perf_overflow(struct perf_event *perf_event,
|
|
|
+ struct perf_sample_data *data,
|
|
|
+ struct pt_regs *regs)
|
|
|
+{
|
|
|
+ struct kvm_pmc *pmc = perf_event->overflow_handler_context;
|
|
|
+ struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc);
|
|
|
+ int idx = pmc->idx;
|
|
|
+
|
|
|
+ kvm_pmu_overflow_set(vcpu, BIT(idx));
|
|
|
+}
|
|
|
+
|
|
|
/**
|
|
|
* kvm_pmu_software_increment - do software increment
|
|
|
* @vcpu: The vcpu pointer
|
|
@@ -291,7 +357,8 @@ void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data,
|
|
|
/* The initial sample period (overflow count) of an event. */
|
|
|
attr.sample_period = (-counter) & pmc->bitmask;
|
|
|
|
|
|
- event = perf_event_create_kernel_counter(&attr, -1, current, NULL, pmc);
|
|
|
+ event = perf_event_create_kernel_counter(&attr, -1, current,
|
|
|
+ kvm_pmu_perf_overflow, pmc);
|
|
|
if (IS_ERR(event)) {
|
|
|
pr_err_once("kvm: pmu event creation failed %ld\n",
|
|
|
PTR_ERR(event));
|