@@ -210,6 +210,40 @@ void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val)
 	}
 }
 
+/**
+ * kvm_pmu_handle_pmcr - handle PMCR register
+ * @vcpu: The vcpu pointer
+ * @val: the value guest writes to PMCR register
+ */
+void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val)
+{
+	struct kvm_pmu *pmu = &vcpu->arch.pmu;
+	struct kvm_pmc *pmc;
+	u64 mask;
+	int i;
+
+	mask = kvm_pmu_valid_counter_mask(vcpu);
+	if (val & ARMV8_PMU_PMCR_E) {
+		kvm_pmu_enable_counter(vcpu,
+		       vcpu_sys_reg(vcpu, PMCNTENSET_EL0) & mask);
+	} else {
+		kvm_pmu_disable_counter(vcpu, mask);
+	}
+
+	if (val & ARMV8_PMU_PMCR_C)
+		kvm_pmu_set_counter_value(vcpu, ARMV8_PMU_CYCLE_IDX, 0);
+
+	if (val & ARMV8_PMU_PMCR_P) {
+		for (i = 0; i < ARMV8_PMU_CYCLE_IDX; i++)
+			kvm_pmu_set_counter_value(vcpu, i, 0);
+	}
+
+	if (val & ARMV8_PMU_PMCR_LC) {
+		pmc = &pmu->pmc[ARMV8_PMU_CYCLE_IDX];
+		pmc->bitmask = 0xffffffffffffffffUL;
+	}
+}
+
 static bool kvm_pmu_counter_is_enabled(struct kvm_vcpu *vcpu, u64 select_idx)
 {
 	return (vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E) &&
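
The write semantics decoded by the new handler follow the architectural definition of PMCR_EL0: E (bit 0) gates all counters, P (bit 1) zeroes the event counters but, per the ARM ARM, not the cycle counter (hence the loop stopping at ARMV8_PMU_CYCLE_IDX), C (bit 2) zeroes the cycle counter, and LC (bit 6) makes the cycle counter overflow at 64 rather than 32 bits, which is why the handler widens pmc->bitmask to all ones. As an illustration only, the standalone userspace sketch below mirrors that bit decoding; the bit values are taken from the ARMv8 specification rather than from this patch, and the program is not KVM code:

/*
 * Standalone sketch of the PMCR_EL0 write decoding performed by
 * kvm_pmu_handle_pmcr() above. Bit positions per the ARMv8 spec;
 * illustration only, not kernel code. Build with: cc -o pmcr pmcr.c
 */
#include <stdint.h>
#include <stdio.h>

#define ARMV8_PMU_PMCR_E	(1U << 0)	/* Enable all counters */
#define ARMV8_PMU_PMCR_P	(1U << 1)	/* Reset event counters */
#define ARMV8_PMU_PMCR_C	(1U << 2)	/* Reset cycle counter */
#define ARMV8_PMU_PMCR_LC	(1U << 6)	/* 64-bit cycle counter overflow */

static void decode_pmcr_write(uint64_t val)
{
	printf("PMCR write 0x%llx:\n", (unsigned long long)val);
	/* E gates every counter, like the enable/disable branch above. */
	printf("  counters globally %s\n",
	       (val & ARMV8_PMU_PMCR_E) ? "enabled" : "disabled");
	/* C resets only the cycle counter. */
	if (val & ARMV8_PMU_PMCR_C)
		printf("  cycle counter reset to 0\n");
	/* P resets the event counters, not the cycle counter. */
	if (val & ARMV8_PMU_PMCR_P)
		printf("  event counters reset to 0\n");
	/* LC selects 64-bit rather than 32-bit cycle counter overflow. */
	if (val & ARMV8_PMU_PMCR_LC)
		printf("  cycle counter overflows at 64 bits\n");
}

int main(void)
{
	/* Mirrors the four cases kvm_pmu_handle_pmcr() checks. */
	decode_pmcr_write(ARMV8_PMU_PMCR_E | ARMV8_PMU_PMCR_P |
			  ARMV8_PMU_PMCR_C | ARMV8_PMU_PMCR_LC);
	return 0;
}

Running it with E|P|C|LC, as above, prints the combined effect a guest would request with such a write: enable all counters, start both counter classes from zero, and use a 64-bit cycle counter.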