/* pmu.c — KVM ARM virtual PMU emulation */
/*
 * Copyright (C) 2015 Linaro Ltd.
 * Author: Shannon Zhao <shannon.zhao@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
  17. #include <linux/cpu.h>
  18. #include <linux/kvm.h>
  19. #include <linux/kvm_host.h>
  20. #include <linux/perf_event.h>
  21. #include <asm/kvm_emulate.h>
  22. #include <kvm/arm_pmu.h>
  23. /**
  24. * kvm_pmu_get_counter_value - get PMU counter value
  25. * @vcpu: The vcpu pointer
  26. * @select_idx: The counter index
  27. */
  28. u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u64 select_idx)
  29. {
  30. u64 counter, reg, enabled, running;
  31. struct kvm_pmu *pmu = &vcpu->arch.pmu;
  32. struct kvm_pmc *pmc = &pmu->pmc[select_idx];
  33. reg = (select_idx == ARMV8_PMU_CYCLE_IDX)
  34. ? PMCCNTR_EL0 : PMEVCNTR0_EL0 + select_idx;
  35. counter = vcpu_sys_reg(vcpu, reg);
  36. /* The real counter value is equal to the value of counter register plus
  37. * the value perf event counts.
  38. */
  39. if (pmc->perf_event)
  40. counter += perf_event_read_value(pmc->perf_event, &enabled,
  41. &running);
  42. return counter & pmc->bitmask;
  43. }
  44. /**
  45. * kvm_pmu_set_counter_value - set PMU counter value
  46. * @vcpu: The vcpu pointer
  47. * @select_idx: The counter index
  48. * @val: The counter value
  49. */
  50. void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu, u64 select_idx, u64 val)
  51. {
  52. u64 reg;
  53. reg = (select_idx == ARMV8_PMU_CYCLE_IDX)
  54. ? PMCCNTR_EL0 : PMEVCNTR0_EL0 + select_idx;
  55. vcpu_sys_reg(vcpu, reg) += (s64)val - kvm_pmu_get_counter_value(vcpu, select_idx);
  56. }
  57. /**
  58. * kvm_pmu_stop_counter - stop PMU counter
  59. * @pmc: The PMU counter pointer
  60. *
  61. * If this counter has been configured to monitor some event, release it here.
  62. */
  63. static void kvm_pmu_stop_counter(struct kvm_vcpu *vcpu, struct kvm_pmc *pmc)
  64. {
  65. u64 counter, reg;
  66. if (pmc->perf_event) {
  67. counter = kvm_pmu_get_counter_value(vcpu, pmc->idx);
  68. reg = (pmc->idx == ARMV8_PMU_CYCLE_IDX)
  69. ? PMCCNTR_EL0 : PMEVCNTR0_EL0 + pmc->idx;
  70. vcpu_sys_reg(vcpu, reg) = counter;
  71. perf_event_disable(pmc->perf_event);
  72. perf_event_release_kernel(pmc->perf_event);
  73. pmc->perf_event = NULL;
  74. }
  75. }
  76. u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu)
  77. {
  78. u64 val = vcpu_sys_reg(vcpu, PMCR_EL0) >> ARMV8_PMU_PMCR_N_SHIFT;
  79. val &= ARMV8_PMU_PMCR_N_MASK;
  80. if (val == 0)
  81. return BIT(ARMV8_PMU_CYCLE_IDX);
  82. else
  83. return GENMASK(val - 1, 0) | BIT(ARMV8_PMU_CYCLE_IDX);
  84. }
  85. /**
  86. * kvm_pmu_enable_counter - enable selected PMU counter
  87. * @vcpu: The vcpu pointer
  88. * @val: the value guest writes to PMCNTENSET register
  89. *
  90. * Call perf_event_enable to start counting the perf event
  91. */
  92. void kvm_pmu_enable_counter(struct kvm_vcpu *vcpu, u64 val)
  93. {
  94. int i;
  95. struct kvm_pmu *pmu = &vcpu->arch.pmu;
  96. struct kvm_pmc *pmc;
  97. if (!(vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E) || !val)
  98. return;
  99. for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++) {
  100. if (!(val & BIT(i)))
  101. continue;
  102. pmc = &pmu->pmc[i];
  103. if (pmc->perf_event) {
  104. perf_event_enable(pmc->perf_event);
  105. if (pmc->perf_event->state != PERF_EVENT_STATE_ACTIVE)
  106. kvm_debug("fail to enable perf event\n");
  107. }
  108. }
  109. }
  110. /**
  111. * kvm_pmu_disable_counter - disable selected PMU counter
  112. * @vcpu: The vcpu pointer
  113. * @val: the value guest writes to PMCNTENCLR register
  114. *
  115. * Call perf_event_disable to stop counting the perf event
  116. */
  117. void kvm_pmu_disable_counter(struct kvm_vcpu *vcpu, u64 val)
  118. {
  119. int i;
  120. struct kvm_pmu *pmu = &vcpu->arch.pmu;
  121. struct kvm_pmc *pmc;
  122. if (!val)
  123. return;
  124. for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++) {
  125. if (!(val & BIT(i)))
  126. continue;
  127. pmc = &pmu->pmc[i];
  128. if (pmc->perf_event)
  129. perf_event_disable(pmc->perf_event);
  130. }
  131. }
  132. static u64 kvm_pmu_overflow_status(struct kvm_vcpu *vcpu)
  133. {
  134. u64 reg = 0;
  135. if ((vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E))
  136. reg = vcpu_sys_reg(vcpu, PMOVSSET_EL0);
  137. reg &= vcpu_sys_reg(vcpu, PMCNTENSET_EL0);
  138. reg &= vcpu_sys_reg(vcpu, PMINTENSET_EL1);
  139. reg &= kvm_pmu_valid_counter_mask(vcpu);
  140. return reg;
  141. }
  142. /**
  143. * kvm_pmu_overflow_set - set PMU overflow interrupt
  144. * @vcpu: The vcpu pointer
  145. * @val: the value guest writes to PMOVSSET register
  146. */
  147. void kvm_pmu_overflow_set(struct kvm_vcpu *vcpu, u64 val)
  148. {
  149. u64 reg;
  150. if (val == 0)
  151. return;
  152. vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= val;
  153. reg = kvm_pmu_overflow_status(vcpu);
  154. if (reg != 0)
  155. kvm_vcpu_kick(vcpu);
  156. }
  157. static bool kvm_pmu_counter_is_enabled(struct kvm_vcpu *vcpu, u64 select_idx)
  158. {
  159. return (vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E) &&
  160. (vcpu_sys_reg(vcpu, PMCNTENSET_EL0) & BIT(select_idx));
  161. }
/**
 * kvm_pmu_set_counter_event_type - set selected counter to monitor some event
 * @vcpu: The vcpu pointer
 * @data: The data guest writes to PMXEVTYPER_EL0
 * @select_idx: The number of selected counter
 *
 * When OS accesses PMXEVTYPER_EL0, that means it wants to set a PMC to count an
 * event with given hardware event number. Here we call perf_event API to
 * emulate this action and create a kernel perf event for it.
 */
void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data,
				    u64 select_idx)
{
	struct kvm_pmu *pmu = &vcpu->arch.pmu;
	struct kvm_pmc *pmc = &pmu->pmc[select_idx];
	struct perf_event *event;
	struct perf_event_attr attr;
	u64 eventsel, counter;

	/*
	 * Release any perf event already backing this counter; this also
	 * folds its count into the saved register state, which is then
	 * read back below to seed the new event's sample period.
	 */
	kvm_pmu_stop_counter(vcpu, pmc);
	eventsel = data & ARMV8_PMU_EVTYPE_EVENT;
	memset(&attr, 0, sizeof(struct perf_event_attr));
	attr.type = PERF_TYPE_RAW;
	attr.size = sizeof(attr);
	attr.pinned = 1;
	/* Start disabled unless the guest has already enabled this counter. */
	attr.disabled = !kvm_pmu_counter_is_enabled(vcpu, select_idx);
	/* Map the guest's EL0/EL1 filter bits onto perf exclude flags. */
	attr.exclude_user = data & ARMV8_PMU_EXCLUDE_EL0 ? 1 : 0;
	attr.exclude_kernel = data & ARMV8_PMU_EXCLUDE_EL1 ? 1 : 0;
	attr.exclude_hv = 1; /* Don't count EL2 events */
	attr.exclude_host = 1; /* Don't count host events */
	attr.config = eventsel;
	counter = kvm_pmu_get_counter_value(vcpu, select_idx);
	/*
	 * The initial sample period (overflow count) of an event: the
	 * two's-complement of the current value, masked to the counter
	 * width, so the perf event overflows exactly when the emulated
	 * counter would wrap.
	 */
	attr.sample_period = (-counter) & pmc->bitmask;
	event = perf_event_create_kernel_counter(&attr, -1, current, NULL, pmc);
	if (IS_ERR(event)) {
		pr_err_once("kvm: pmu event creation failed %ld\n",
			    PTR_ERR(event));
		return;
	}
	pmc->perf_event = event;
}