arm_pmu.h

/*
 * Copyright (C) 2015 Linaro Ltd.
 * Author: Shannon Zhao <shannon.zhao@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#ifndef __ASM_ARM_KVM_PMU_H
#define __ASM_ARM_KVM_PMU_H

#include <linux/perf_event.h>
#include <asm/perf_event.h>

#define ARMV8_PMU_CYCLE_IDX		(ARMV8_PMU_MAX_COUNTERS - 1)

#ifdef CONFIG_KVM_ARM_PMU

struct kvm_pmc {
	u8 idx;	/* index into the pmu->pmc array */
	struct perf_event *perf_event;
	u64 bitmask;
};

struct kvm_pmu {
	int irq_num;
	struct kvm_pmc pmc[ARMV8_PMU_MAX_COUNTERS];
	bool ready;
	bool created;
	bool irq_level;
};

#define kvm_arm_pmu_v3_ready(v)		((v)->arch.pmu.ready)
#define kvm_arm_pmu_irq_initialized(v)	((v)->arch.pmu.irq_num >= VGIC_NR_SGIS)
u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u64 select_idx);
void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu, u64 select_idx, u64 val);
u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu);
void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu);
void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu);
void kvm_pmu_disable_counter(struct kvm_vcpu *vcpu, u64 val);
void kvm_pmu_enable_counter(struct kvm_vcpu *vcpu, u64 val);
void kvm_pmu_overflow_set(struct kvm_vcpu *vcpu, u64 val);
void kvm_pmu_flush_hwstate(struct kvm_vcpu *vcpu);
void kvm_pmu_sync_hwstate(struct kvm_vcpu *vcpu);
bool kvm_pmu_should_notify_user(struct kvm_vcpu *vcpu);
void kvm_pmu_update_run(struct kvm_vcpu *vcpu);
void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val);
void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val);
void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data,
				    u64 select_idx);
bool kvm_arm_support_pmu_v3(void);
int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu,
			    struct kvm_device_attr *attr);
int kvm_arm_pmu_v3_get_attr(struct kvm_vcpu *vcpu,
			    struct kvm_device_attr *attr);
int kvm_arm_pmu_v3_has_attr(struct kvm_vcpu *vcpu,
			    struct kvm_device_attr *attr);
int kvm_arm_pmu_v3_enable(struct kvm_vcpu *vcpu);

#else

struct kvm_pmu {
};

#define kvm_arm_pmu_v3_ready(v)		(false)
#define kvm_arm_pmu_irq_initialized(v)	(false)
static inline u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu,
					    u64 select_idx)
{
	return 0;
}
static inline void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu,
					     u64 select_idx, u64 val) {}
static inline u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu)
{
	return 0;
}
static inline void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu) {}
static inline void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu) {}
static inline void kvm_pmu_disable_counter(struct kvm_vcpu *vcpu, u64 val) {}
static inline void kvm_pmu_enable_counter(struct kvm_vcpu *vcpu, u64 val) {}
static inline void kvm_pmu_overflow_set(struct kvm_vcpu *vcpu, u64 val) {}
static inline void kvm_pmu_flush_hwstate(struct kvm_vcpu *vcpu) {}
static inline void kvm_pmu_sync_hwstate(struct kvm_vcpu *vcpu) {}
static inline bool kvm_pmu_should_notify_user(struct kvm_vcpu *vcpu)
{
	return false;
}
static inline void kvm_pmu_update_run(struct kvm_vcpu *vcpu) {}
static inline void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val) {}
static inline void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val) {}
static inline void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu,
						  u64 data, u64 select_idx) {}
static inline bool kvm_arm_support_pmu_v3(void) { return false; }
static inline int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu,
					  struct kvm_device_attr *attr)
{
	return -ENXIO;
}
static inline int kvm_arm_pmu_v3_get_attr(struct kvm_vcpu *vcpu,
					  struct kvm_device_attr *attr)
{
	return -ENXIO;
}
static inline int kvm_arm_pmu_v3_has_attr(struct kvm_vcpu *vcpu,
					  struct kvm_device_attr *attr)
{
	return -ENXIO;
}
static inline int kvm_arm_pmu_v3_enable(struct kvm_vcpu *vcpu)
{
	return 0;
}
#endif

#endif
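
Note: the #else branch above keeps every PMU entry point available as a no-op static inline stub, so callers never need per-call-site #ifdef CONFIG_KVM_ARM_PMU guards. A minimal sketch of that pattern follows; the caller (handle_vcpu_entry) is hypothetical and not part of this header, it only illustrates how the same call compiles in both configurations:

/*
 * Hypothetical caller sketch: compiles unchanged whether or not
 * CONFIG_KVM_ARM_PMU is set, because the stubs in the #else branch
 * collapse to empty inline functions.
 */
static void handle_vcpu_entry(struct kvm_vcpu *vcpu)
{
	/* Real PMU state flush with CONFIG_KVM_ARM_PMU, no-op without it. */
	kvm_pmu_flush_hwstate(vcpu);
}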