/* hv-gpci.c */
/*
 * Hypervisor supplied "gpci" ("get performance counter info") performance
 * counter support
 *
 * Author: Cody P Schafer <cody@linux.vnet.ibm.com>
 * Copyright 2014 IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
  13. #define pr_fmt(fmt) "hv-gpci: " fmt
  14. #include <linux/init.h>
  15. #include <linux/perf_event.h>
  16. #include <asm/firmware.h>
  17. #include <asm/hvcall.h>
  18. #include <asm/io.h>
  19. #include "hv-gpci.h"
  20. #include "hv-common.h"
/*
 * Example usage:
 *  perf stat -e 'hv_gpci/counter_info_version=3,offset=0,length=8,
 *		  secondary_index=0,starting_index=0xffffffff,request=0x10/' ...
 */

/*
 * Each invocation carves a bit-field out of attr.config/attr.config1 and
 * (per use below) provides a format_attr_<name> sysfs attribute plus an
 * event_get_<name>() accessor. Field comments give the logical width.
 */
/* u32 */
EVENT_DEFINE_RANGE_FORMAT(request, config, 0, 31);
/* u32 */
EVENT_DEFINE_RANGE_FORMAT(starting_index, config, 32, 63);
/* u16 */
EVENT_DEFINE_RANGE_FORMAT(secondary_index, config1, 0, 15);
/* u8 */
EVENT_DEFINE_RANGE_FORMAT(counter_info_version, config1, 16, 23);
/* u8, bytes of data (1-8) */
EVENT_DEFINE_RANGE_FORMAT(length, config1, 24, 31);
/* u32, byte offset */
EVENT_DEFINE_RANGE_FORMAT(offset, config1, 32, 63);
/* Entries for the sysfs "format" directory: one per config bit-field above. */
static struct attribute *format_attrs[] = {
	&format_attr_request.attr,
	&format_attr_starting_index.attr,
	&format_attr_secondary_index.attr,
	&format_attr_counter_info_version.attr,
	&format_attr_offset.attr,
	&format_attr_length.attr,
	NULL,
};
/* The "format" attribute group exposed via the pmu's attr_groups. */
static struct attribute_group format_group = {
	.name = "format",
	.attrs = format_attrs,
};
/*
 * Generate a read-only device attribute named _name whose show routine
 * re-queries the hypervisor capabilities (hv_perf_caps_get()) and prints
 * the caps._name field with the given printf format. Returns -EIO if the
 * capability query fails.
 */
#define HV_CAPS_ATTR(_name, _format)				\
static ssize_t _name##_show(struct device *dev,			\
			    struct device_attribute *attr,	\
			    char *page)				\
{								\
	struct hv_perf_caps caps;				\
	unsigned long hret = hv_perf_caps_get(&caps);		\
	if (hret)						\
		return -EIO;					\
								\
	return sprintf(page, _format, caps._name);		\
}								\
static struct device_attribute hv_caps_attr_##_name = __ATTR_RO(_name)
/* Report the counter_info interface version this kernel was built against. */
static ssize_t kernel_version_show(struct device *dev,
				   struct device_attribute *attr,
				   char *page)
{
	return sprintf(page, "0x%x\n", COUNTER_INFO_VERSION_CURRENT);
}
static DEVICE_ATTR_RO(kernel_version);
/* One sysfs attribute per hv_perf_caps field (see HV_CAPS_ATTR above). */
HV_CAPS_ATTR(version, "0x%x\n");
HV_CAPS_ATTR(ga, "%d\n");
HV_CAPS_ATTR(expanded, "%d\n");
HV_CAPS_ATTR(lab, "%d\n");
HV_CAPS_ATTR(collect_privileged, "%d\n");
/* Entries for the sysfs "interface" directory: versions and capabilities. */
static struct attribute *interface_attrs[] = {
	&dev_attr_kernel_version.attr,
	&hv_caps_attr_version.attr,
	&hv_caps_attr_ga.attr,
	&hv_caps_attr_expanded.attr,
	&hv_caps_attr_lab.attr,
	&hv_caps_attr_collect_privileged.attr,
	NULL,
};
static struct attribute_group interface_group = {
	.name = "interface",
	.attrs = interface_attrs,
};

/* All sysfs groups registered with the pmu below. */
static const struct attribute_group *attr_groups[] = {
	&format_group,
	&interface_group,
	NULL,
};
/*
 * The hcall argument buffer is 1024 bytes total; the data area is what
 * remains after the fixed hv_get_perf_counter_info_params header.
 */
#define GPCI_MAX_DATA_BYTES \
	(1024 - sizeof(struct hv_get_perf_counter_info_params))
  96. static unsigned long single_gpci_request(u32 req, u32 starting_index,
  97. u16 secondary_index, u8 version_in, u32 offset, u8 length,
  98. u64 *value)
  99. {
  100. unsigned long ret;
  101. size_t i;
  102. u64 count;
  103. struct {
  104. struct hv_get_perf_counter_info_params params;
  105. uint8_t bytes[GPCI_MAX_DATA_BYTES];
  106. } __packed __aligned(sizeof(uint64_t)) arg = {
  107. .params = {
  108. .counter_request = cpu_to_be32(req),
  109. .starting_index = cpu_to_be32(starting_index),
  110. .secondary_index = cpu_to_be16(secondary_index),
  111. .counter_info_version_in = version_in,
  112. }
  113. };
  114. ret = plpar_hcall_norets(H_GET_PERF_COUNTER_INFO,
  115. virt_to_phys(&arg), sizeof(arg));
  116. if (ret) {
  117. pr_devel("hcall failed: 0x%lx\n", ret);
  118. return ret;
  119. }
  120. /*
  121. * we verify offset and length are within the zeroed buffer at event
  122. * init.
  123. */
  124. count = 0;
  125. for (i = offset; i < offset + length; i++)
  126. count |= arg.bytes[i] << (i - offset);
  127. *value = count;
  128. return ret;
  129. }
  130. static u64 h_gpci_get_value(struct perf_event *event)
  131. {
  132. u64 count;
  133. unsigned long ret = single_gpci_request(event_get_request(event),
  134. event_get_starting_index(event),
  135. event_get_secondary_index(event),
  136. event_get_counter_info_version(event),
  137. event_get_offset(event),
  138. event_get_length(event),
  139. &count);
  140. if (ret)
  141. return 0;
  142. return count;
  143. }
  144. static void h_gpci_event_update(struct perf_event *event)
  145. {
  146. s64 prev;
  147. u64 now = h_gpci_get_value(event);
  148. prev = local64_xchg(&event->hw.prev_count, now);
  149. local64_add(now - prev, &event->count);
  150. }
  151. static void h_gpci_event_start(struct perf_event *event, int flags)
  152. {
  153. local64_set(&event->hw.prev_count, h_gpci_get_value(event));
  154. }
/* perf ->stop/->del callback: fold the final delta into event->count. */
static void h_gpci_event_stop(struct perf_event *event, int flags)
{
	h_gpci_event_update(event);
}
  159. static int h_gpci_event_add(struct perf_event *event, int flags)
  160. {
  161. if (flags & PERF_EF_START)
  162. h_gpci_event_start(event, flags);
  163. return 0;
  164. }
/*
 * perf ->event_init callback: validate the user-supplied config fields
 * and probe the hypervisor once to confirm the request actually works.
 */
static int h_gpci_event_init(struct perf_event *event)
{
	u64 count;
	u8 length;

	/* Not our event */
	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	/* config2 is unused */
	if (event->attr.config2) {
		pr_devel("config2 set when reserved\n");
		return -EINVAL;
	}

	/* unsupported modes and filters */
	if (event->attr.exclude_user   ||
	    event->attr.exclude_kernel ||
	    event->attr.exclude_hv     ||
	    event->attr.exclude_idle   ||
	    event->attr.exclude_host   ||
	    event->attr.exclude_guest  ||
	    is_sampling_event(event)) /* no sampling */
		return -EINVAL;

	/* no branch sampling */
	if (has_branch_stack(event))
		return -EOPNOTSUPP;

	/* length must select 1-8 bytes so the value fits a u64 */
	length = event_get_length(event);
	if (length < 1 || length > 8) {
		pr_devel("length invalid\n");
		return -EINVAL;
	}

	/* last byte within the buffer? */
	if ((event_get_offset(event) + length) > GPCI_MAX_DATA_BYTES) {
		pr_devel("request outside of buffer: %zu > %zu\n",
				(size_t)event_get_offset(event) + length,
				GPCI_MAX_DATA_BYTES);
		return -EINVAL;
	}

	/* check if the request works... */
	if (single_gpci_request(event_get_request(event),
				event_get_starting_index(event),
				event_get_secondary_index(event),
				event_get_counter_info_version(event),
				event_get_offset(event),
				length,
				&count)) {
		pr_devel("gpci hcall failed\n");
		return -EINVAL;
	}

	return 0;
}
/* No per-counter hardware index to report; always 0. */
static int h_gpci_event_idx(struct perf_event *event)
{
	return 0;
}
/*
 * The hv_gpci pmu. task_ctx_nr = perf_invalid_context: these are
 * system-wide hypervisor counters, not per-task. ->del reuses the stop
 * handler since there is no hardware state to tear down.
 */
static struct pmu h_gpci_pmu = {
	.task_ctx_nr = perf_invalid_context,

	.name = "hv_gpci",
	.attr_groups = attr_groups,
	.event_init  = h_gpci_event_init,
	.add         = h_gpci_event_add,
	.del         = h_gpci_event_stop,
	.start       = h_gpci_event_start,
	.stop        = h_gpci_event_stop,
	.read        = h_gpci_event_update,
	.event_idx   = h_gpci_event_idx,
};
  230. static int hv_gpci_init(void)
  231. {
  232. int r;
  233. unsigned long hret;
  234. struct hv_perf_caps caps;
  235. if (!firmware_has_feature(FW_FEATURE_LPAR)) {
  236. pr_debug("not a virtualized system, not enabling\n");
  237. return -ENODEV;
  238. }
  239. hret = hv_perf_caps_get(&caps);
  240. if (hret) {
  241. pr_debug("could not obtain capabilities, not enabling, rc=%ld\n",
  242. hret);
  243. return -ENODEV;
  244. }
  245. r = perf_pmu_register(&h_gpci_pmu, h_gpci_pmu.name, -1);
  246. if (r)
  247. return r;
  248. return 0;
  249. }
  250. device_initcall(hv_gpci_init);