hv-24x7.c

/*
 * Hypervisor supplied "24x7" performance counter support
 *
 * Author: Cody P Schafer <cody@linux.vnet.ibm.com>
 * Copyright 2014 IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt) "hv-24x7: " fmt

#include <linux/perf_event.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <asm/firmware.h>
#include <asm/hvcall.h>
#include <asm/io.h>

#include "hv-24x7.h"
#include "hv-24x7-catalog.h"
#include "hv-common.h"

/*
 * TODO: Merging events:
 * - Think of the hcall as an interface to a 4d array of counters:
 *   - x = domains
 *   - y = indexes in the domain (core, chip, vcpu, node, etc)
 *   - z = offset into the counter space
 *   - w = lpars (guest vms, "logical partitions")
 * - A single request is: x,y,y_last,z,z_last,w,w_last
 *   - this means we can retrieve a rectangle of counters in y,z for a single x.
 *
 * - Things to consider (ignoring w):
 *   - input cost_per_request = 16
 *   - output cost_per_result(ys, zs) = 8 + 8 * ys + ys * zs
 *   - limited number of requests per hcall (must fit into 4K bytes)
 *     - 4k = 16 [buffer header] + 16 [request size] * request_count
 *     - 255 requests per hcall
 *   - sometimes it will be more efficient to read extra data and discard
 */
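
/*
 * Worked example of the costs above (illustrative numbers only): a single
 * request covering ys = 4 indexes with zs = 64 bytes of counter data each
 * costs 16 bytes of input and 8 + 8 * 4 + 4 * 64 = 296 bytes of output, so
 * one 4K result buffer can batch what would otherwise take many hcalls.
 */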

/*
 * Example usage:
 *   perf stat -e 'hv_24x7/domain=2,offset=8,starting_index=0,lpar=0xffffffff/'
 */
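
/*
 * In the example above, domain 2 is HV_24X7_PERF_DOMAIN_PHYSICAL_CORE (per
 * hv-24x7.h) and lpar=0xffffffff packs all-ones into the 16-bit lpar field
 * below (i.e. event_get_lpar_max()), the only lpar value allowed without
 * the collect_privileged capability (see h_24x7_event_init()).
 */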

/* u3 0-6, one of HV_24X7_PERF_DOMAIN */
EVENT_DEFINE_RANGE_FORMAT(domain, config, 0, 3);
/* u16 */
EVENT_DEFINE_RANGE_FORMAT(starting_index, config, 16, 31);
/* u32, see "data_offset" */
EVENT_DEFINE_RANGE_FORMAT(offset, config, 32, 63);
/* u16 */
EVENT_DEFINE_RANGE_FORMAT(lpar, config1, 0, 15);

EVENT_DEFINE_RANGE(reserved1, config, 4, 15);
EVENT_DEFINE_RANGE(reserved2, config1, 16, 63);
EVENT_DEFINE_RANGE(reserved3, config2, 0, 63);

static struct attribute *format_attrs[] = {
        &format_attr_domain.attr,
        &format_attr_offset.attr,
        &format_attr_starting_index.attr,
        &format_attr_lpar.attr,
        NULL,
};

static struct attribute_group format_group = {
        .name = "format",
        .attrs = format_attrs,
};

static struct kmem_cache *hv_page_cache;

/*
 * read_offset_data - copy data from one buffer to another while treating the
 *                    source buffer as a small view on the total available
 *                    source data.
 *
 * @dest: buffer to copy into
 * @dest_len: length of @dest in bytes
 * @requested_offset: the offset within the source data we want. Must be > 0
 * @src: buffer to copy data from
 * @src_len: length of @src in bytes
 * @source_offset: the offset in the source data that (src,src_len) refers to.
 *                 Must be > 0
 *
 * returns the number of bytes copied.
 *
 * The following ascii art shows the various buffer positionings we need to
 * handle, assigns some arbitrary variables to points on the buffer, and then
 * shows how we fiddle with those values to get things we care about (copy
 * start in src and copy len)
 *
 * s = @src buffer
 * d = @dest buffer
 * '.' areas in d are written to.
 *
 *                      u
 *    x       w         v     z
 *  d         |.........|
 *  s |-----------------------|
 *
 *                      u
 *    x       w         z     v
 *  d         |.........------|
 *  s |-----------------|
 *
 *    x       w         u,z,v
 *  d         |.........|
 *  s |-----------------|
 *
 *    x,w              u,v,z
 *  d |.................|
 *  s |-----------------|
 *
 *    x       u
 *    w       v          z
 *  d |.......|
 *  s |------------------|
 *
 *    x       z   w     v
 *  d             |-----|
 *  s |-------|
 *
 * x = source_offset
 * w = requested_offset
 * z = source_offset + src_len
 * v = requested_offset + dest_len
 *
 * w_offset_in_s = w - x = requested_offset - source_offset
 * z_offset_in_s = z - x = src_len
 * v_offset_in_s = v - x = requested_offset + dest_len - source_offset
 */
static ssize_t read_offset_data(void *dest, size_t dest_len,
                                loff_t requested_offset, void *src,
                                size_t src_len, loff_t source_offset)
{
        size_t w_offset_in_s = requested_offset - source_offset;
        size_t z_offset_in_s = src_len;
        size_t v_offset_in_s = requested_offset + dest_len - source_offset;
        size_t u_offset_in_s = min(z_offset_in_s, v_offset_in_s);
        size_t copy_len = u_offset_in_s - w_offset_in_s;

        if (requested_offset < 0 || source_offset < 0)
                return -EINVAL;

        if (z_offset_in_s <= w_offset_in_s)
                return 0;

        memcpy(dest, src + w_offset_in_s, copy_len);
        return copy_len;
}
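
/*
 * Worked example (illustrative values): with @src holding catalog page 2
 * (source_offset = 8192, src_len = 4096), a read of 100 bytes at
 * requested_offset = 8300 gives w_offset_in_s = 108, v_offset_in_s = 208,
 * u_offset_in_s = min(4096, 208) = 208, so 100 bytes are copied from
 * src + 108.
 */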

static unsigned long h_get_24x7_catalog_page_(unsigned long phys_4096,
                                              unsigned long version,
                                              unsigned long index)
{
        pr_devel("h_get_24x7_catalog_page(0x%lx, %lu, %lu)",
                        phys_4096, version, index);
        WARN_ON(!IS_ALIGNED(phys_4096, 4096));
        return plpar_hcall_norets(H_GET_24X7_CATALOG_PAGE,
                        phys_4096, version, index);
}

static unsigned long h_get_24x7_catalog_page(char page[],
                                             u64 version, u32 index)
{
        return h_get_24x7_catalog_page_(virt_to_phys(page),
                                        version, index);
}

static ssize_t catalog_read(struct file *filp, struct kobject *kobj,
                            struct bin_attribute *bin_attr, char *buf,
                            loff_t offset, size_t count)
{
        unsigned long hret;
        ssize_t ret = 0;
        size_t catalog_len = 0, catalog_page_len = 0, page_count = 0;
        loff_t page_offset = 0;
        uint64_t catalog_version_num = 0;
        void *page = kmem_cache_alloc(hv_page_cache, GFP_USER);
        struct hv_24x7_catalog_page_0 *page_0 = page;

        if (!page)
                return -ENOMEM;

        hret = h_get_24x7_catalog_page(page, 0, 0);
        if (hret) {
                ret = -EIO;
                goto e_free;
        }

        catalog_version_num = be64_to_cpu(page_0->version);
        catalog_page_len = be32_to_cpu(page_0->length);
        catalog_len = catalog_page_len * 4096;

        page_offset = offset / 4096;
        page_count = count / 4096;

        if (page_offset >= catalog_page_len)
                goto e_free;

        if (page_offset != 0) {
                hret = h_get_24x7_catalog_page(page, catalog_version_num,
                                               page_offset);
                if (hret) {
                        ret = -EIO;
                        goto e_free;
                }
        }

        ret = read_offset_data(buf, count, offset,
                               page, 4096, page_offset * 4096);
e_free:
        if (hret)
                pr_err("h_get_24x7_catalog_page(ver=%lld, page=%lld) failed: rc=%ld\n",
                       catalog_version_num, page_offset, hret);

        /* page came from hv_page_cache, so a plain kfree() is wrong here */
        kmem_cache_free(hv_page_cache, page);

        pr_devel("catalog_read: offset=%lld(%lld) count=%zu(%zu) catalog_len=%zu(%zu) => %zd\n",
                        offset, page_offset, count, page_count, catalog_len,
                        catalog_page_len, ret);
        return ret;
}
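
/*
 * With the "interface" attribute group registered below, the raw catalog
 * can be read out of sysfs, e.g. (assuming the usual event_source layout):
 *
 *   dd if=/sys/bus/event_source/devices/hv_24x7/interface/catalog \
 *      of=24x7-catalog.bin bs=4096
 */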

#define PAGE_0_ATTR(_name, _fmt, _expr)                                 \
static ssize_t _name##_show(struct device *dev,                         \
                            struct device_attribute *dev_attr,          \
                            char *buf)                                  \
{                                                                       \
        unsigned long hret;                                             \
        ssize_t ret = 0;                                                \
        void *page = kmem_cache_alloc(hv_page_cache, GFP_USER);         \
        struct hv_24x7_catalog_page_0 *page_0 = page;                   \
        if (!page)                                                      \
                return -ENOMEM;                                         \
        hret = h_get_24x7_catalog_page(page, 0, 0);                     \
        if (hret) {                                                     \
                ret = -EIO;                                             \
                goto e_free;                                            \
        }                                                               \
        ret = sprintf(buf, _fmt, _expr);                                \
e_free:                                                                 \
        kmem_cache_free(hv_page_cache, page);                           \
        return ret;                                                     \
}                                                                       \
static DEVICE_ATTR_RO(_name)
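
/*
 * Each invocation below expands to a _name##_show() routine that fetches
 * catalog page 0 and formats one field from it, plus the matching
 * dev_attr_##_name via DEVICE_ATTR_RO(); e.g. the first one produces
 * catalog_version_show() and dev_attr_catalog_version.
 */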
PAGE_0_ATTR(catalog_version, "%lld\n",
            (unsigned long long)be64_to_cpu(page_0->version));
PAGE_0_ATTR(catalog_len, "%lld\n",
            (unsigned long long)be32_to_cpu(page_0->length) * 4096);
static BIN_ATTR_RO(catalog, 0/* real length varies */);

static struct bin_attribute *if_bin_attrs[] = {
        &bin_attr_catalog,
        NULL,
};

static struct attribute *if_attrs[] = {
        &dev_attr_catalog_len.attr,
        &dev_attr_catalog_version.attr,
        NULL,
};

static struct attribute_group if_group = {
        .name = "interface",
        .bin_attrs = if_bin_attrs,
        .attrs = if_attrs,
};

static const struct attribute_group *attr_groups[] = {
        &format_group,
        &if_group,
        NULL,
};

static bool is_physical_domain(int domain)
{
        return domain == HV_24X7_PERF_DOMAIN_PHYSICAL_CHIP ||
               domain == HV_24X7_PERF_DOMAIN_PHYSICAL_CORE;
}

static unsigned long single_24x7_request(u8 domain, u32 offset, u16 ix,
                                         u16 lpar, u64 *res,
                                         bool success_expected)
{
        unsigned long ret;

        /*
         * request_buffer and result_buffer are not required to be 4k aligned,
         * but are not allowed to cross any 4k boundary. Aligning them to 4k is
         * the simplest way to ensure that.
         */
        struct reqb {
                struct hv_24x7_request_buffer buf;
                struct hv_24x7_request req;
        } __packed __aligned(4096) request_buffer = {
                .buf = {
                        .interface_version = HV_24X7_IF_VERSION_CURRENT,
                        .num_requests = 1,
                },
                .req = {
                        .performance_domain = domain,
                        .data_size = cpu_to_be16(8),
                        .data_offset = cpu_to_be32(offset),
                        .starting_lpar_ix = cpu_to_be16(lpar),
                        .max_num_lpars = cpu_to_be16(1),
                        .starting_ix = cpu_to_be16(ix),
                        .max_ix = cpu_to_be16(1),
                }
        };

        struct resb {
                struct hv_24x7_data_result_buffer buf;
                struct hv_24x7_result res;
                struct hv_24x7_result_element elem;
                __be64 result;
        } __packed __aligned(4096) result_buffer = {};

        ret = plpar_hcall_norets(H_GET_24X7_DATA,
                        virt_to_phys(&request_buffer), sizeof(request_buffer),
                        virt_to_phys(&result_buffer), sizeof(result_buffer));

        if (ret) {
                if (success_expected)
                        pr_err_ratelimited("hcall failed: %d %#x %#x %d => 0x%lx (%ld) detail=0x%x failing ix=%x\n",
                                        domain, offset, ix, lpar,
                                        ret, ret,
                                        result_buffer.buf.detailed_rc,
                                        result_buffer.buf.failing_request_ix);
                return ret;
        }

        *res = be64_to_cpu(result_buffer.result);
        return ret;
}

static unsigned long event_24x7_request(struct perf_event *event, u64 *res,
                                        bool success_expected)
{
        return single_24x7_request(event_get_domain(event),
                                   event_get_offset(event),
                                   event_get_starting_index(event),
                                   event_get_lpar(event),
                                   res,
                                   success_expected);
}

static int h_24x7_event_init(struct perf_event *event)
{
        struct hv_perf_caps caps;
        unsigned domain;
        unsigned long hret;
        u64 ct;

        /* Not our event */
        if (event->attr.type != event->pmu->type)
                return -ENOENT;

        /* Unused areas must be 0 */
        if (event_get_reserved1(event) ||
            event_get_reserved2(event) ||
            event_get_reserved3(event)) {
                pr_devel("reserved set when forbidden 0x%llx(0x%llx) 0x%llx(0x%llx) 0x%llx(0x%llx)\n",
                                event->attr.config,
                                event_get_reserved1(event),
                                event->attr.config1,
                                event_get_reserved2(event),
                                event->attr.config2,
                                event_get_reserved3(event));
                return -EINVAL;
        }

        /* unsupported modes and filters */
        if (event->attr.exclude_user ||
            event->attr.exclude_kernel ||
            event->attr.exclude_hv ||
            event->attr.exclude_idle ||
            event->attr.exclude_host ||
            event->attr.exclude_guest ||
            is_sampling_event(event)) /* no sampling */
                return -EINVAL;

        /* no branch sampling */
        if (has_branch_stack(event))
                return -EOPNOTSUPP;

        /* offset must be 8 byte aligned */
        if (event_get_offset(event) % 8) {
                pr_devel("bad alignment\n");
                return -EINVAL;
        }

        /* Domains above 6 are invalid */
        domain = event_get_domain(event);
        if (domain > 6) {
                pr_devel("invalid domain %d\n", domain);
                return -EINVAL;
        }

        hret = hv_perf_caps_get(&caps);
        if (hret) {
                pr_devel("could not get capabilities: rc=%ld\n", hret);
                return -EIO;
        }

        /* PHYSICAL domains & other lpars require extra capabilities */
        if (!caps.collect_privileged && (is_physical_domain(domain) ||
            (event_get_lpar(event) != event_get_lpar_max()))) {
                pr_devel("hv permissions disallow: is_physical_domain:%d, lpar=0x%llx\n",
                                is_physical_domain(domain),
                                event_get_lpar(event));
                return -EACCES;
        }

        /* see if the event complains */
        if (event_24x7_request(event, &ct, false)) {
                pr_devel("test hcall failed\n");
                return -EIO;
        }

        return 0;
}

static u64 h_24x7_get_value(struct perf_event *event)
{
        unsigned long ret;
        u64 ct;

        ret = event_24x7_request(event, &ct, true);
        if (ret)
                /* We checked this in event init, shouldn't fail here... */
                return 0;

        return ct;
}

static void h_24x7_event_update(struct perf_event *event)
{
        s64 prev;
        u64 now;

        /*
         * The hypervisor's counters are free running; accumulate the delta
         * since the last read into event->count.
         */
        now = h_24x7_get_value(event);
        prev = local64_xchg(&event->hw.prev_count, now);
        local64_add(now - prev, &event->count);
}

static void h_24x7_event_start(struct perf_event *event, int flags)
{
        if (flags & PERF_EF_RELOAD)
                local64_set(&event->hw.prev_count, h_24x7_get_value(event));
}

static void h_24x7_event_stop(struct perf_event *event, int flags)
{
        h_24x7_event_update(event);
}

static int h_24x7_event_add(struct perf_event *event, int flags)
{
        if (flags & PERF_EF_START)
                h_24x7_event_start(event, flags);

        return 0;
}

static int h_24x7_event_idx(struct perf_event *event)
{
        return 0;
}
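
/*
 * task_ctx_nr = perf_invalid_context below means there is no per-task
 * context for these events; they can only be opened as system-wide
 * (per-CPU) counting events.
 */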
static struct pmu h_24x7_pmu = {
        .task_ctx_nr = perf_invalid_context,

        .name = "hv_24x7",
        .attr_groups = attr_groups,
        .event_init = h_24x7_event_init,
        .add = h_24x7_event_add,
        .del = h_24x7_event_stop,
        .start = h_24x7_event_start,
        .stop = h_24x7_event_stop,
        .read = h_24x7_event_update,
        .event_idx = h_24x7_event_idx,
};

static int hv_24x7_init(void)
{
        int r;
        unsigned long hret;
        struct hv_perf_caps caps;

        if (!firmware_has_feature(FW_FEATURE_LPAR)) {
                pr_debug("not a virtualized system, not enabling\n");
                return -ENODEV;
        }

        hret = hv_perf_caps_get(&caps);
        if (hret) {
                pr_debug("could not obtain capabilities, not enabling, rc=%ld\n",
                                hret);
                return -ENODEV;
        }

        hv_page_cache = kmem_cache_create("hv-page-4096", 4096, 4096, 0, NULL);
        if (!hv_page_cache)
                return -ENOMEM;

        r = perf_pmu_register(&h_24x7_pmu, h_24x7_pmu.name, -1);
        if (r)
                return r;

        return 0;
}

device_initcall(hv_24x7_init);