hv-24x7.c 32 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
7127812791280128112821283128412851286128712881289129012911292129312941295129612971298129913001301130213031304130513061307130813091310
  1. /*
  2. * Hypervisor supplied "24x7" performance counter support
  3. *
  4. * Author: Cody P Schafer <cody@linux.vnet.ibm.com>
  5. * Copyright 2014 IBM Corporation.
  6. *
  7. * This program is free software; you can redistribute it and/or
  8. * modify it under the terms of the GNU General Public License
  9. * as published by the Free Software Foundation; either version
  10. * 2 of the License, or (at your option) any later version.
  11. */
  12. #define pr_fmt(fmt) "hv-24x7: " fmt
  13. #include <linux/perf_event.h>
  14. #include <linux/rbtree.h>
  15. #include <linux/module.h>
  16. #include <linux/slab.h>
  17. #include <linux/vmalloc.h>
  18. #include <asm/firmware.h>
  19. #include <asm/hvcall.h>
  20. #include <asm/io.h>
  21. #include <linux/byteorder/generic.h>
  22. #include "hv-24x7.h"
  23. #include "hv-24x7-catalog.h"
  24. #include "hv-common.h"
/*
 * Map a 24x7 domain number to the "__<NAME>" suffix appended to the
 * sysfs event names generated from the catalog.  Case labels are
 * produced by expanding the DOMAIN() entries in hv-24x7-domains.h.
 */
static const char *event_domain_suffix(unsigned domain)
{
	switch (domain) {
#define DOMAIN(n, v, x, c)		\
	case HV_PERF_DOMAIN_##n:	\
		return "__" #n;
#include "hv-24x7-domains.h"
#undef DOMAIN
	default:
		/* catalog handed us a domain we have no name for */
		WARN(1, "unknown domain %d\n", domain);
		return "__UNKNOWN_DOMAIN_SUFFIX";
	}
}
/*
 * True iff @domain is one of the domains listed in hv-24x7-domains.h.
 * Every generated case label falls through to the shared "return true".
 */
static bool domain_is_valid(unsigned domain)
{
	switch (domain) {
#define DOMAIN(n, v, x, c)		\
	case HV_PERF_DOMAIN_##n:	\
		/* fall through */
#include "hv-24x7-domains.h"
#undef DOMAIN
		return true;
	default:
		return false;
	}
}
/*
 * Whether @domain counts physical (as opposed to virtual-cpu) resources.
 * The per-domain flag is the 'c' column of the DOMAIN() table in
 * hv-24x7-domains.h; unknown domains report false.
 */
static bool is_physical_domain(unsigned domain)
{
	switch (domain) {
#define DOMAIN(n, v, x, c)		\
	case HV_PERF_DOMAIN_##n:	\
		return c;
#include "hv-24x7-domains.h"
#undef DOMAIN
	default:
		return false;
	}
}
  63. static bool catalog_entry_domain_is_valid(unsigned domain)
  64. {
  65. return is_physical_domain(domain);
  66. }
  67. /*
  68. * TODO: Merging events:
  69. * - Think of the hcall as an interface to a 4d array of counters:
  70. * - x = domains
  71. * - y = indexes in the domain (core, chip, vcpu, node, etc)
  72. * - z = offset into the counter space
  73. * - w = lpars (guest vms, "logical partitions")
  74. * - A single request is: x,y,y_last,z,z_last,w,w_last
  75. * - this means we can retrieve a rectangle of counters in y,z for a single x.
  76. *
  77. * - Things to consider (ignoring w):
  78. * - input cost_per_request = 16
  79. * - output cost_per_result(ys,zs) = 8 + 8 * ys + ys * zs
  80. * - limited number of requests per hcall (must fit into 4K bytes)
  81. * - 4k = 16 [buffer header] - 16 [request size] * request_count
  82. * - 255 requests per hcall
  83. * - sometimes it will be more efficient to read extra data and discard
  84. */
  85. /*
  86. * Example usage:
  87. * perf stat -e 'hv_24x7/domain=2,offset=8,vcpu=0,lpar=0xffffffff/'
  88. */
/*
 * Sysfs "format" bit-range definitions for perf_event_attr config
 * fields (macros presumably supplied by hv-common.h — confirm there).
 */
/* u3 0-6, one of HV_24X7_PERF_DOMAIN */
EVENT_DEFINE_RANGE_FORMAT(domain, config, 0, 3);
/* u16 */
EVENT_DEFINE_RANGE_FORMAT(core, config, 16, 31);
EVENT_DEFINE_RANGE_FORMAT(vcpu, config, 16, 31);
/* u32, see "data_offset" */
EVENT_DEFINE_RANGE_FORMAT(offset, config, 32, 63);
/* u16 */
EVENT_DEFINE_RANGE_FORMAT(lpar, config1, 0, 15);

/* unused config bits — defined so misuse can be detected */
EVENT_DEFINE_RANGE(reserved1, config, 4, 15);
EVENT_DEFINE_RANGE(reserved2, config1, 16, 63);
EVENT_DEFINE_RANGE(reserved3, config2, 0, 63);
/* Attributes backing the sysfs "format" directory. */
static struct attribute *format_attrs[] = {
	&format_attr_domain.attr,
	&format_attr_offset.attr,
	&format_attr_core.attr,
	&format_attr_vcpu.attr,
	&format_attr_lpar.attr,
	NULL,
};

static struct attribute_group format_group = {
	.name = "format",
	.attrs = format_attrs,
};

/* .attrs for the three groups below are filled in at init time from the catalog. */
static struct attribute_group event_group = {
	.name = "events",
	/* .attrs is set in init */
};

static struct attribute_group event_desc_group = {
	.name = "event_descs",
	/* .attrs is set in init */
};

static struct attribute_group event_long_desc_group = {
	.name = "event_long_descs",
	/* .attrs is set in init */
};
/* Slab cache for single hypervisor data pages (catalog pages, hcall buffers). */
static struct kmem_cache *hv_page_cache;

/*
 * request_buffer and result_buffer are not required to be 4k aligned,
 * but are not allowed to cross any 4k boundary. Aligning them to 4k is
 * the simplest way to ensure that.
 */
#define H24x7_DATA_BUFFER_SIZE	4096
DEFINE_PER_CPU(char, hv_24x7_reqb[H24x7_DATA_BUFFER_SIZE]) __aligned(4096);
DEFINE_PER_CPU(char, hv_24x7_resb[H24x7_DATA_BUFFER_SIZE]) __aligned(4096);
  134. static char *event_name(struct hv_24x7_event_data *ev, int *len)
  135. {
  136. *len = be16_to_cpu(ev->event_name_len) - 2;
  137. return (char *)ev->remainder;
  138. }
  139. static char *event_desc(struct hv_24x7_event_data *ev, int *len)
  140. {
  141. unsigned nl = be16_to_cpu(ev->event_name_len);
  142. __be16 *desc_len = (__be16 *)(ev->remainder + nl - 2);
  143. *len = be16_to_cpu(*desc_len) - 2;
  144. return (char *)ev->remainder + nl;
  145. }
  146. static char *event_long_desc(struct hv_24x7_event_data *ev, int *len)
  147. {
  148. unsigned nl = be16_to_cpu(ev->event_name_len);
  149. __be16 *desc_len_ = (__be16 *)(ev->remainder + nl - 2);
  150. unsigned desc_len = be16_to_cpu(*desc_len_);
  151. __be16 *long_desc_len = (__be16 *)(ev->remainder + nl + desc_len - 2);
  152. *len = be16_to_cpu(*long_desc_len) - 2;
  153. return (char *)ev->remainder + nl + desc_len;
  154. }
  155. static bool event_fixed_portion_is_within(struct hv_24x7_event_data *ev,
  156. void *end)
  157. {
  158. void *start = ev;
  159. return (start + offsetof(struct hv_24x7_event_data, remainder)) < end;
  160. }
  161. /*
  162. * Things we don't check:
  163. * - padding for desc, name, and long/detailed desc is required to be '\0'
  164. * bytes.
  165. *
  166. * Return NULL if we pass end,
  167. * Otherwise return the address of the byte just following the event.
  168. */
/*
 * Walk the three variable-length regions (name, desc, long desc) of a
 * catalog event, validating each embedded length against @end.
 * Returns the address just past the event, or NULL if any region is
 * malformed or would run past @end.
 */
static void *event_end(struct hv_24x7_event_data *ev, void *end)
{
	void *start = ev;
	__be16 *dl_, *ldl_;
	unsigned dl, ldl;
	unsigned nl = be16_to_cpu(ev->event_name_len);

	/* each length field counts its own 2 bytes, so < 2 is nonsense */
	if (nl < 2) {
		pr_debug("%s: name length too short: %d", __func__, nl);
		return NULL;
	}

	if (start + nl > end) {
		pr_debug("%s: start=%p + nl=%u > end=%p",
			 __func__, start, nl, end);
		return NULL;
	}

	/* desc length lives in the last 2 bytes of the name region */
	dl_ = (__be16 *)(ev->remainder + nl - 2);
	if (!IS_ALIGNED((uintptr_t)dl_, 2))
		pr_warn("desc len not aligned %p", dl_);
	dl = be16_to_cpu(*dl_);
	if (dl < 2) {
		pr_debug("%s: desc len too short: %d", __func__, dl);
		return NULL;
	}

	if (start + nl + dl > end) {
		pr_debug("%s: (start=%p + nl=%u + dl=%u)=%p > end=%p",
			 __func__, start, nl, dl, start + nl + dl, end);
		return NULL;
	}

	/* long-desc length lives in the last 2 bytes of the desc region */
	ldl_ = (__be16 *)(ev->remainder + nl + dl - 2);
	if (!IS_ALIGNED((uintptr_t)ldl_, 2))
		pr_warn("long desc len not aligned %p", ldl_);
	ldl = be16_to_cpu(*ldl_);
	if (ldl < 2) {
		pr_debug("%s: long desc len too short (ldl=%u)",
			 __func__, ldl);
		return NULL;
	}

	if (start + nl + dl + ldl > end) {
		pr_debug("%s: start=%p + nl=%u + dl=%u + ldl=%u > end=%p",
			 __func__, start, nl, dl, ldl, end);
		return NULL;
	}

	return start + nl + dl + ldl;
}
  213. static unsigned long h_get_24x7_catalog_page_(unsigned long phys_4096,
  214. unsigned long version,
  215. unsigned long index)
  216. {
  217. pr_devel("h_get_24x7_catalog_page(0x%lx, %lu, %lu)",
  218. phys_4096, version, index);
  219. WARN_ON(!IS_ALIGNED(phys_4096, 4096));
  220. return plpar_hcall_norets(H_GET_24X7_CATALOG_PAGE,
  221. phys_4096, version, index);
  222. }
  223. static unsigned long h_get_24x7_catalog_page(char page[],
  224. u64 version, u32 index)
  225. {
  226. return h_get_24x7_catalog_page_(virt_to_phys(page),
  227. version, index);
  228. }
/*
 * Each "core" catalog event is exposed once per domain in this list
 * (the physical core view plus the vcpu home/remote views).
 */
static unsigned core_domains[] = {
	HV_PERF_DOMAIN_PHYS_CORE,
	HV_PERF_DOMAIN_VCPU_HOME_CORE,
	HV_PERF_DOMAIN_VCPU_HOME_CHIP,
	HV_PERF_DOMAIN_VCPU_HOME_NODE,
	HV_PERF_DOMAIN_VCPU_REMOTE_NODE,
};
/* chip event data always yields a single event, core yields multiple */
#define MAX_EVENTS_PER_EVENT_DATA ARRAY_SIZE(core_domains)
  238. static char *event_fmt(struct hv_24x7_event_data *event, unsigned domain)
  239. {
  240. const char *sindex;
  241. const char *lpar;
  242. if (is_physical_domain(domain)) {
  243. lpar = "0x0";
  244. sindex = "core";
  245. } else {
  246. lpar = "?";
  247. sindex = "vcpu";
  248. }
  249. return kasprintf(GFP_KERNEL,
  250. "domain=0x%x,offset=0x%x,%s=?,lpar=%s",
  251. domain,
  252. be16_to_cpu(event->event_counter_offs) +
  253. be16_to_cpu(event->event_group_record_offs),
  254. sindex,
  255. lpar);
  256. }
/*
 * Avoid trusting fw to NUL terminate strings: copy at most @max_len
 * bytes of @maybe_str into a freshly allocated, NUL-terminated string.
 * Caller frees; returns NULL on allocation failure.
 */
static char *memdup_to_str(char *maybe_str, int max_len, gfp_t gfp)
{
	return kasprintf(gfp, "%.*s", max_len, maybe_str);
}
  262. static ssize_t device_show_string(struct device *dev,
  263. struct device_attribute *attr, char *buf)
  264. {
  265. struct dev_ext_attribute *d;
  266. d = container_of(attr, struct dev_ext_attribute, attr);
  267. return sprintf(buf, "%s\n", (char *)d->var);
  268. }
  269. static struct attribute *device_str_attr_create_(char *name, char *str)
  270. {
  271. struct dev_ext_attribute *attr = kzalloc(sizeof(*attr), GFP_KERNEL);
  272. if (!attr)
  273. return NULL;
  274. sysfs_attr_init(&attr->attr.attr);
  275. attr->var = str;
  276. attr->attr.attr.name = name;
  277. attr->attr.attr.mode = 0444;
  278. attr->attr.show = device_show_string;
  279. return &attr->attr.attr;
  280. }
  281. static struct attribute *device_str_attr_create(char *name, int name_max,
  282. int name_nonce,
  283. char *str, size_t str_max)
  284. {
  285. char *n;
  286. char *s = memdup_to_str(str, str_max, GFP_KERNEL);
  287. struct attribute *a;
  288. if (!s)
  289. return NULL;
  290. if (!name_nonce)
  291. n = kasprintf(GFP_KERNEL, "%.*s", name_max, name);
  292. else
  293. n = kasprintf(GFP_KERNEL, "%.*s__%d", name_max, name,
  294. name_nonce);
  295. if (!n)
  296. goto out_s;
  297. a = device_str_attr_create_(n, s);
  298. if (!a)
  299. goto out_n;
  300. return a;
  301. out_n:
  302. kfree(n);
  303. out_s:
  304. kfree(s);
  305. return NULL;
  306. }
  307. static void device_str_attr_destroy(struct attribute *attr)
  308. {
  309. struct dev_ext_attribute *d;
  310. d = container_of(attr, struct dev_ext_attribute, attr.attr);
  311. kfree(d->var);
  312. kfree(d->attr.attr.name);
  313. kfree(d);
  314. }
/*
 * Build the sysfs event attribute for catalog event @ix in @domain.
 * The attribute name is "<event-name><domain-suffix>[__<nonce>]" and
 * its value is the event_fmt() string.  Returns NULL on invalid domain
 * or allocation failure (all intermediates freed).
 */
static struct attribute *event_to_attr(unsigned ix,
				       struct hv_24x7_event_data *event,
				       unsigned domain,
				       int nonce)
{
	int event_name_len;
	char *ev_name, *a_ev_name, *val;
	const char *ev_suffix;
	struct attribute *attr;

	if (!domain_is_valid(domain)) {
		pr_warn("catalog event %u has invalid domain %u\n",
			ix, domain);
		return NULL;
	}

	val = event_fmt(event, domain);
	if (!val)
		return NULL;

	ev_suffix = event_domain_suffix(domain);
	ev_name = event_name(event, &event_name_len);
	/* nonce != 0 means a duplicate name: disambiguate with "__<nonce>" */
	if (!nonce)
		a_ev_name = kasprintf(GFP_KERNEL, "%.*s%s",
				      (int)event_name_len, ev_name, ev_suffix);
	else
		a_ev_name = kasprintf(GFP_KERNEL, "%.*s%s__%d",
				      (int)event_name_len, ev_name, ev_suffix, nonce);

	if (!a_ev_name)
		goto out_val;

	attr = device_str_attr_create_(a_ev_name, val);
	if (!attr)
		goto out_name;

	return attr;
out_name:
	kfree(a_ev_name);
out_val:
	kfree(val);
	return NULL;
}
  352. static struct attribute *event_to_desc_attr(struct hv_24x7_event_data *event,
  353. int nonce)
  354. {
  355. int nl, dl;
  356. char *name = event_name(event, &nl);
  357. char *desc = event_desc(event, &dl);
  358. /* If there isn't a description, don't create the sysfs file */
  359. if (!dl)
  360. return NULL;
  361. return device_str_attr_create(name, nl, nonce, desc, dl);
  362. }
  363. static struct attribute *
  364. event_to_long_desc_attr(struct hv_24x7_event_data *event, int nonce)
  365. {
  366. int nl, dl;
  367. char *name = event_name(event, &nl);
  368. char *desc = event_long_desc(event, &dl);
  369. /* If there isn't a description, don't create the sysfs file */
  370. if (!dl)
  371. return NULL;
  372. return device_str_attr_create(name, nl, nonce, desc, dl);
  373. }
  374. static ssize_t event_data_to_attrs(unsigned ix, struct attribute **attrs,
  375. struct hv_24x7_event_data *event, int nonce)
  376. {
  377. unsigned i;
  378. switch (event->domain) {
  379. case HV_PERF_DOMAIN_PHYS_CHIP:
  380. *attrs = event_to_attr(ix, event, event->domain, nonce);
  381. return 1;
  382. case HV_PERF_DOMAIN_PHYS_CORE:
  383. for (i = 0; i < ARRAY_SIZE(core_domains); i++) {
  384. attrs[i] = event_to_attr(ix, event, core_domains[i],
  385. nonce);
  386. if (!attrs[i]) {
  387. pr_warn("catalog event %u: individual attr %u "
  388. "creation failure\n", ix, i);
  389. for (; i; i--)
  390. device_str_attr_destroy(attrs[i - 1]);
  391. return -1;
  392. }
  393. }
  394. return i;
  395. default:
  396. pr_warn("catalog event %u: domain %u is not allowed in the "
  397. "catalog\n", ix, event->domain);
  398. return -1;
  399. }
  400. }
  401. static size_t event_to_attr_ct(struct hv_24x7_event_data *event)
  402. {
  403. switch (event->domain) {
  404. case HV_PERF_DOMAIN_PHYS_CHIP:
  405. return 1;
  406. case HV_PERF_DOMAIN_PHYS_CORE:
  407. return ARRAY_SIZE(core_domains);
  408. default:
  409. return 0;
  410. }
  411. }
  412. static unsigned long vmalloc_to_phys(void *v)
  413. {
  414. struct page *p = vmalloc_to_page(v);
  415. BUG_ON(!p);
  416. return page_to_phys(p) + offset_in_page(v);
  417. }
  418. /* */
/* rb-tree node used to detect duplicate (name, domain) catalog events. */
struct event_uniq {
	struct rb_node node;
	const char *name;	/* points into the catalog buffer; not owned */
	int nl;			/* length of @name in bytes */
	unsigned ct;		/* duplicates seen so far (0 = unique) */
	unsigned domain;
};
  426. static int memord(const void *d1, size_t s1, const void *d2, size_t s2)
  427. {
  428. if (s1 < s2)
  429. return 1;
  430. if (s2 > s1)
  431. return -1;
  432. return memcmp(d1, d2, s1);
  433. }
  434. static int ev_uniq_ord(const void *v1, size_t s1, unsigned d1, const void *v2,
  435. size_t s2, unsigned d2)
  436. {
  437. int r = memord(v1, s1, v2, s2);
  438. if (r)
  439. return r;
  440. if (d1 > d2)
  441. return 1;
  442. if (d2 > d1)
  443. return -1;
  444. return 0;
  445. }
/*
 * Record a (name, domain) pair in the duplicate-detection rb-tree.
 * Returns 0 for a first occurrence, the occurrence count (used as the
 * "__<n>" name nonce) for duplicates, or -ENOMEM.  @name is referenced,
 * not copied — it must outlive the tree.
 */
static int event_uniq_add(struct rb_root *root, const char *name, int nl,
			  unsigned domain)
{
	struct rb_node **new = &(root->rb_node), *parent = NULL;
	struct event_uniq *data;

	/* Figure out where to put new node */
	while (*new) {
		struct event_uniq *it;
		int result;

		it = container_of(*new, struct event_uniq, node);
		result = ev_uniq_ord(name, nl, domain, it->name, it->nl,
					it->domain);

		parent = *new;
		if (result < 0)
			new = &((*new)->rb_left);
		else if (result > 0)
			new = &((*new)->rb_right);
		else {
			/* duplicate: bump the count and hand it back as nonce */
			it->ct++;
			pr_info("found a duplicate event %.*s, ct=%u\n", nl,
						name, it->ct);
			return it->ct;
		}
	}

	data = kmalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	*data = (struct event_uniq) {
		.name = name,
		.nl = nl,
		.ct = 0,
		.domain = domain,
	};

	/* Add new node and rebalance tree. */
	rb_link_node(&data->node, parent, new);
	rb_insert_color(&data->node, root);

	/* data->ct */
	return 0;
}
/* Free every node of the duplicate-detection tree (names are not freed). */
static void event_uniq_destroy(struct rb_root *root)
{
	/*
	 * the strings we point to are in the giant block of memory filled by
	 * the catalog, and are freed separately.
	 */
	struct event_uniq *pos, *n;

	rbtree_postorder_for_each_entry_safe(pos, n, root, node)
		kfree(pos);
}
  495. /*
  496. * ensure the event structure's sizes are self consistent and don't cause us to
  497. * read outside of the event
  498. *
  499. * On success, return the event length in bytes.
  500. * Otherwise, return -1 (and print as appropriate).
  501. */
/*
 * ensure the event structure's sizes are self consistent and don't cause us to
 * read outside of the event
 *
 * On success, return the event length in bytes.
 * Otherwise, return -1 (and print as appropriate).
 */
static ssize_t catalog_event_len_validate(struct hv_24x7_event_data *event,
					  size_t event_idx,
					  size_t event_data_bytes,
					  size_t event_entry_count,
					  size_t offset, void *end)
{
	ssize_t ev_len;
	void *ev_end, *calc_ev_end;

	/* ran past the declared data region */
	if (offset >= event_data_bytes)
		return -1;

	/* more data than the catalog said there were events for */
	if (event_idx >= event_entry_count) {
		pr_devel("catalog event data has %zu bytes of padding after last event\n",
				event_data_bytes - offset);
		return -1;
	}

	if (!event_fixed_portion_is_within(event, end)) {
		pr_warn("event %zu fixed portion is not within range\n",
				event_idx);
		return -1;
	}

	ev_len = be16_to_cpu(event->length);

	/* not fatal, but catalog events are expected to be 16-byte multiples */
	if (ev_len % 16)
		pr_info("event %zu has length %zu not divisible by 16: event=%pK\n",
				event_idx, ev_len, event);

	ev_end = (__u8 *)event + ev_len;
	if (ev_end > end) {
		pr_warn("event %zu has .length=%zu, ends after buffer end: ev_end=%pK > end=%pK, offset=%zu\n",
				event_idx, ev_len, ev_end, end,
				offset);
		return -1;
	}

	/* cross-check the declared length against the embedded sub-lengths */
	calc_ev_end = event_end(event, end);
	if (!calc_ev_end) {
		pr_warn("event %zu has a calculated length which exceeds buffer length %zu: event=%pK end=%pK, offset=%zu\n",
			event_idx, event_data_bytes, event, end,
			offset);
		return -1;
	}

	if (calc_ev_end > ev_end) {
		pr_warn("event %zu exceeds it's own length: event=%pK, end=%pK, offset=%zu, calc_ev_end=%pK\n",
			event_idx, event, ev_end, offset, calc_ev_end);
		return -1;
	}

	return ev_len;
}
/* upper bound for a page count so that count * 4096 cannot overflow size_t */
#define MAX_4K (SIZE_MAX / 4096)

/*
 * Read the hypervisor's 24x7 catalog, validate it, and build the three
 * NULL-terminated sysfs attribute vectors (events, descriptions, long
 * descriptions).  On success the vectors are returned through the out
 * parameters (caller owns them); on failure all three are set to NULL
 * and a -errno is returned.
 *
 * The catalog is scanned twice: once to count/validate events and size
 * the vectors, once to actually create the attributes.
 */
static int create_events_from_catalog(struct attribute ***events_,
		struct attribute ***event_descs_,
		struct attribute ***event_long_descs_)
{
	unsigned long hret;
	size_t catalog_len, catalog_page_len, event_entry_count,
	       event_data_len, event_data_offs,
	       event_data_bytes, junk_events, event_idx, event_attr_ct, i,
	       attr_max, event_idx_last, desc_ct, long_desc_ct;
	ssize_t ct, ev_len;
	uint32_t catalog_version_num;
	struct attribute **events, **event_descs, **event_long_descs;
	struct hv_24x7_catalog_page_0 *page_0 =
		kmem_cache_alloc(hv_page_cache, GFP_KERNEL);
	void *page = page_0;
	void *event_data, *end;
	struct hv_24x7_event_data *event;
	struct rb_root ev_uniq = RB_ROOT;
	int ret = 0;

	if (!page) {
		ret = -ENOMEM;
		goto e_out;
	}

	/* page 0 carries the catalog header (version, sizes, offsets) */
	hret = h_get_24x7_catalog_page(page, 0, 0);
	if (hret) {
		ret = -EIO;
		goto e_free;
	}

	catalog_version_num = be64_to_cpu(page_0->version);
	catalog_page_len = be32_to_cpu(page_0->length);

	if (MAX_4K < catalog_page_len) {
		pr_err("invalid page count: %zu\n", catalog_page_len);
		ret = -EIO;
		goto e_free;
	}

	catalog_len = catalog_page_len * 4096;

	event_entry_count = be16_to_cpu(page_0->event_entry_count);
	event_data_offs = be16_to_cpu(page_0->event_data_offs);
	event_data_len = be16_to_cpu(page_0->event_data_len);

	pr_devel("cv %zu cl %zu eec %zu edo %zu edl %zu\n",
			(size_t)catalog_version_num, catalog_len,
			event_entry_count, event_data_offs, event_data_len);

	/* guard the offset+len arithmetic below against overflow */
	if ((MAX_4K < event_data_len)
			|| (MAX_4K < event_data_offs)
			|| (MAX_4K - event_data_offs < event_data_len)) {
		pr_err("invalid event data offs %zu and/or len %zu\n",
				event_data_offs, event_data_len);
		ret = -EIO;
		goto e_free;
	}

	if ((event_data_offs + event_data_len) > catalog_page_len) {
		pr_err("event data %zu-%zu does not fit inside catalog 0-%zu\n",
				event_data_offs,
				event_data_offs + event_data_len,
				catalog_page_len);
		ret = -EIO;
		goto e_free;
	}

	/* attr_max accumulation below must not overflow either */
	if (SIZE_MAX / MAX_EVENTS_PER_EVENT_DATA - 1 < event_entry_count) {
		pr_err("event_entry_count %zu is invalid\n",
				event_entry_count);
		ret = -EIO;
		goto e_free;
	}

	event_data_bytes = event_data_len * 4096;

	/*
	 * event data can span several pages, events can cross between these
	 * pages. Use vmalloc to make this easier.
	 */
	event_data = vmalloc(event_data_bytes);
	if (!event_data) {
		pr_err("could not allocate event data\n");
		ret = -ENOMEM;
		goto e_free;
	}

	end = event_data + event_data_bytes;

	/*
	 * using vmalloc_to_phys() like this only works if PAGE_SIZE is
	 * divisible by 4096
	 */
	BUILD_BUG_ON(PAGE_SIZE % 4096);

	/* pull all event-data pages from the hypervisor */
	for (i = 0; i < event_data_len; i++) {
		hret = h_get_24x7_catalog_page_(
				vmalloc_to_phys(event_data + i * 4096),
				catalog_version_num,
				i + event_data_offs);
		if (hret) {
			pr_err("failed to get event data in page %zu\n",
					i + event_data_offs);
			ret = -EIO;
			goto e_event_data;
		}
	}

	/*
	 * scan the catalog to determine the number of attributes we need, and
	 * verify it at the same time.
	 */
	for (junk_events = 0, event = event_data, event_idx = 0, attr_max = 0;
	     ;
	     event_idx++, event = (void *)event + ev_len) {
		size_t offset = (void *)event - (void *)event_data;
		char *name;
		int nl;

		ev_len = catalog_event_len_validate(event, event_idx,
						    event_data_bytes,
						    event_entry_count,
						    offset, end);
		if (ev_len < 0)
			break;

		name = event_name(event, &nl);

		if (event->event_group_record_len == 0) {
			pr_devel("invalid event %zu (%.*s): group_record_len == 0, skipping\n",
					event_idx, nl, name);
			junk_events++;
			continue;
		}

		if (!catalog_entry_domain_is_valid(event->domain)) {
			pr_info("event %zu (%.*s) has invalid domain %d\n",
					event_idx, nl, name, event->domain);
			junk_events++;
			continue;
		}

		attr_max += event_to_attr_ct(event);
	}

	event_idx_last = event_idx;
	if (event_idx_last != event_entry_count)
		pr_warn("event buffer ended before listed # of events were parsed (got %zu, wanted %zu, junk %zu)\n",
				event_idx_last, event_entry_count, junk_events);

	/* +1 in each vector for the NULL terminator */
	events = kmalloc_array(attr_max + 1, sizeof(*events), GFP_KERNEL);
	if (!events) {
		ret = -ENOMEM;
		goto e_event_data;
	}

	event_descs = kmalloc_array(event_idx + 1, sizeof(*event_descs),
				GFP_KERNEL);
	if (!event_descs) {
		ret = -ENOMEM;
		goto e_event_attrs;
	}

	event_long_descs = kmalloc_array(event_idx + 1,
			sizeof(*event_long_descs), GFP_KERNEL);
	if (!event_long_descs) {
		ret = -ENOMEM;
		goto e_event_descs;
	}

	/* Iterate over the catalog filling in the attribute vector */
	for (junk_events = 0, event_attr_ct = 0, desc_ct = 0, long_desc_ct = 0,
				event = event_data, event_idx = 0;
			event_idx < event_idx_last;
			event_idx++, ev_len = be16_to_cpu(event->length),
			event = (void *)event + ev_len) {
		char *name;
		int nl;
		int nonce;
		/*
		 * these are the only "bad" events that are intermixed and that
		 * we can ignore without issue. make sure to skip them here
		 */
		if (event->event_group_record_len == 0)
			continue;
		if (!catalog_entry_domain_is_valid(event->domain))
			continue;

		name = event_name(event, &nl);
		nonce = event_uniq_add(&ev_uniq, name, nl, event->domain);
		ct = event_data_to_attrs(event_idx, events + event_attr_ct,
					event, nonce);
		if (ct <= 0) {
			pr_warn("event %zu (%.*s) creation failure, skipping\n",
				event_idx, nl, name);
			junk_events++;
		} else {
			event_attr_ct += ct;
			event_descs[desc_ct] = event_to_desc_attr(event, nonce);
			if (event_descs[desc_ct])
				desc_ct++;
			event_long_descs[long_desc_ct] =
					event_to_long_desc_attr(event, nonce);
			if (event_long_descs[long_desc_ct])
				long_desc_ct++;
		}
	}

	pr_info("read %zu catalog entries, created %zu event attrs (%zu failures), %zu descs\n",
			event_idx, event_attr_ct, junk_events, desc_ct);

	events[event_attr_ct] = NULL;
	event_descs[desc_ct] = NULL;
	event_long_descs[long_desc_ct] = NULL;

	event_uniq_destroy(&ev_uniq);
	vfree(event_data);
	kmem_cache_free(hv_page_cache, page);

	*events_ = events;
	*event_descs_ = event_descs;
	*event_long_descs_ = event_long_descs;
	return 0;

e_event_descs:
	kfree(event_descs);
e_event_attrs:
	kfree(events);
e_event_data:
	vfree(event_data);
e_free:
	kmem_cache_free(hv_page_cache, page);
e_out:
	*events_ = NULL;
	*event_descs_ = NULL;
	*event_long_descs_ = NULL;
	return ret;
}
/*
 * sysfs binary "catalog" read: serve at most one hypervisor page per
 * call.  @offset selects the catalog page (offset / 4096) and the byte
 * within it; a short read is returned at each page boundary and 0 past
 * the end of the catalog.
 */
static ssize_t catalog_read(struct file *filp, struct kobject *kobj,
			    struct bin_attribute *bin_attr, char *buf,
			    loff_t offset, size_t count)
{
	unsigned long hret;
	ssize_t ret = 0;
	size_t catalog_len = 0, catalog_page_len = 0;
	loff_t page_offset = 0;
	loff_t offset_in_page;
	size_t copy_len;
	uint64_t catalog_version_num = 0;
	void *page = kmem_cache_alloc(hv_page_cache, GFP_USER);
	struct hv_24x7_catalog_page_0 *page_0 = page;

	if (!page)
		return -ENOMEM;

	/* always fetch page 0 first: it holds the length and version */
	hret = h_get_24x7_catalog_page(page, 0, 0);
	if (hret) {
		ret = -EIO;
		goto e_free;
	}

	catalog_version_num = be64_to_cpu(page_0->version);
	catalog_page_len = be32_to_cpu(page_0->length);
	catalog_len = catalog_page_len * 4096;

	page_offset = offset / 4096;
	offset_in_page = offset % 4096;

	/* reading past the end of the catalog: return 0 (EOF) */
	if (page_offset >= catalog_page_len)
		goto e_free;

	/* for any page other than 0, re-fetch the requested page */
	if (page_offset != 0) {
		hret = h_get_24x7_catalog_page(page, catalog_version_num,
					       page_offset);
		if (hret) {
			ret = -EIO;
			goto e_free;
		}
	}

	copy_len = 4096 - offset_in_page;
	if (copy_len > count)
		copy_len = count;

	memcpy(buf, page+offset_in_page, copy_len);
	ret = copy_len;

e_free:
	if (hret)
		pr_err("h_get_24x7_catalog_page(ver=%lld, page=%lld) failed:"
		       " rc=%ld\n",
		       catalog_version_num, page_offset, hret);
	kmem_cache_free(hv_page_cache, page);

	pr_devel("catalog_read: offset=%lld(%lld) count=%zu "
			"catalog_len=%zu(%zu) => %zd\n", offset, page_offset,
			count, catalog_len, catalog_page_len, ret);

	return ret;
}
/*
 * PAGE_0_ATTR(_name, _fmt, _expr) - generate a read-only sysfs device
 * attribute whose value is derived from page 0 of the 24x7 catalog.
 *
 * The generated _name##_show() allocates a scratch page from
 * hv_page_cache, fetches catalog page 0 into it, then formats _expr
 * (which may dereference the local 'page_0') into 'buf' via sprintf().
 * Returns -ENOMEM if the page cannot be allocated, -EIO if the
 * catalog-page hcall fails, otherwise the formatted length.
 */
#define PAGE_0_ATTR(_name, _fmt, _expr)				\
static ssize_t _name##_show(struct device *dev,			\
			    struct device_attribute *dev_attr,	\
			    char *buf)				\
{								\
	unsigned long hret;					\
	ssize_t ret = 0;					\
	void *page = kmem_cache_alloc(hv_page_cache, GFP_USER);	\
	struct hv_24x7_catalog_page_0 *page_0 = page;		\
	if (!page)						\
		return -ENOMEM;					\
	hret = h_get_24x7_catalog_page(page, 0, 0);		\
	if (hret) {						\
		ret = -EIO;					\
		goto e_free;					\
	}							\
	ret = sprintf(buf, _fmt, _expr);			\
e_free:								\
	kmem_cache_free(hv_page_cache, page);			\
	return ret;						\
}								\
static DEVICE_ATTR_RO(_name)
/*
 * Expose the catalog version and total catalog length (in bytes; page 0
 * stores the length in 4096-byte pages) as read-only sysfs attributes.
 */
PAGE_0_ATTR(catalog_version, "%lld\n",
	    (unsigned long long)be64_to_cpu(page_0->version));
PAGE_0_ATTR(catalog_len, "%lld\n",
	    (unsigned long long)be32_to_cpu(page_0->length) * 4096);
/* The raw catalog is exposed as a binary sysfs file of varying length. */
static BIN_ATTR_RO(catalog, 0/* real length varies */);

static struct bin_attribute *if_bin_attrs[] = {
	&bin_attr_catalog,
	NULL,
};

static struct attribute *if_attrs[] = {
	&dev_attr_catalog_len.attr,
	&dev_attr_catalog_version.attr,
	NULL,
};

/* "interface" sysfs group: the catalog blob plus its version/length. */
static struct attribute_group if_group = {
	.name = "interface",
	.bin_attrs = if_bin_attrs,
	.attrs = if_attrs,
};

/* All sysfs attribute groups registered with the PMU below. */
static const struct attribute_group *attr_groups[] = {
	&format_group,
	&event_group,
	&event_desc_group,
	&event_long_desc_group,
	&if_group,
	NULL,
};
  855. static void log_24x7_hcall(struct hv_24x7_request_buffer *request_buffer,
  856. struct hv_24x7_data_result_buffer *result_buffer,
  857. unsigned long ret)
  858. {
  859. struct hv_24x7_request *req;
  860. req = &request_buffer->requests[0];
  861. pr_notice_ratelimited("hcall failed: [%d %#x %#x %d] => "
  862. "ret 0x%lx (%ld) detail=0x%x failing ix=%x\n",
  863. req->performance_domain, req->data_offset,
  864. req->starting_ix, req->starting_lpar_ix, ret, ret,
  865. result_buffer->detailed_rc,
  866. result_buffer->failing_request_ix);
  867. }
  868. /*
  869. * Start the process for a new H_GET_24x7_DATA hcall.
  870. */
  871. static void init_24x7_request(struct hv_24x7_request_buffer *request_buffer,
  872. struct hv_24x7_data_result_buffer *result_buffer)
  873. {
  874. memset(request_buffer, 0, 4096);
  875. memset(result_buffer, 0, 4096);
  876. request_buffer->interface_version = HV_24X7_IF_VERSION_CURRENT;
  877. /* memset above set request_buffer->num_requests to 0 */
  878. }
  879. /*
  880. * Commit (i.e perform) the H_GET_24x7_DATA hcall using the data collected
  881. * by 'init_24x7_request()' and 'add_event_to_24x7_request()'.
  882. */
  883. static int make_24x7_request(struct hv_24x7_request_buffer *request_buffer,
  884. struct hv_24x7_data_result_buffer *result_buffer)
  885. {
  886. unsigned long ret;
  887. /*
  888. * NOTE: Due to variable number of array elements in request and
  889. * result buffer(s), sizeof() is not reliable. Use the actual
  890. * allocated buffer size, H24x7_DATA_BUFFER_SIZE.
  891. */
  892. ret = plpar_hcall_norets(H_GET_24X7_DATA,
  893. virt_to_phys(request_buffer), H24x7_DATA_BUFFER_SIZE,
  894. virt_to_phys(result_buffer), H24x7_DATA_BUFFER_SIZE);
  895. if (ret)
  896. log_24x7_hcall(request_buffer, result_buffer, ret);
  897. return ret;
  898. }
  899. /*
  900. * Add the given @event to the next slot in the 24x7 request_buffer.
  901. *
  902. * Note that H_GET_24X7_DATA hcall allows reading several counters'
  903. * values in a single HCALL. We expect the caller to add events to the
  904. * request buffer one by one, make the HCALL and process the results.
  905. */
  906. static int add_event_to_24x7_request(struct perf_event *event,
  907. struct hv_24x7_request_buffer *request_buffer)
  908. {
  909. u16 idx;
  910. int i;
  911. struct hv_24x7_request *req;
  912. if (request_buffer->num_requests > 254) {
  913. pr_devel("Too many requests for 24x7 HCALL %d\n",
  914. request_buffer->num_requests);
  915. return -EINVAL;
  916. }
  917. if (is_physical_domain(event_get_domain(event)))
  918. idx = event_get_core(event);
  919. else
  920. idx = event_get_vcpu(event);
  921. i = request_buffer->num_requests++;
  922. req = &request_buffer->requests[i];
  923. req->performance_domain = event_get_domain(event);
  924. req->data_size = cpu_to_be16(8);
  925. req->data_offset = cpu_to_be32(event_get_offset(event));
  926. req->starting_lpar_ix = cpu_to_be16(event_get_lpar(event)),
  927. req->max_num_lpars = cpu_to_be16(1);
  928. req->starting_ix = cpu_to_be16(idx);
  929. req->max_ix = cpu_to_be16(1);
  930. return 0;
  931. }
  932. static unsigned long single_24x7_request(struct perf_event *event, u64 *count)
  933. {
  934. unsigned long ret;
  935. struct hv_24x7_request_buffer *request_buffer;
  936. struct hv_24x7_data_result_buffer *result_buffer;
  937. BUILD_BUG_ON(sizeof(*request_buffer) > 4096);
  938. BUILD_BUG_ON(sizeof(*result_buffer) > 4096);
  939. request_buffer = (void *)get_cpu_var(hv_24x7_reqb);
  940. result_buffer = (void *)get_cpu_var(hv_24x7_resb);
  941. init_24x7_request(request_buffer, result_buffer);
  942. ret = add_event_to_24x7_request(event, request_buffer);
  943. if (ret)
  944. goto out;
  945. ret = make_24x7_request(request_buffer, result_buffer);
  946. if (ret) {
  947. log_24x7_hcall(request_buffer, result_buffer, ret);
  948. goto out;
  949. }
  950. /* process result from hcall */
  951. *count = be64_to_cpu(result_buffer->results[0].elements[0].element_data[0]);
  952. out:
  953. put_cpu_var(hv_24x7_reqb);
  954. put_cpu_var(hv_24x7_resb);
  955. return ret;
  956. }
/*
 * Validate a 24x7 perf event at creation time: reject events that are
 * not ours, that set reserved config bits, use unsupported modes, are
 * misaligned, target an invalid domain, or exceed the caller's
 * hypervisor permissions — then issue one test hcall to confirm the
 * hypervisor will actually service the event.
 */
static int h_24x7_event_init(struct perf_event *event)
{
	struct hv_perf_caps caps;
	unsigned domain;
	unsigned long hret;
	u64 ct;

	/* Not our event */
	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	/* Unused areas must be 0 */
	if (event_get_reserved1(event) ||
	    event_get_reserved2(event) ||
	    event_get_reserved3(event)) {
		pr_devel("reserved set when forbidden 0x%llx(0x%llx) 0x%llx(0x%llx) 0x%llx(0x%llx)\n",
				event->attr.config,
				event_get_reserved1(event),
				event->attr.config1,
				event_get_reserved2(event),
				event->attr.config2,
				event_get_reserved3(event));
		return -EINVAL;
	}

	/* unsupported modes and filters */
	if (event->attr.exclude_user ||
	    event->attr.exclude_kernel ||
	    event->attr.exclude_hv ||
	    event->attr.exclude_idle ||
	    event->attr.exclude_host ||
	    event->attr.exclude_guest)
		return -EINVAL;

	/* no branch sampling */
	if (has_branch_stack(event))
		return -EOPNOTSUPP;

	/* offset must be 8 byte aligned */
	if (event_get_offset(event) % 8) {
		pr_devel("bad alignment\n");
		return -EINVAL;
	}

	/* Domains above 6 are invalid */
	domain = event_get_domain(event);
	if (domain > 6) {
		pr_devel("invalid domain %d\n", domain);
		return -EINVAL;
	}

	hret = hv_perf_caps_get(&caps);
	if (hret) {
		pr_devel("could not get capabilities: rc=%ld\n", hret);
		return -EIO;
	}

	/* Physical domains & other lpars require extra capabilities */
	if (!caps.collect_privileged && (is_physical_domain(domain) ||
		(event_get_lpar(event) != event_get_lpar_max()))) {
		pr_devel("hv permissions disallow: is_physical_domain:%d, lpar=0x%llx\n",
				is_physical_domain(domain),
				event_get_lpar(event));
		return -EACCES;
	}

	/* see if the event complains */
	if (single_24x7_request(event, &ct)) {
		pr_devel("test hcall failed\n");
		return -EIO;
	}

	return 0;
}
  1021. static u64 h_24x7_get_value(struct perf_event *event)
  1022. {
  1023. unsigned long ret;
  1024. u64 ct;
  1025. ret = single_24x7_request(event, &ct);
  1026. if (ret)
  1027. /* We checked this in event init, shouldn't fail here... */
  1028. return 0;
  1029. return ct;
  1030. }
  1031. static void update_event_count(struct perf_event *event, u64 now)
  1032. {
  1033. s64 prev;
  1034. prev = local64_xchg(&event->hw.prev_count, now);
  1035. local64_add(now - prev, &event->count);
  1036. }
  1037. static void h_24x7_event_read(struct perf_event *event)
  1038. {
  1039. u64 now;
  1040. now = h_24x7_get_value(event);
  1041. update_event_count(event, now);
  1042. }
  1043. static void h_24x7_event_start(struct perf_event *event, int flags)
  1044. {
  1045. if (flags & PERF_EF_RELOAD)
  1046. local64_set(&event->hw.prev_count, h_24x7_get_value(event));
  1047. }
/* Stopping just folds the latest counter delta into the event count. */
static void h_24x7_event_stop(struct perf_event *event, int flags)
{
	h_24x7_event_read(event);
}
  1052. static int h_24x7_event_add(struct perf_event *event, int flags)
  1053. {
  1054. if (flags & PERF_EF_START)
  1055. h_24x7_event_start(event, flags);
  1056. return 0;
  1057. }
/*
 * 24x7 counters are free-running and read-only: there is no per-task
 * context, and stop/del both just fold in the latest counter delta.
 */
static struct pmu h_24x7_pmu = {
	.task_ctx_nr = perf_invalid_context,

	.name = "hv_24x7",
	.attr_groups = attr_groups,
	.event_init  = h_24x7_event_init,
	.add         = h_24x7_event_add,
	.del         = h_24x7_event_stop,
	.start       = h_24x7_event_start,
	.stop        = h_24x7_event_stop,
	.read        = h_24x7_event_read,
};
  1069. static int hv_24x7_init(void)
  1070. {
  1071. int r;
  1072. unsigned long hret;
  1073. struct hv_perf_caps caps;
  1074. if (!firmware_has_feature(FW_FEATURE_LPAR)) {
  1075. pr_debug("not a virtualized system, not enabling\n");
  1076. return -ENODEV;
  1077. }
  1078. hret = hv_perf_caps_get(&caps);
  1079. if (hret) {
  1080. pr_debug("could not obtain capabilities, not enabling, rc=%ld\n",
  1081. hret);
  1082. return -ENODEV;
  1083. }
  1084. hv_page_cache = kmem_cache_create("hv-page-4096", 4096, 4096, 0, NULL);
  1085. if (!hv_page_cache)
  1086. return -ENOMEM;
  1087. /* sampling not supported */
  1088. h_24x7_pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;
  1089. r = create_events_from_catalog(&event_group.attrs,
  1090. &event_desc_group.attrs,
  1091. &event_long_desc_group.attrs);
  1092. if (r)
  1093. return r;
  1094. r = perf_pmu_register(&h_24x7_pmu, h_24x7_pmu.name, -1);
  1095. if (r)
  1096. return r;
  1097. return 0;
  1098. }
  1099. device_initcall(hv_24x7_init);