trace_events_hist.c 24 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096
  1. /*
  2. * trace_events_hist - trace event hist triggers
  3. *
  4. * This program is free software; you can redistribute it and/or modify
  5. * it under the terms of the GNU General Public License as published by
  6. * the Free Software Foundation; either version 2 of the License, or
  7. * (at your option) any later version.
  8. *
  9. * This program is distributed in the hope that it will be useful,
  10. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  11. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  12. * GNU General Public License for more details.
  13. *
  14. * Copyright (C) 2015 Tom Zanussi <tom.zanussi@linux.intel.com>
  15. */
  16. #include <linux/module.h>
  17. #include <linux/kallsyms.h>
  18. #include <linux/mutex.h>
  19. #include <linux/slab.h>
  20. #include <linux/stacktrace.h>
  21. #include "tracing_map.h"
  22. #include "trace.h"
struct hist_field;

/*
 * Accessor: extract this field's value from a trace event record,
 * widened to u64.
 */
typedef u64 (*hist_field_fn_t) (struct hist_field *field, void *event);

/*
 * Per-field state for a hist trigger: the underlying ftrace event field
 * (NULL for the synthetic hitcount), HIST_FIELD_FL_* flag bits, the
 * accessor used to pull the value out of a record, and - for key
 * fields - the size and offset within the compound key.
 */
struct hist_field {
	struct ftrace_event_field	*field;
	unsigned long			flags;
	hist_field_fn_t			fn;
	unsigned int			size;	/* u64-aligned key size */
	unsigned int			offset;	/* offset into compound key */
};
/* Value accessor for the implicit hitcount: each event contributes 1. */
static u64 hist_field_counter(struct hist_field *field, void *event)
{
	return 1;
}
  36. static u64 hist_field_string(struct hist_field *hist_field, void *event)
  37. {
  38. char *addr = (char *)(event + hist_field->field->offset);
  39. return (u64)(unsigned long)addr;
  40. }
/*
 * Generate a sized numeric accessor: read a <type> at the field's
 * offset in the record and widen it to u64.
 */
#define DEFINE_HIST_FIELD_FN(type)					\
static u64 hist_field_##type(struct hist_field *hist_field, void *event)\
{									\
	type *addr = (type *)(event + hist_field->field->offset);	\
									\
	return (u64)*addr;						\
}

DEFINE_HIST_FIELD_FN(s64);
DEFINE_HIST_FIELD_FN(u64);
DEFINE_HIST_FIELD_FN(s32);
DEFINE_HIST_FIELD_FN(u32);
DEFINE_HIST_FIELD_FN(s16);
DEFINE_HIST_FIELD_FN(u16);
DEFINE_HIST_FIELD_FN(s8);
DEFINE_HIST_FIELD_FN(u8);

/* iterate over all fields: values first, then keys */
#define for_each_hist_field(i, hist_data)	\
	for ((i) = 0; (i) < (hist_data)->n_fields; (i)++)

/* iterate over the value fields only (fields[0..n_vals)) */
#define for_each_hist_val_field(i, hist_data)	\
	for ((i) = 0; (i) < (hist_data)->n_vals; (i)++)

/* iterate over the key fields only (fields[n_vals..n_fields)) */
#define for_each_hist_key_field(i, hist_data)	\
	for ((i) = (hist_data)->n_vals; (i) < (hist_data)->n_fields; (i)++)

#define HITCOUNT_IDX 0	/* the hitcount is always fields[0] */

#define HIST_KEY_SIZE_MAX	(MAX_FILTER_STR_VAL + sizeof(u64))

enum hist_field_flags {
	HIST_FIELD_FL_HITCOUNT	= 1,	/* synthetic per-event counter */
	HIST_FIELD_FL_KEY	= 2,	/* field is part of the key */
	HIST_FIELD_FL_STRING	= 4,	/* accessor yields a string address */
};
/*
 * Parsed form of a hist trigger command string: the raw "keys=",
 * "vals=" and "sort=" substrings, the pause/cont/clear modifiers, and
 * the requested map size as a power-of-2 bit count (0 = default).
 */
struct hist_trigger_attrs {
	char		*keys_str;
	char		*vals_str;
	char		*sort_key_str;
	bool		pause;
	bool		cont;
	bool		clear;
	unsigned int	map_bits;
};

/*
 * Runtime state of one hist trigger.  fields[] holds the n_vals value
 * fields (hitcount first) followed by the n_keys key fields; key_size
 * is the total u64-aligned compound key size.
 */
struct hist_trigger_data {
	struct hist_field		*fields[TRACING_MAP_FIELDS_MAX];
	unsigned int			n_vals;
	unsigned int			n_keys;
	unsigned int			n_fields;
	unsigned int			key_size;
	struct tracing_map_sort_key	sort_keys[TRACING_MAP_SORT_KEYS_MAX];
	unsigned int			n_sort_keys;
	struct trace_event_file		*event_file;
	struct hist_trigger_attrs	*attrs;
	struct tracing_map		*map;
};
  90. static hist_field_fn_t select_value_fn(int field_size, int field_is_signed)
  91. {
  92. hist_field_fn_t fn = NULL;
  93. switch (field_size) {
  94. case 8:
  95. if (field_is_signed)
  96. fn = hist_field_s64;
  97. else
  98. fn = hist_field_u64;
  99. break;
  100. case 4:
  101. if (field_is_signed)
  102. fn = hist_field_s32;
  103. else
  104. fn = hist_field_u32;
  105. break;
  106. case 2:
  107. if (field_is_signed)
  108. fn = hist_field_s16;
  109. else
  110. fn = hist_field_u16;
  111. break;
  112. case 1:
  113. if (field_is_signed)
  114. fn = hist_field_s8;
  115. else
  116. fn = hist_field_u8;
  117. break;
  118. }
  119. return fn;
  120. }
  121. static int parse_map_size(char *str)
  122. {
  123. unsigned long size, map_bits;
  124. int ret;
  125. strsep(&str, "=");
  126. if (!str) {
  127. ret = -EINVAL;
  128. goto out;
  129. }
  130. ret = kstrtoul(str, 0, &size);
  131. if (ret)
  132. goto out;
  133. map_bits = ilog2(roundup_pow_of_two(size));
  134. if (map_bits < TRACING_MAP_BITS_MIN ||
  135. map_bits > TRACING_MAP_BITS_MAX)
  136. ret = -EINVAL;
  137. else
  138. ret = map_bits;
  139. out:
  140. return ret;
  141. }
  142. static void destroy_hist_trigger_attrs(struct hist_trigger_attrs *attrs)
  143. {
  144. if (!attrs)
  145. return;
  146. kfree(attrs->sort_key_str);
  147. kfree(attrs->keys_str);
  148. kfree(attrs->vals_str);
  149. kfree(attrs);
  150. }
  151. static struct hist_trigger_attrs *parse_hist_trigger_attrs(char *trigger_str)
  152. {
  153. struct hist_trigger_attrs *attrs;
  154. int ret = 0;
  155. attrs = kzalloc(sizeof(*attrs), GFP_KERNEL);
  156. if (!attrs)
  157. return ERR_PTR(-ENOMEM);
  158. while (trigger_str) {
  159. char *str = strsep(&trigger_str, ":");
  160. if ((strncmp(str, "key=", strlen("key=")) == 0) ||
  161. (strncmp(str, "keys=", strlen("keys=")) == 0))
  162. attrs->keys_str = kstrdup(str, GFP_KERNEL);
  163. else if ((strncmp(str, "val=", strlen("val=")) == 0) ||
  164. (strncmp(str, "vals=", strlen("vals=")) == 0) ||
  165. (strncmp(str, "values=", strlen("values=")) == 0))
  166. attrs->vals_str = kstrdup(str, GFP_KERNEL);
  167. else if (strncmp(str, "sort=", strlen("sort=")) == 0)
  168. attrs->sort_key_str = kstrdup(str, GFP_KERNEL);
  169. else if (strcmp(str, "pause") == 0)
  170. attrs->pause = true;
  171. else if ((strcmp(str, "cont") == 0) ||
  172. (strcmp(str, "continue") == 0))
  173. attrs->cont = true;
  174. else if (strcmp(str, "clear") == 0)
  175. attrs->clear = true;
  176. else if (strncmp(str, "size=", strlen("size=")) == 0) {
  177. int map_bits = parse_map_size(str);
  178. if (map_bits < 0) {
  179. ret = map_bits;
  180. goto free;
  181. }
  182. attrs->map_bits = map_bits;
  183. } else {
  184. ret = -EINVAL;
  185. goto free;
  186. }
  187. }
  188. if (!attrs->keys_str) {
  189. ret = -EINVAL;
  190. goto free;
  191. }
  192. return attrs;
  193. free:
  194. destroy_hist_trigger_attrs(attrs);
  195. return ERR_PTR(ret);
  196. }
/* Free one hist_field (the ftrace event field itself is not owned by us). */
static void destroy_hist_field(struct hist_field *hist_field)
{
	kfree(hist_field);
}
  201. static struct hist_field *create_hist_field(struct ftrace_event_field *field,
  202. unsigned long flags)
  203. {
  204. struct hist_field *hist_field;
  205. if (field && is_function_field(field))
  206. return NULL;
  207. hist_field = kzalloc(sizeof(struct hist_field), GFP_KERNEL);
  208. if (!hist_field)
  209. return NULL;
  210. if (flags & HIST_FIELD_FL_HITCOUNT) {
  211. hist_field->fn = hist_field_counter;
  212. goto out;
  213. }
  214. if (is_string_field(field)) {
  215. flags |= HIST_FIELD_FL_STRING;
  216. hist_field->fn = hist_field_string;
  217. } else {
  218. hist_field->fn = select_value_fn(field->size,
  219. field->is_signed);
  220. if (!hist_field->fn) {
  221. destroy_hist_field(hist_field);
  222. return NULL;
  223. }
  224. }
  225. out:
  226. hist_field->field = field;
  227. hist_field->flags = flags;
  228. return hist_field;
  229. }
  230. static void destroy_hist_fields(struct hist_trigger_data *hist_data)
  231. {
  232. unsigned int i;
  233. for (i = 0; i < TRACING_MAP_FIELDS_MAX; i++) {
  234. if (hist_data->fields[i]) {
  235. destroy_hist_field(hist_data->fields[i]);
  236. hist_data->fields[i] = NULL;
  237. }
  238. }
  239. }
  240. static int create_hitcount_val(struct hist_trigger_data *hist_data)
  241. {
  242. hist_data->fields[HITCOUNT_IDX] =
  243. create_hist_field(NULL, HIST_FIELD_FL_HITCOUNT);
  244. if (!hist_data->fields[HITCOUNT_IDX])
  245. return -ENOMEM;
  246. hist_data->n_vals++;
  247. if (WARN_ON(hist_data->n_vals > TRACING_MAP_VALS_MAX))
  248. return -EINVAL;
  249. return 0;
  250. }
  251. static int create_val_field(struct hist_trigger_data *hist_data,
  252. unsigned int val_idx,
  253. struct trace_event_file *file,
  254. char *field_str)
  255. {
  256. struct ftrace_event_field *field = NULL;
  257. unsigned long flags = 0;
  258. int ret = 0;
  259. if (WARN_ON(val_idx >= TRACING_MAP_VALS_MAX))
  260. return -EINVAL;
  261. field = trace_find_event_field(file->event_call, field_str);
  262. if (!field) {
  263. ret = -EINVAL;
  264. goto out;
  265. }
  266. hist_data->fields[val_idx] = create_hist_field(field, flags);
  267. if (!hist_data->fields[val_idx]) {
  268. ret = -ENOMEM;
  269. goto out;
  270. }
  271. ++hist_data->n_vals;
  272. if (WARN_ON(hist_data->n_vals > TRACING_MAP_VALS_MAX))
  273. ret = -EINVAL;
  274. out:
  275. return ret;
  276. }
/*
 * Build the value-field list from the "vals=" attribute.  fields[0] is
 * always the hitcount; explicit "hitcount" entries in the list are
 * skipped so it isn't counted twice.
 */
static int create_val_fields(struct hist_trigger_data *hist_data,
			     struct trace_event_file *file)
{
	char *fields_str, *field_str;
	unsigned int i, j;
	int ret;

	ret = create_hitcount_val(hist_data);
	if (ret)
		goto out;

	fields_str = hist_data->attrs->vals_str;
	if (!fields_str)	/* no vals= given: hitcount only */
		goto out;

	strsep(&fields_str, "=");	/* strip the "vals" prefix */
	if (!fields_str)
		goto out;

	/* i counts parsed names; j is the next free slot (0 = hitcount) */
	for (i = 0, j = 1; i < TRACING_MAP_VALS_MAX &&
		     j < TRACING_MAP_VALS_MAX; i++) {
		field_str = strsep(&fields_str, ",");
		if (!field_str)
			break;
		if (strcmp(field_str, "hitcount") == 0)
			continue;	/* already created above */
		ret = create_val_field(hist_data, j++, file, field_str);
		if (ret)
			goto out;
	}
	/* leftover input is only OK if it's a trailing "hitcount" */
	if (fields_str && (strcmp(fields_str, "hitcount") != 0))
		ret = -EINVAL;
 out:
	return ret;
}
/*
 * Parse one key field name and install it at fields[@key_idx], placed
 * at byte offset @key_offset within the compound key.  Returns the
 * u64-aligned size the key consumes (so the caller can advance the
 * offset), or a negative error.
 */
static int create_key_field(struct hist_trigger_data *hist_data,
			    unsigned int key_idx,
			    unsigned int key_offset,
			    struct trace_event_file *file,
			    char *field_str)
{
	struct ftrace_event_field *field = NULL;
	unsigned long flags = 0;
	unsigned int key_size;
	int ret = 0;

	if (WARN_ON(key_idx >= TRACING_MAP_FIELDS_MAX))
		return -EINVAL;

	flags |= HIST_FIELD_FL_KEY;

	field = trace_find_event_field(file->event_call, field_str);
	if (!field) {
		ret = -EINVAL;
		goto out;
	}

	key_size = field->size;

	hist_data->fields[key_idx] = create_hist_field(field, flags);
	if (!hist_data->fields[key_idx]) {
		ret = -ENOMEM;
		goto out;
	}

	/* keep each key u64-aligned within the compound key */
	key_size = ALIGN(key_size, sizeof(u64));
	hist_data->fields[key_idx]->size = key_size;
	hist_data->fields[key_idx]->offset = key_offset;

	hist_data->key_size += key_size;
	if (hist_data->key_size > HIST_KEY_SIZE_MAX) {
		ret = -EINVAL;
		goto out;
	}

	hist_data->n_keys++;

	if (WARN_ON(hist_data->n_keys > TRACING_MAP_KEYS_MAX))
		return -EINVAL;

	ret = key_size;
 out:
	return ret;
}
  347. static int create_key_fields(struct hist_trigger_data *hist_data,
  348. struct trace_event_file *file)
  349. {
  350. unsigned int i, key_offset = 0, n_vals = hist_data->n_vals;
  351. char *fields_str, *field_str;
  352. int ret = -EINVAL;
  353. fields_str = hist_data->attrs->keys_str;
  354. if (!fields_str)
  355. goto out;
  356. strsep(&fields_str, "=");
  357. if (!fields_str)
  358. goto out;
  359. for (i = n_vals; i < n_vals + TRACING_MAP_KEYS_MAX; i++) {
  360. field_str = strsep(&fields_str, ",");
  361. if (!field_str)
  362. break;
  363. ret = create_key_field(hist_data, i, key_offset,
  364. file, field_str);
  365. if (ret < 0)
  366. goto out;
  367. key_offset += ret;
  368. }
  369. if (fields_str) {
  370. ret = -EINVAL;
  371. goto out;
  372. }
  373. ret = 0;
  374. out:
  375. return ret;
  376. }
  377. static int create_hist_fields(struct hist_trigger_data *hist_data,
  378. struct trace_event_file *file)
  379. {
  380. int ret;
  381. ret = create_val_fields(hist_data, file);
  382. if (ret)
  383. goto out;
  384. ret = create_key_fields(hist_data, file);
  385. if (ret)
  386. goto out;
  387. hist_data->n_fields = hist_data->n_vals + hist_data->n_keys;
  388. out:
  389. return ret;
  390. }
  391. static int is_descending(const char *str)
  392. {
  393. if (!str)
  394. return 0;
  395. if (strcmp(str, "descending") == 0)
  396. return 1;
  397. if (strcmp(str, "ascending") == 0)
  398. return 0;
  399. return -EINVAL;
  400. }
/*
 * Parse the "sort=" attribute into hist_data->sort_keys[].  Each entry
 * is "<field>[.descending|.ascending]"; "hitcount" refers to the
 * implicit counter (its field_idx is 0 already, from kzalloc).  With
 * no sort attribute at all we fall back to a single hitcount key.
 */
static int create_sort_keys(struct hist_trigger_data *hist_data)
{
	char *fields_str = hist_data->attrs->sort_key_str;
	struct ftrace_event_field *field = NULL;
	struct tracing_map_sort_key *sort_key;
	int descending, ret = 0;
	unsigned int i, j;

	hist_data->n_sort_keys = 1; /* we always have at least one, hitcount */

	if (!fields_str)
		goto out;

	strsep(&fields_str, "=");	/* strip the "sort" prefix */
	if (!fields_str) {
		ret = -EINVAL;
		goto out;
	}

	for (i = 0; i < TRACING_MAP_SORT_KEYS_MAX; i++) {
		char *field_str, *field_name;

		sort_key = &hist_data->sort_keys[i];

		field_str = strsep(&fields_str, ",");
		if (!field_str) {
			if (i == 0)	/* "sort=" with nothing after it */
				ret = -EINVAL;
			break;
		}

		/* input remaining after the last allowed key: too many */
		if ((i == TRACING_MAP_SORT_KEYS_MAX - 1) && fields_str) {
			ret = -EINVAL;
			break;
		}

		field_name = strsep(&field_str, ".");
		if (!field_name) {
			ret = -EINVAL;
			break;
		}

		if (strcmp(field_name, "hitcount") == 0) {
			/* field_idx stays 0 == HITCOUNT_IDX */
			descending = is_descending(field_str);
			if (descending < 0) {
				ret = descending;
				break;
			}
			sort_key->descending = descending;
			continue;
		}

		/* match the name against the non-hitcount fields */
		for (j = 1; j < hist_data->n_fields; j++) {
			field = hist_data->fields[j]->field;
			if (field && (strcmp(field_name, field->name) == 0)) {
				sort_key->field_idx = j;
				descending = is_descending(field_str);
				if (descending < 0) {
					ret = descending;
					goto out;
				}
				sort_key->descending = descending;
				break;
			}
		}
		if (j == hist_data->n_fields) {	/* no field matched */
			ret = -EINVAL;
			break;
		}
	}
	hist_data->n_sort_keys = i;	/* number successfully parsed */
 out:
	return ret;
}
/* Tear down a hist trigger: attrs, fields, the tracing map, then the struct. */
static void destroy_hist_data(struct hist_trigger_data *hist_data)
{
	destroy_hist_trigger_attrs(hist_data->attrs);
	destroy_hist_fields(hist_data);
	tracing_map_destroy(hist_data->map);
	kfree(hist_data);
}
  472. static int create_tracing_map_fields(struct hist_trigger_data *hist_data)
  473. {
  474. struct tracing_map *map = hist_data->map;
  475. struct ftrace_event_field *field;
  476. struct hist_field *hist_field;
  477. unsigned int i, idx;
  478. for_each_hist_field(i, hist_data) {
  479. hist_field = hist_data->fields[i];
  480. if (hist_field->flags & HIST_FIELD_FL_KEY) {
  481. tracing_map_cmp_fn_t cmp_fn;
  482. field = hist_field->field;
  483. if (is_string_field(field))
  484. cmp_fn = tracing_map_cmp_string;
  485. else
  486. cmp_fn = tracing_map_cmp_num(field->size,
  487. field->is_signed);
  488. idx = tracing_map_add_key_field(map,
  489. hist_field->offset,
  490. cmp_fn);
  491. } else
  492. idx = tracing_map_add_sum_field(map);
  493. if (idx < 0)
  494. return idx;
  495. }
  496. return 0;
  497. }
  498. static struct hist_trigger_data *
  499. create_hist_data(unsigned int map_bits,
  500. struct hist_trigger_attrs *attrs,
  501. struct trace_event_file *file)
  502. {
  503. struct hist_trigger_data *hist_data;
  504. int ret = 0;
  505. hist_data = kzalloc(sizeof(*hist_data), GFP_KERNEL);
  506. if (!hist_data)
  507. return ERR_PTR(-ENOMEM);
  508. hist_data->attrs = attrs;
  509. ret = create_hist_fields(hist_data, file);
  510. if (ret)
  511. goto free;
  512. ret = create_sort_keys(hist_data);
  513. if (ret)
  514. goto free;
  515. hist_data->map = tracing_map_create(map_bits, hist_data->key_size,
  516. NULL, hist_data);
  517. if (IS_ERR(hist_data->map)) {
  518. ret = PTR_ERR(hist_data->map);
  519. hist_data->map = NULL;
  520. goto free;
  521. }
  522. ret = create_tracing_map_fields(hist_data);
  523. if (ret)
  524. goto free;
  525. ret = tracing_map_init(hist_data->map);
  526. if (ret)
  527. goto free;
  528. hist_data->event_file = file;
  529. out:
  530. return hist_data;
  531. free:
  532. hist_data->attrs = NULL;
  533. destroy_hist_data(hist_data);
  534. hist_data = ERR_PTR(ret);
  535. goto out;
  536. }
  537. static void hist_trigger_elt_update(struct hist_trigger_data *hist_data,
  538. struct tracing_map_elt *elt,
  539. void *rec)
  540. {
  541. struct hist_field *hist_field;
  542. unsigned int i;
  543. u64 hist_val;
  544. for_each_hist_val_field(i, hist_data) {
  545. hist_field = hist_data->fields[i];
  546. hist_val = hist_field->fn(hist_field, rec);
  547. tracing_map_update_sum(elt, i, hist_val);
  548. }
  549. }
/*
 * Per-event trigger callback: assemble the (possibly compound) key
 * from the event record, insert/look up the map element and update
 * its sums.  Only the on-stack compound_key buffer is used here.
 */
static void event_hist_trigger(struct event_trigger_data *data, void *rec)
{
	struct hist_trigger_data *hist_data = data->private_data;
	char compound_key[HIST_KEY_SIZE_MAX];
	struct hist_field *key_field;
	struct tracing_map_elt *elt;
	u64 field_contents;
	void *key = NULL;
	unsigned int i;

	if (hist_data->n_keys > 1)
		memset(compound_key, 0, hist_data->key_size);

	for_each_hist_key_field(i, hist_data) {
		key_field = hist_data->fields[i];

		field_contents = key_field->fn(key_field, rec);
		if (key_field->flags & HIST_FIELD_FL_STRING)
			/* the accessor returned the string's address */
			key = (void *)(unsigned long)field_contents;
		else
			key = (void *)&field_contents;

		if (hist_data->n_keys > 1) {
			/* copy into this key's slot in the compound key */
			memcpy(compound_key + key_field->offset, key,
			       key_field->size);
		}
	}

	if (hist_data->n_keys > 1)
		key = compound_key;

	elt = tracing_map_insert(hist_data->map, key);
	if (elt)	/* elt can be NULL if the insert failed */
		hist_trigger_elt_update(hist_data, elt, rec);
}
  579. static void
  580. hist_trigger_entry_print(struct seq_file *m,
  581. struct hist_trigger_data *hist_data, void *key,
  582. struct tracing_map_elt *elt)
  583. {
  584. struct hist_field *key_field;
  585. unsigned int i;
  586. u64 uval;
  587. seq_puts(m, "{ ");
  588. for_each_hist_key_field(i, hist_data) {
  589. key_field = hist_data->fields[i];
  590. if (i > hist_data->n_vals)
  591. seq_puts(m, ", ");
  592. if (key_field->flags & HIST_FIELD_FL_STRING) {
  593. seq_printf(m, "%s: %-50s", key_field->field->name,
  594. (char *)(key + key_field->offset));
  595. } else {
  596. uval = *(u64 *)(key + key_field->offset);
  597. seq_printf(m, "%s: %10llu", key_field->field->name,
  598. uval);
  599. }
  600. }
  601. seq_puts(m, " }");
  602. seq_printf(m, " hitcount: %10llu",
  603. tracing_map_read_sum(elt, HITCOUNT_IDX));
  604. for (i = 1; i < hist_data->n_vals; i++) {
  605. seq_printf(m, " %s: %10llu",
  606. hist_data->fields[i]->field->name,
  607. tracing_map_read_sum(elt, i));
  608. }
  609. seq_puts(m, "\n");
  610. }
  611. static int print_entries(struct seq_file *m,
  612. struct hist_trigger_data *hist_data)
  613. {
  614. struct tracing_map_sort_entry **sort_entries = NULL;
  615. struct tracing_map *map = hist_data->map;
  616. unsigned int i, n_entries;
  617. n_entries = tracing_map_sort_entries(map, hist_data->sort_keys,
  618. hist_data->n_sort_keys,
  619. &sort_entries);
  620. if (n_entries < 0)
  621. return n_entries;
  622. for (i = 0; i < n_entries; i++)
  623. hist_trigger_entry_print(m, hist_data,
  624. sort_entries[i]->key,
  625. sort_entries[i]->elt);
  626. tracing_map_destroy_sort_entries(sort_entries, n_entries);
  627. return n_entries;
  628. }
/*
 * seq_file show: find the hist trigger on this event file and dump its
 * header, sorted entries and totals.  event_mutex serializes against
 * trigger add/remove.
 */
static int hist_show(struct seq_file *m, void *v)
{
	struct event_trigger_data *test, *data = NULL;
	struct trace_event_file *event_file;
	struct hist_trigger_data *hist_data;
	int n_entries, ret = 0;

	mutex_lock(&event_mutex);

	event_file = event_file_data(m->private);
	if (unlikely(!event_file)) {	/* event went away underneath us */
		ret = -ENODEV;
		goto out_unlock;
	}

	/* find the (single) hist trigger, if any */
	list_for_each_entry_rcu(test, &event_file->triggers, list) {
		if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
			data = test;
			break;
		}
	}
	if (!data)
		goto out_unlock;

	seq_puts(m, "# event histogram\n#\n# trigger info: ");
	data->ops->print(m, data->ops, data);
	seq_puts(m, "\n");

	hist_data = data->private_data;
	n_entries = print_entries(m, hist_data);
	if (n_entries < 0) {
		ret = n_entries;
		n_entries = 0;	/* still print the totals below */
	}

	seq_printf(m, "\nTotals:\n Hits: %llu\n Entries: %u\n Dropped: %llu\n",
		   (u64)atomic64_read(&hist_data->map->hits),
		   n_entries, (u64)atomic64_read(&hist_data->map->drops));
 out_unlock:
	mutex_unlock(&event_mutex);

	return ret;
}
/* open() for the per-event "hist" file; hist_show renders the histogram. */
static int event_hist_open(struct inode *inode, struct file *file)
{
	return single_open(file, hist_show, file);
}

const struct file_operations event_hist_fops = {
	.open = event_hist_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
  675. static void hist_field_print(struct seq_file *m, struct hist_field *hist_field)
  676. {
  677. seq_printf(m, "%s", hist_field->field->name);
  678. }
/*
 * Reconstruct and print the trigger command string ("hist:keys=...")
 * plus its current [active]/[paused] state - shown in the per-event
 * trigger and hist files.
 */
static int event_hist_trigger_print(struct seq_file *m,
				    struct event_trigger_ops *ops,
				    struct event_trigger_data *data)
{
	struct hist_trigger_data *hist_data = data->private_data;
	struct hist_field *key_field;
	unsigned int i;

	seq_puts(m, "hist:keys=");

	for_each_hist_key_field(i, hist_data) {
		key_field = hist_data->fields[i];

		if (i > hist_data->n_vals)	/* comma after the first key */
			seq_puts(m, ",");

		hist_field_print(m, key_field);
	}

	seq_puts(m, ":vals=");

	for_each_hist_val_field(i, hist_data) {
		if (i == HITCOUNT_IDX)
			seq_puts(m, "hitcount");
		else {
			seq_puts(m, ",");
			hist_field_print(m, hist_data->fields[i]);
		}
	}

	seq_puts(m, ":sort=");

	for (i = 0; i < hist_data->n_sort_keys; i++) {
		struct tracing_map_sort_key *sort_key;

		sort_key = &hist_data->sort_keys[i];

		if (i > 0)
			seq_puts(m, ",");

		if (sort_key->field_idx == HITCOUNT_IDX)
			seq_puts(m, "hitcount");
		else {
			unsigned int idx = sort_key->field_idx;

			if (WARN_ON(idx >= TRACING_MAP_FIELDS_MAX))
				return -EINVAL;

			hist_field_print(m, hist_data->fields[idx]);
		}

		if (sort_key->descending)
			seq_puts(m, ".descending");
	}

	seq_printf(m, ":size=%u", (1 << hist_data->map->map_bits));

	if (data->filter_str)
		seq_printf(m, " if %s", data->filter_str);

	if (data->paused)
		seq_puts(m, " [paused]");
	else
		seq_puts(m, " [active]");

	seq_putc(m, '\n');

	return 0;
}
/*
 * Drop a reference on the trigger; on the final reference free both the
 * generic trigger data and the hist-specific data.
 */
static void event_hist_trigger_free(struct event_trigger_ops *ops,
				    struct event_trigger_data *data)
{
	struct hist_trigger_data *hist_data = data->private_data;

	if (WARN_ON_ONCE(data->ref <= 0))
		return;

	data->ref--;
	if (!data->ref) {
		trigger_data_free(data);
		destroy_hist_data(hist_data);
	}
}
/* Per-trigger ops: event callback, print, init/free lifecycle. */
static struct event_trigger_ops event_hist_trigger_ops = {
	.func			= event_hist_trigger,
	.print			= event_hist_trigger_print,
	.init			= event_trigger_init,
	.free			= event_hist_trigger_free,
};

/* hist triggers use a single set of ops regardless of cmd/param. */
static struct event_trigger_ops *event_hist_get_trigger_ops(char *cmd,
							    char *param)
{
	return &event_hist_trigger_ops;
}
/*
 * Reset a trigger's accumulated data.  The trigger is paused and a
 * grace period waited out so no event-context updater can still be
 * touching the map while it's cleared; the previous pause state is
 * then restored.
 */
static void hist_clear(struct event_trigger_data *data)
{
	struct hist_trigger_data *hist_data = data->private_data;
	bool paused;

	paused = data->paused;

	data->paused = true;
	synchronize_sched();	/* wait for in-flight event triggers */
	tracing_map_clear(hist_data->map);
	data->paused = paused;
}
/*
 * reg() callback: attach a hist trigger to @file.  If one already
 * exists, the new command may only pause/continue/clear it; attaching
 * a second hist trigger fails with -EEXIST.  Returns the number of
 * triggers registered (0 or 1) or a negative error.
 */
static int hist_register_trigger(char *glob, struct event_trigger_ops *ops,
				 struct event_trigger_data *data,
				 struct trace_event_file *file)
{
	struct hist_trigger_data *hist_data = data->private_data;
	struct event_trigger_data *test;
	int ret = 0;

	list_for_each_entry_rcu(test, &file->triggers, list) {
		if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
			/* existing trigger: apply the modifiers to it */
			if (hist_data->attrs->pause)
				test->paused = true;
			else if (hist_data->attrs->cont)
				test->paused = false;
			else if (hist_data->attrs->clear)
				hist_clear(test);
			else
				ret = -EEXIST;
			goto out;
		}
	}

	/* cont/clear only make sense against an existing trigger */
	if (hist_data->attrs->cont || hist_data->attrs->clear) {
		ret = -ENOENT;
		goto out;
	}

	if (hist_data->attrs->pause)
		data->paused = true;	/* start out paused */

	if (data->ops->init) {
		ret = data->ops->init(data->ops, data);
		if (ret < 0)
			goto out;
	}

	list_add_rcu(&data->list, &file->triggers);
	ret++;

	update_cond_flag(file);

	if (trace_event_trigger_enable_disable(file, 1) < 0) {
		/* couldn't enable the event: undo the registration */
		list_del_rcu(&data->list);
		update_cond_flag(file);
		ret--;
	}
 out:
	return ret;
}
/*
 * Parse and instantiate a complete hist trigger command
 * ("hist:keys=...[:...] [if filter]"), or remove one if @glob starts
 * with '!'.  This is the event_command.func for the "hist" command.
 */
static int event_hist_trigger_func(struct event_command *cmd_ops,
				   struct trace_event_file *file,
				   char *glob, char *cmd, char *param)
{
	unsigned int hist_trigger_bits = TRACING_MAP_BITS_DEFAULT;
	struct event_trigger_data *trigger_data;
	struct hist_trigger_attrs *attrs;
	struct event_trigger_ops *trigger_ops;
	struct hist_trigger_data *hist_data;
	char *trigger;
	int ret = 0;

	if (!param)
		return -EINVAL;

	/* separate the trigger from the filter (k:v [if filter]) */
	trigger = strsep(&param, " \t");
	if (!trigger)
		return -EINVAL;

	attrs = parse_hist_trigger_attrs(trigger);
	if (IS_ERR(attrs))
		return PTR_ERR(attrs);

	if (attrs->map_bits)
		hist_trigger_bits = attrs->map_bits;

	/* on success hist_data takes ownership of attrs */
	hist_data = create_hist_data(hist_trigger_bits, attrs, file);
	if (IS_ERR(hist_data)) {
		destroy_hist_trigger_attrs(attrs);
		return PTR_ERR(hist_data);
	}

	trigger_ops = cmd_ops->get_trigger_ops(cmd, trigger);

	ret = -ENOMEM;
	trigger_data = kzalloc(sizeof(*trigger_data), GFP_KERNEL);
	if (!trigger_data)
		goto out_free;

	trigger_data->count = -1;
	trigger_data->ops = trigger_ops;
	trigger_data->cmd_ops = cmd_ops;

	INIT_LIST_HEAD(&trigger_data->list);
	RCU_INIT_POINTER(trigger_data->filter, NULL);

	trigger_data->private_data = hist_data;

	if (glob[0] == '!') {	/* '!hist:...' removes the trigger */
		cmd_ops->unreg(glob+1, trigger_ops, trigger_data, file);
		ret = 0;
		goto out_free;
	}

	if (!param) /* if param is non-empty, it's supposed to be a filter */
		goto out_reg;

	if (!cmd_ops->set_filter)
		goto out_reg;

	ret = cmd_ops->set_filter(param, trigger_data, file);
	if (ret < 0)
		goto out_free;
 out_reg:
	ret = cmd_ops->reg(glob, trigger_ops, trigger_data, file);
	/*
	 * The above returns on success the # of triggers registered,
	 * but if it didn't register any it returns zero.  Consider no
	 * triggers registered a failure too.
	 */
	if (!ret) {
		/* pause/cont/clear on an existing trigger isn't an error */
		if (!(attrs->pause || attrs->cont || attrs->clear))
			ret = -ENOENT;
		goto out_free;
	} else if (ret < 0)
		goto out_free;
	/* Just return zero, not the number of registered triggers */
	ret = 0;
 out:
	return ret;
 out_free:
	if (cmd_ops->set_filter)
		cmd_ops->set_filter(NULL, trigger_data, NULL);

	kfree(trigger_data);

	destroy_hist_data(hist_data);

	goto out;
}
/* The "hist" event command, wired into the trigger infrastructure. */
static struct event_command trigger_hist_cmd = {
	.name			= "hist",
	.trigger_type		= ETT_EVENT_HIST,
	.flags			= EVENT_CMD_FL_NEEDS_REC, /* needs the event record */
	.func			= event_hist_trigger_func,
	.reg			= hist_register_trigger,
	.unreg			= unregister_trigger,
	.get_trigger_ops	= event_hist_get_trigger_ops,
	.set_filter		= set_trigger_filter,
};

/* Register the "hist" command at boot; failure is only a WARN. */
__init int register_trigger_hist_cmd(void)
{
	int ret;

	ret = register_event_command(&trigger_hist_cmd);
	WARN_ON(ret < 0);

	return ret;
}