trace_events_hist.c 42 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
717781779178017811782178317841785178617871788178917901791179217931794179517961797179817991800
  1. /*
  2. * trace_events_hist - trace event hist triggers
  3. *
  4. * This program is free software; you can redistribute it and/or modify
  5. * it under the terms of the GNU General Public License as published by
  6. * the Free Software Foundation; either version 2 of the License, or
  7. * (at your option) any later version.
  8. *
  9. * This program is distributed in the hope that it will be useful,
  10. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  11. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  12. * GNU General Public License for more details.
  13. *
  14. * Copyright (C) 2015 Tom Zanussi <tom.zanussi@linux.intel.com>
  15. */
  16. #include <linux/module.h>
  17. #include <linux/kallsyms.h>
  18. #include <linux/mutex.h>
  19. #include <linux/slab.h>
  20. #include <linux/stacktrace.h>
  21. #include <linux/rculist.h>
  22. #include "tracing_map.h"
  23. #include "trace.h"
struct hist_field;

/* Extracts a u64 value for one histogram field from a raw event record */
typedef u64 (*hist_field_fn_t) (struct hist_field *field, void *event);

/* Max sub-fields a compound field (e.g. a .log2 field) can reference */
#define HIST_FIELD_OPERANDS_MAX 2

/* One histogram key or value field */
struct hist_field {
	struct ftrace_event_field *field; /* backing event field; NULL for hitcount/stacktrace */
	unsigned long flags;              /* HIST_FIELD_FL_* modifiers */
	hist_field_fn_t fn;               /* per-event value-extraction function */
	unsigned int size;                /* size in bytes (key fields: u64-aligned) */
	unsigned int offset;              /* byte offset of this key within the compound key */
	unsigned int is_signed;
	struct hist_field *operands[HIST_FIELD_OPERANDS_MAX]; /* e.g. log2's source field */
};
/* Placeholder value fn for fields with no per-event value (e.g. stacktrace keys) */
static u64 hist_field_none(struct hist_field *field, void *event)
{
	return 0;
}
/* Value fn for the implicit hitcount value: every event contributes 1 */
static u64 hist_field_counter(struct hist_field *field, void *event)
{
	return 1;
}
  44. static u64 hist_field_string(struct hist_field *hist_field, void *event)
  45. {
  46. char *addr = (char *)(event + hist_field->field->offset);
  47. return (u64)(unsigned long)addr;
  48. }
  49. static u64 hist_field_dynstring(struct hist_field *hist_field, void *event)
  50. {
  51. u32 str_item = *(u32 *)(event + hist_field->field->offset);
  52. int str_loc = str_item & 0xffff;
  53. char *addr = (char *)(event + str_loc);
  54. return (u64)(unsigned long)addr;
  55. }
  56. static u64 hist_field_pstring(struct hist_field *hist_field, void *event)
  57. {
  58. char **addr = (char **)(event + hist_field->field->offset);
  59. return (u64)(unsigned long)*addr;
  60. }
  61. static u64 hist_field_log2(struct hist_field *hist_field, void *event)
  62. {
  63. struct hist_field *operand = hist_field->operands[0];
  64. u64 val = operand->fn(operand, event);
  65. return (u64) ilog2(roundup_pow_of_two(val));
  66. }
/*
 * Generate a value-extraction fn for each fixed-size integer type:
 * read the value at the field's offset in the record, widened to u64.
 */
#define DEFINE_HIST_FIELD_FN(type)					\
static u64 hist_field_##type(struct hist_field *hist_field, void *event)\
{									\
	type *addr = (type *)(event + hist_field->field->offset);	\
									\
	return (u64)(unsigned long)*addr;				\
}

DEFINE_HIST_FIELD_FN(s64);
DEFINE_HIST_FIELD_FN(u64);
DEFINE_HIST_FIELD_FN(s32);
DEFINE_HIST_FIELD_FN(u32);
DEFINE_HIST_FIELD_FN(s16);
DEFINE_HIST_FIELD_FN(u16);
DEFINE_HIST_FIELD_FN(s8);
DEFINE_HIST_FIELD_FN(u8);
/* Iterate over all fields: values first ([0, n_vals)), then keys */
#define for_each_hist_field(i, hist_data)	\
	for ((i) = 0; (i) < (hist_data)->n_fields; (i)++)

/* Iterate over value fields only (indices [0, n_vals)) */
#define for_each_hist_val_field(i, hist_data)	\
	for ((i) = 0; (i) < (hist_data)->n_vals; (i)++)

/* Iterate over key fields only (indices [n_vals, n_fields)) */
#define for_each_hist_key_field(i, hist_data)	\
	for ((i) = (hist_data)->n_vals; (i) < (hist_data)->n_fields; (i)++)

#define HIST_STACKTRACE_DEPTH	16
#define HIST_STACKTRACE_SIZE	(HIST_STACKTRACE_DEPTH * sizeof(unsigned long))
#define HIST_STACKTRACE_SKIP	5	/* skip tracing-internal frames */

/* The implicit hitcount value always lives at fields[0] */
#define HITCOUNT_IDX		0

#define HIST_KEY_SIZE_MAX	(MAX_FILTER_STR_VAL + HIST_STACKTRACE_SIZE)

/* Per-field modifier flags set while parsing the trigger string */
enum hist_field_flags {
	HIST_FIELD_FL_HITCOUNT = 1 << 0,
	HIST_FIELD_FL_KEY = 1 << 1,
	HIST_FIELD_FL_STRING = 1 << 2,
	HIST_FIELD_FL_HEX = 1 << 3,
	HIST_FIELD_FL_SYM = 1 << 4,
	HIST_FIELD_FL_SYM_OFFSET = 1 << 5,
	HIST_FIELD_FL_EXECNAME = 1 << 6,
	HIST_FIELD_FL_SYSCALL = 1 << 7,
	HIST_FIELD_FL_STACKTRACE = 1 << 8,
	HIST_FIELD_FL_LOG2 = 1 << 9,
};

/* Parsed form of one "hist:..." trigger string */
struct hist_trigger_attrs {
	char *keys_str;		/* "keys=..." clause (mandatory) */
	char *vals_str;		/* "vals=..." clause */
	char *sort_key_str;	/* "sort=..." clause */
	char *name;		/* "name=..." for named triggers */
	bool pause;
	bool cont;
	bool clear;
	unsigned int map_bits;	/* log2 of requested map size, 0 = default */
};

/* Everything needed to run one hist trigger instance */
struct hist_trigger_data {
	struct hist_field *fields[TRACING_MAP_FIELDS_MAX]; /* values, then keys */
	unsigned int n_vals;
	unsigned int n_keys;
	unsigned int n_fields;	/* n_vals + n_keys */
	unsigned int key_size;	/* total compound-key size, u64-aligned */
	struct tracing_map_sort_key sort_keys[TRACING_MAP_SORT_KEYS_MAX];
	unsigned int n_sort_keys;
	struct trace_event_file *event_file;
	struct hist_trigger_attrs *attrs;
	struct tracing_map *map;
};
  127. static const char *hist_field_name(struct hist_field *field,
  128. unsigned int level)
  129. {
  130. const char *field_name = "";
  131. if (level > 1)
  132. return field_name;
  133. if (field->field)
  134. field_name = field->field->name;
  135. else if (field->flags & HIST_FIELD_FL_LOG2)
  136. field_name = hist_field_name(field->operands[0], ++level);
  137. if (field_name == NULL)
  138. field_name = "";
  139. return field_name;
  140. }
  141. static hist_field_fn_t select_value_fn(int field_size, int field_is_signed)
  142. {
  143. hist_field_fn_t fn = NULL;
  144. switch (field_size) {
  145. case 8:
  146. if (field_is_signed)
  147. fn = hist_field_s64;
  148. else
  149. fn = hist_field_u64;
  150. break;
  151. case 4:
  152. if (field_is_signed)
  153. fn = hist_field_s32;
  154. else
  155. fn = hist_field_u32;
  156. break;
  157. case 2:
  158. if (field_is_signed)
  159. fn = hist_field_s16;
  160. else
  161. fn = hist_field_u16;
  162. break;
  163. case 1:
  164. if (field_is_signed)
  165. fn = hist_field_s8;
  166. else
  167. fn = hist_field_u8;
  168. break;
  169. }
  170. return fn;
  171. }
  172. static int parse_map_size(char *str)
  173. {
  174. unsigned long size, map_bits;
  175. int ret;
  176. strsep(&str, "=");
  177. if (!str) {
  178. ret = -EINVAL;
  179. goto out;
  180. }
  181. ret = kstrtoul(str, 0, &size);
  182. if (ret)
  183. goto out;
  184. map_bits = ilog2(roundup_pow_of_two(size));
  185. if (map_bits < TRACING_MAP_BITS_MIN ||
  186. map_bits > TRACING_MAP_BITS_MAX)
  187. ret = -EINVAL;
  188. else
  189. ret = map_bits;
  190. out:
  191. return ret;
  192. }
/* Free a hist_trigger_attrs and every string it owns; NULL is a no-op */
static void destroy_hist_trigger_attrs(struct hist_trigger_attrs *attrs)
{
	if (!attrs)
		return;

	kfree(attrs->name);
	kfree(attrs->sort_key_str);
	kfree(attrs->keys_str);
	kfree(attrs->vals_str);
	kfree(attrs);
}
  203. static struct hist_trigger_attrs *parse_hist_trigger_attrs(char *trigger_str)
  204. {
  205. struct hist_trigger_attrs *attrs;
  206. int ret = 0;
  207. attrs = kzalloc(sizeof(*attrs), GFP_KERNEL);
  208. if (!attrs)
  209. return ERR_PTR(-ENOMEM);
  210. while (trigger_str) {
  211. char *str = strsep(&trigger_str, ":");
  212. if ((strncmp(str, "key=", strlen("key=")) == 0) ||
  213. (strncmp(str, "keys=", strlen("keys=")) == 0))
  214. attrs->keys_str = kstrdup(str, GFP_KERNEL);
  215. else if ((strncmp(str, "val=", strlen("val=")) == 0) ||
  216. (strncmp(str, "vals=", strlen("vals=")) == 0) ||
  217. (strncmp(str, "values=", strlen("values=")) == 0))
  218. attrs->vals_str = kstrdup(str, GFP_KERNEL);
  219. else if (strncmp(str, "sort=", strlen("sort=")) == 0)
  220. attrs->sort_key_str = kstrdup(str, GFP_KERNEL);
  221. else if (strncmp(str, "name=", strlen("name=")) == 0)
  222. attrs->name = kstrdup(str, GFP_KERNEL);
  223. else if (strcmp(str, "pause") == 0)
  224. attrs->pause = true;
  225. else if ((strcmp(str, "cont") == 0) ||
  226. (strcmp(str, "continue") == 0))
  227. attrs->cont = true;
  228. else if (strcmp(str, "clear") == 0)
  229. attrs->clear = true;
  230. else if (strncmp(str, "size=", strlen("size=")) == 0) {
  231. int map_bits = parse_map_size(str);
  232. if (map_bits < 0) {
  233. ret = map_bits;
  234. goto free;
  235. }
  236. attrs->map_bits = map_bits;
  237. } else {
  238. ret = -EINVAL;
  239. goto free;
  240. }
  241. }
  242. if (!attrs->keys_str) {
  243. ret = -EINVAL;
  244. goto free;
  245. }
  246. return attrs;
  247. free:
  248. destroy_hist_trigger_attrs(attrs);
  249. return ERR_PTR(ret);
  250. }
/*
 * Copy @task's comm into @comm (TASK_COMM_LEN bytes), substituting
 * "<idle>" for the swapper task (pid 0) and "<XXX>" for an impossible
 * negative pid.
 */
static inline void save_comm(char *comm, struct task_struct *task)
{
	if (!task->pid) {
		strcpy(comm, "<idle>");
		return;
	}

	if (WARN_ON_ONCE(task->pid < 0)) {
		strcpy(comm, "<XXX>");
		return;
	}

	memcpy(comm, task->comm, TASK_COMM_LEN);
}
/* Free the per-element comm buffer allocated by hist_trigger_elt_comm_alloc() */
static void hist_trigger_elt_comm_free(struct tracing_map_elt *elt)
{
	kfree((char *)elt->private_data);
}
  267. static int hist_trigger_elt_comm_alloc(struct tracing_map_elt *elt)
  268. {
  269. struct hist_trigger_data *hist_data = elt->map->private_data;
  270. struct hist_field *key_field;
  271. unsigned int i;
  272. for_each_hist_key_field(i, hist_data) {
  273. key_field = hist_data->fields[i];
  274. if (key_field->flags & HIST_FIELD_FL_EXECNAME) {
  275. unsigned int size = TASK_COMM_LEN + 1;
  276. elt->private_data = kzalloc(size, GFP_KERNEL);
  277. if (!elt->private_data)
  278. return -ENOMEM;
  279. break;
  280. }
  281. }
  282. return 0;
  283. }
  284. static void hist_trigger_elt_comm_copy(struct tracing_map_elt *to,
  285. struct tracing_map_elt *from)
  286. {
  287. char *comm_from = from->private_data;
  288. char *comm_to = to->private_data;
  289. if (comm_from)
  290. memcpy(comm_to, comm_from, TASK_COMM_LEN + 1);
  291. }
/* Element-init callback: record the inserting task's comm when claimed */
static void hist_trigger_elt_comm_init(struct tracing_map_elt *elt)
{
	char *comm = elt->private_data;

	if (comm)
		save_comm(comm, current);
}
/* tracing_map element callbacks, used only when a key has .execname */
static const struct tracing_map_ops hist_trigger_elt_comm_ops = {
	.elt_alloc = hist_trigger_elt_comm_alloc,
	.elt_copy = hist_trigger_elt_comm_copy,
	.elt_free = hist_trigger_elt_comm_free,
	.elt_init = hist_trigger_elt_comm_init,
};
  304. static void destroy_hist_field(struct hist_field *hist_field,
  305. unsigned int level)
  306. {
  307. unsigned int i;
  308. if (level > 2)
  309. return;
  310. if (!hist_field)
  311. return;
  312. for (i = 0; i < HIST_FIELD_OPERANDS_MAX; i++)
  313. destroy_hist_field(hist_field->operands[i], level + 1);
  314. kfree(hist_field);
  315. }
  316. static struct hist_field *create_hist_field(struct ftrace_event_field *field,
  317. unsigned long flags)
  318. {
  319. struct hist_field *hist_field;
  320. if (field && is_function_field(field))
  321. return NULL;
  322. hist_field = kzalloc(sizeof(struct hist_field), GFP_KERNEL);
  323. if (!hist_field)
  324. return NULL;
  325. if (flags & HIST_FIELD_FL_HITCOUNT) {
  326. hist_field->fn = hist_field_counter;
  327. goto out;
  328. }
  329. if (flags & HIST_FIELD_FL_STACKTRACE) {
  330. hist_field->fn = hist_field_none;
  331. goto out;
  332. }
  333. if (flags & HIST_FIELD_FL_LOG2) {
  334. unsigned long fl = flags & ~HIST_FIELD_FL_LOG2;
  335. hist_field->fn = hist_field_log2;
  336. hist_field->operands[0] = create_hist_field(field, fl);
  337. hist_field->size = hist_field->operands[0]->size;
  338. goto out;
  339. }
  340. if (WARN_ON_ONCE(!field))
  341. goto out;
  342. if (is_string_field(field)) {
  343. flags |= HIST_FIELD_FL_STRING;
  344. if (field->filter_type == FILTER_STATIC_STRING)
  345. hist_field->fn = hist_field_string;
  346. else if (field->filter_type == FILTER_DYN_STRING)
  347. hist_field->fn = hist_field_dynstring;
  348. else
  349. hist_field->fn = hist_field_pstring;
  350. } else {
  351. hist_field->fn = select_value_fn(field->size,
  352. field->is_signed);
  353. if (!hist_field->fn) {
  354. destroy_hist_field(hist_field, 0);
  355. return NULL;
  356. }
  357. }
  358. out:
  359. hist_field->field = field;
  360. hist_field->flags = flags;
  361. return hist_field;
  362. }
  363. static void destroy_hist_fields(struct hist_trigger_data *hist_data)
  364. {
  365. unsigned int i;
  366. for (i = 0; i < TRACING_MAP_FIELDS_MAX; i++) {
  367. if (hist_data->fields[i]) {
  368. destroy_hist_field(hist_data->fields[i], 0);
  369. hist_data->fields[i] = NULL;
  370. }
  371. }
  372. }
  373. static int create_hitcount_val(struct hist_trigger_data *hist_data)
  374. {
  375. hist_data->fields[HITCOUNT_IDX] =
  376. create_hist_field(NULL, HIST_FIELD_FL_HITCOUNT);
  377. if (!hist_data->fields[HITCOUNT_IDX])
  378. return -ENOMEM;
  379. hist_data->n_vals++;
  380. if (WARN_ON(hist_data->n_vals > TRACING_MAP_VALS_MAX))
  381. return -EINVAL;
  382. return 0;
  383. }
  384. static int create_val_field(struct hist_trigger_data *hist_data,
  385. unsigned int val_idx,
  386. struct trace_event_file *file,
  387. char *field_str)
  388. {
  389. struct ftrace_event_field *field = NULL;
  390. unsigned long flags = 0;
  391. char *field_name;
  392. int ret = 0;
  393. if (WARN_ON(val_idx >= TRACING_MAP_VALS_MAX))
  394. return -EINVAL;
  395. field_name = strsep(&field_str, ".");
  396. if (field_str) {
  397. if (strcmp(field_str, "hex") == 0)
  398. flags |= HIST_FIELD_FL_HEX;
  399. else {
  400. ret = -EINVAL;
  401. goto out;
  402. }
  403. }
  404. field = trace_find_event_field(file->event_call, field_name);
  405. if (!field || !field->size) {
  406. ret = -EINVAL;
  407. goto out;
  408. }
  409. hist_data->fields[val_idx] = create_hist_field(field, flags);
  410. if (!hist_data->fields[val_idx]) {
  411. ret = -ENOMEM;
  412. goto out;
  413. }
  414. ++hist_data->n_vals;
  415. if (WARN_ON(hist_data->n_vals > TRACING_MAP_VALS_MAX))
  416. ret = -EINVAL;
  417. out:
  418. return ret;
  419. }
/*
 * Parse the "vals=" clause and create a value field for each entry.
 * fields[0] is always the implicit hitcount; explicit fields are
 * installed starting at index 1 (j), while a literal "hitcount" entry
 * is skipped since it already exists.
 */
static int create_val_fields(struct hist_trigger_data *hist_data,
			     struct trace_event_file *file)
{
	char *fields_str, *field_str;
	unsigned int i, j;
	int ret;

	ret = create_hitcount_val(hist_data);
	if (ret)
		goto out;

	fields_str = hist_data->attrs->vals_str;
	if (!fields_str)
		goto out;	/* no vals= clause: hitcount only */

	strsep(&fields_str, "=");	/* strip the "vals" prefix */
	if (!fields_str)
		goto out;

	for (i = 0, j = 1; i < TRACING_MAP_VALS_MAX &&
		     j < TRACING_MAP_VALS_MAX; i++) {
		field_str = strsep(&fields_str, ",");
		if (!field_str)
			break;
		if (strcmp(field_str, "hitcount") == 0)
			continue;
		ret = create_val_field(hist_data, j++, file, field_str);
		if (ret)
			goto out;
	}
	/* leftover text (other than "hitcount") means too many values */
	if (fields_str && (strcmp(fields_str, "hitcount") != 0))
		ret = -EINVAL;
 out:
	return ret;
}
/*
 * Parse one "keys=" entry ("field[.modifier]" or "stacktrace") and
 * create the key field at fields[key_idx], placed at @key_offset in
 * the compound key.
 *
 * Returns the (u64-aligned) size this key contributes to the compound
 * key on success, else a negative error.
 */
static int create_key_field(struct hist_trigger_data *hist_data,
			    unsigned int key_idx,
			    unsigned int key_offset,
			    struct trace_event_file *file,
			    char *field_str)
{
	struct ftrace_event_field *field = NULL;
	unsigned long flags = 0;
	unsigned int key_size;
	int ret = 0;

	if (WARN_ON(key_idx >= TRACING_MAP_FIELDS_MAX))
		return -EINVAL;

	flags |= HIST_FIELD_FL_KEY;

	if (strcmp(field_str, "stacktrace") == 0) {
		/* synthetic key: no backing event field */
		flags |= HIST_FIELD_FL_STACKTRACE;
		key_size = sizeof(unsigned long) * HIST_STACKTRACE_DEPTH;
	} else {
		char *field_name = strsep(&field_str, ".");

		if (field_str) {
			if (strcmp(field_str, "hex") == 0)
				flags |= HIST_FIELD_FL_HEX;
			else if (strcmp(field_str, "sym") == 0)
				flags |= HIST_FIELD_FL_SYM;
			else if (strcmp(field_str, "sym-offset") == 0)
				flags |= HIST_FIELD_FL_SYM_OFFSET;
			else if ((strcmp(field_str, "execname") == 0) &&
				 (strcmp(field_name, "common_pid") == 0))
				/* .execname only applies to common_pid */
				flags |= HIST_FIELD_FL_EXECNAME;
			else if (strcmp(field_str, "syscall") == 0)
				flags |= HIST_FIELD_FL_SYSCALL;
			else if (strcmp(field_str, "log2") == 0)
				flags |= HIST_FIELD_FL_LOG2;
			else {
				ret = -EINVAL;
				goto out;
			}
		}

		field = trace_find_event_field(file->event_call, field_name);
		if (!field || !field->size) {
			ret = -EINVAL;
			goto out;
		}

		if (is_string_field(field))
			/* string keys occupy a fixed-size slot */
			key_size = MAX_FILTER_STR_VAL;
		else
			key_size = field->size;
	}

	hist_data->fields[key_idx] = create_hist_field(field, flags);
	if (!hist_data->fields[key_idx]) {
		ret = -ENOMEM;
		goto out;
	}

	key_size = ALIGN(key_size, sizeof(u64));
	hist_data->fields[key_idx]->size = key_size;
	hist_data->fields[key_idx]->offset = key_offset;

	hist_data->key_size += key_size;
	if (hist_data->key_size > HIST_KEY_SIZE_MAX) {
		ret = -EINVAL;
		goto out;
	}

	hist_data->n_keys++;

	if (WARN_ON(hist_data->n_keys > TRACING_MAP_KEYS_MAX))
		return -EINVAL;

	ret = key_size;
 out:
	return ret;
}
  518. static int create_key_fields(struct hist_trigger_data *hist_data,
  519. struct trace_event_file *file)
  520. {
  521. unsigned int i, key_offset = 0, n_vals = hist_data->n_vals;
  522. char *fields_str, *field_str;
  523. int ret = -EINVAL;
  524. fields_str = hist_data->attrs->keys_str;
  525. if (!fields_str)
  526. goto out;
  527. strsep(&fields_str, "=");
  528. if (!fields_str)
  529. goto out;
  530. for (i = n_vals; i < n_vals + TRACING_MAP_KEYS_MAX; i++) {
  531. field_str = strsep(&fields_str, ",");
  532. if (!field_str)
  533. break;
  534. ret = create_key_field(hist_data, i, key_offset,
  535. file, field_str);
  536. if (ret < 0)
  537. goto out;
  538. key_offset += ret;
  539. }
  540. if (fields_str) {
  541. ret = -EINVAL;
  542. goto out;
  543. }
  544. ret = 0;
  545. out:
  546. return ret;
  547. }
  548. static int create_hist_fields(struct hist_trigger_data *hist_data,
  549. struct trace_event_file *file)
  550. {
  551. int ret;
  552. ret = create_val_fields(hist_data, file);
  553. if (ret)
  554. goto out;
  555. ret = create_key_fields(hist_data, file);
  556. if (ret)
  557. goto out;
  558. hist_data->n_fields = hist_data->n_vals + hist_data->n_keys;
  559. out:
  560. return ret;
  561. }
  562. static int is_descending(const char *str)
  563. {
  564. if (!str)
  565. return 0;
  566. if (strcmp(str, "descending") == 0)
  567. return 1;
  568. if (strcmp(str, "ascending") == 0)
  569. return 0;
  570. return -EINVAL;
  571. }
/*
 * Parse the "sort=" clause into hist_data->sort_keys[].
 *
 * With no clause, the single default sort key is used: sort_keys[] was
 * zeroed at allocation, so field_idx 0 (hitcount) ascending applies.
 * A literal "hitcount" entry likewise maps to field_idx 0; any other
 * name is matched against the value/key fields by name.
 */
static int create_sort_keys(struct hist_trigger_data *hist_data)
{
	char *fields_str = hist_data->attrs->sort_key_str;
	struct tracing_map_sort_key *sort_key;
	int descending, ret = 0;
	unsigned int i, j;

	hist_data->n_sort_keys = 1; /* we always have at least one, hitcount */

	if (!fields_str)
		goto out;

	strsep(&fields_str, "=");
	if (!fields_str) {
		ret = -EINVAL;	/* bare "sort" with no '=' */
		goto out;
	}

	for (i = 0; i < TRACING_MAP_SORT_KEYS_MAX; i++) {
		struct hist_field *hist_field;
		char *field_str, *field_name;
		const char *test_name;

		sort_key = &hist_data->sort_keys[i];

		field_str = strsep(&fields_str, ",");
		if (!field_str) {
			if (i == 0)
				ret = -EINVAL;	/* "sort=" with nothing after */
			break;
		}

		if ((i == TRACING_MAP_SORT_KEYS_MAX - 1) && fields_str) {
			/* more sort keys specified than supported */
			ret = -EINVAL;
			break;
		}

		field_name = strsep(&field_str, ".");
		if (!field_name) {
			ret = -EINVAL;
			break;
		}

		if (strcmp(field_name, "hitcount") == 0) {
			/* field_idx stays 0 == hitcount (zeroed at alloc) */
			descending = is_descending(field_str);
			if (descending < 0) {
				ret = descending;
				break;
			}
			sort_key->descending = descending;
			continue;
		}

		/* match against all non-hitcount fields by name */
		for (j = 1; j < hist_data->n_fields; j++) {
			hist_field = hist_data->fields[j];
			test_name = hist_field_name(hist_field, 0);

			if (strcmp(field_name, test_name) == 0) {
				sort_key->field_idx = j;
				descending = is_descending(field_str);
				if (descending < 0) {
					ret = descending;
					goto out;
				}
				sort_key->descending = descending;
				break;
			}
		}
		if (j == hist_data->n_fields) {
			ret = -EINVAL;	/* no field with that name */
			break;
		}
	}
	hist_data->n_sort_keys = i;
 out:
	return ret;
}
/* Tear down a hist trigger: attrs, fields, backing map, then hist_data */
static void destroy_hist_data(struct hist_trigger_data *hist_data)
{
	destroy_hist_trigger_attrs(hist_data->attrs);
	destroy_hist_fields(hist_data);
	tracing_map_destroy(hist_data->map);
	kfree(hist_data);
}
  645. static int create_tracing_map_fields(struct hist_trigger_data *hist_data)
  646. {
  647. struct tracing_map *map = hist_data->map;
  648. struct ftrace_event_field *field;
  649. struct hist_field *hist_field;
  650. int i, idx;
  651. for_each_hist_field(i, hist_data) {
  652. hist_field = hist_data->fields[i];
  653. if (hist_field->flags & HIST_FIELD_FL_KEY) {
  654. tracing_map_cmp_fn_t cmp_fn;
  655. field = hist_field->field;
  656. if (hist_field->flags & HIST_FIELD_FL_STACKTRACE)
  657. cmp_fn = tracing_map_cmp_none;
  658. else if (is_string_field(field))
  659. cmp_fn = tracing_map_cmp_string;
  660. else
  661. cmp_fn = tracing_map_cmp_num(field->size,
  662. field->is_signed);
  663. idx = tracing_map_add_key_field(map,
  664. hist_field->offset,
  665. cmp_fn);
  666. } else
  667. idx = tracing_map_add_sum_field(map);
  668. if (idx < 0)
  669. return idx;
  670. }
  671. return 0;
  672. }
  673. static bool need_tracing_map_ops(struct hist_trigger_data *hist_data)
  674. {
  675. struct hist_field *key_field;
  676. unsigned int i;
  677. for_each_hist_key_field(i, hist_data) {
  678. key_field = hist_data->fields[i];
  679. if (key_field->flags & HIST_FIELD_FL_EXECNAME)
  680. return true;
  681. }
  682. return false;
  683. }
/*
 * Allocate and fully initialize a hist_trigger_data: build the
 * value/key/sort fields from @attrs, create the backing tracing_map,
 * and register its fields.
 *
 * On success the returned hist_data takes ownership of @attrs.  On
 * failure ownership of @attrs stays with the caller — the error path
 * clears hist_data->attrs before tearing everything else down.
 */
static struct hist_trigger_data *
create_hist_data(unsigned int map_bits,
		 struct hist_trigger_attrs *attrs,
		 struct trace_event_file *file)
{
	const struct tracing_map_ops *map_ops = NULL;
	struct hist_trigger_data *hist_data;
	int ret = 0;

	hist_data = kzalloc(sizeof(*hist_data), GFP_KERNEL);
	if (!hist_data)
		return ERR_PTR(-ENOMEM);

	hist_data->attrs = attrs;

	ret = create_hist_fields(hist_data, file);
	if (ret)
		goto free;

	ret = create_sort_keys(hist_data);
	if (ret)
		goto free;

	/* .execname keys need per-element ops to save the task comm */
	if (need_tracing_map_ops(hist_data))
		map_ops = &hist_trigger_elt_comm_ops;

	hist_data->map = tracing_map_create(map_bits, hist_data->key_size,
					    map_ops, hist_data);
	if (IS_ERR(hist_data->map)) {
		ret = PTR_ERR(hist_data->map);
		hist_data->map = NULL;
		goto free;
	}

	ret = create_tracing_map_fields(hist_data);
	if (ret)
		goto free;

	ret = tracing_map_init(hist_data->map);
	if (ret)
		goto free;

	hist_data->event_file = file;
 out:
	return hist_data;
 free:
	hist_data->attrs = NULL;	/* caller retains ownership of attrs */
	destroy_hist_data(hist_data);

	hist_data = ERR_PTR(ret);

	goto out;
}
  726. static void hist_trigger_elt_update(struct hist_trigger_data *hist_data,
  727. struct tracing_map_elt *elt,
  728. void *rec)
  729. {
  730. struct hist_field *hist_field;
  731. unsigned int i;
  732. u64 hist_val;
  733. for_each_hist_val_field(i, hist_data) {
  734. hist_field = hist_data->fields[i];
  735. hist_val = hist_field->fn(hist_field, rec);
  736. tracing_map_update_sum(elt, i, hist_val);
  737. }
  738. }
/*
 * Copy one key's bytes into the compound key at the key's offset.
 *
 * For string keys the copy length comes from the actual string rather
 * than the (fixed-size) slot: the dynamic-string length encoded in the
 * record, strlen() for pointer strings, or the declared size for
 * static strings — clamped so the zeroed compound-key slot keeps a
 * trailing NUL.
 */
static inline void add_to_key(char *compound_key, void *key,
			      struct hist_field *key_field, void *rec)
{
	size_t size = key_field->size;

	if (key_field->flags & HIST_FIELD_FL_STRING) {
		struct ftrace_event_field *field;

		field = key_field->field;
		if (field->filter_type == FILTER_DYN_STRING)
			/* length lives in the high 16 bits of the str item */
			size = *(u32 *)(rec + field->offset) >> 16;
		else if (field->filter_type == FILTER_PTR_STRING)
			size = strlen(key);
		else if (field->filter_type == FILTER_STATIC_STRING)
			size = field->size;

		/* ensure NULL-termination */
		if (size > key_field->size - 1)
			size = key_field->size - 1;
	}

	memcpy(compound_key + key_field->offset, key, size);
}
/*
 * Per-event hot path: build the (possibly compound) key from @rec,
 * insert or look up the element in the tracing map, and update its
 * value sums.
 */
static void event_hist_trigger(struct event_trigger_data *data, void *rec)
{
	struct hist_trigger_data *hist_data = data->private_data;
	bool use_compound_key = (hist_data->n_keys > 1);
	unsigned long entries[HIST_STACKTRACE_DEPTH];
	char compound_key[HIST_KEY_SIZE_MAX];
	struct stack_trace stacktrace;
	struct hist_field *key_field;
	struct tracing_map_elt *elt;
	u64 field_contents;
	void *key = NULL;
	unsigned int i;

	memset(compound_key, 0, hist_data->key_size);

	for_each_hist_key_field(i, hist_data) {
		key_field = hist_data->fields[i];

		if (key_field->flags & HIST_FIELD_FL_STACKTRACE) {
			stacktrace.max_entries = HIST_STACKTRACE_DEPTH;
			stacktrace.entries = entries;
			stacktrace.nr_entries = 0;
			stacktrace.skip = HIST_STACKTRACE_SKIP;

			/* zero-fill so short traces compare equal as keys */
			memset(stacktrace.entries, 0, HIST_STACKTRACE_SIZE);
			save_stack_trace(&stacktrace);

			key = entries;
		} else {
			field_contents = key_field->fn(key_field, rec);
			if (key_field->flags & HIST_FIELD_FL_STRING) {
				key = (void *)(unsigned long)field_contents;
				/* strings always go through the compound key */
				use_compound_key = true;
			} else
				key = (void *)&field_contents;
		}

		if (use_compound_key)
			add_to_key(compound_key, key, key_field, rec);
	}

	if (use_compound_key)
		key = compound_key;

	elt = tracing_map_insert(hist_data->map, key);
	if (elt)
		hist_trigger_elt_update(hist_data, elt, rec);
}
  798. static void hist_trigger_stacktrace_print(struct seq_file *m,
  799. unsigned long *stacktrace_entries,
  800. unsigned int max_entries)
  801. {
  802. char str[KSYM_SYMBOL_LEN];
  803. unsigned int spaces = 8;
  804. unsigned int i;
  805. for (i = 0; i < max_entries; i++) {
  806. if (stacktrace_entries[i] == ULONG_MAX)
  807. return;
  808. seq_printf(m, "%*c", 1 + spaces, ' ');
  809. sprint_symbol(str, stacktrace_entries[i]);
  810. seq_printf(m, "%s\n", str);
  811. }
  812. }
/*
 * Print one histogram entry: '{ key: val, ... } hitcount: N ...'.
 * Each key flag (hex/sym/execname/syscall/stacktrace/log2/string)
 * selects its own formatting; the accumulated sums follow the key
 * block, starting with the implicit hitcount.
 */
static void
hist_trigger_entry_print(struct seq_file *m,
			 struct hist_trigger_data *hist_data, void *key,
			 struct tracing_map_elt *elt)
{
	struct hist_field *key_field;
	char str[KSYM_SYMBOL_LEN];
	bool multiline = false;
	const char *field_name;
	unsigned int i;
	u64 uval;

	seq_puts(m, "{ ");

	for_each_hist_key_field(i, hist_data) {
		key_field = hist_data->fields[i];

		/* No separator before the first key field */
		if (i > hist_data->n_vals)
			seq_puts(m, ", ");

		field_name = hist_field_name(key_field, 0);

		if (key_field->flags & HIST_FIELD_FL_HEX) {
			uval = *(u64 *)(key + key_field->offset);
			seq_printf(m, "%s: %llx", field_name, uval);
		} else if (key_field->flags & HIST_FIELD_FL_SYM) {
			uval = *(u64 *)(key + key_field->offset);
			sprint_symbol_no_offset(str, uval);
			seq_printf(m, "%s: [%llx] %-45s", field_name,
				   uval, str);
		} else if (key_field->flags & HIST_FIELD_FL_SYM_OFFSET) {
			uval = *(u64 *)(key + key_field->offset);
			sprint_symbol(str, uval);
			seq_printf(m, "%s: [%llx] %-55s", field_name,
				   uval, str);
		} else if (key_field->flags & HIST_FIELD_FL_EXECNAME) {
			/* NOTE: comm comes from elt->private_data,
			 * populated elsewhere in this file — verify there. */
			char *comm = elt->private_data;

			uval = *(u64 *)(key + key_field->offset);
			seq_printf(m, "%s: %-16s[%10llu]", field_name,
				   comm, uval);
		} else if (key_field->flags & HIST_FIELD_FL_SYSCALL) {
			const char *syscall_name;

			uval = *(u64 *)(key + key_field->offset);
			syscall_name = get_syscall_name(uval);
			if (!syscall_name)
				syscall_name = "unknown_syscall";

			seq_printf(m, "%s: %-30s[%3llu]", field_name,
				   syscall_name, uval);
		} else if (key_field->flags & HIST_FIELD_FL_STACKTRACE) {
			seq_puts(m, "stacktrace:\n");
			hist_trigger_stacktrace_print(m,
						      key + key_field->offset,
						      HIST_STACKTRACE_DEPTH);
			/* Stacktraces span several lines; suppress the
			 * trailing space before the closing brace. */
			multiline = true;
		} else if (key_field->flags & HIST_FIELD_FL_LOG2) {
			seq_printf(m, "%s: ~ 2^%-2llu", field_name,
				   *(u64 *)(key + key_field->offset));
		} else if (key_field->flags & HIST_FIELD_FL_STRING) {
			seq_printf(m, "%s: %-50s", field_name,
				   (char *)(key + key_field->offset));
		} else {
			uval = *(u64 *)(key + key_field->offset);
			seq_printf(m, "%s: %10llu", field_name, uval);
		}
	}

	if (!multiline)
		seq_puts(m, " ");

	seq_puts(m, "}");

	seq_printf(m, " hitcount: %10llu",
		   tracing_map_read_sum(elt, HITCOUNT_IDX));

	/* i == 0 (HITCOUNT_IDX) was already printed above */
	for (i = 1; i < hist_data->n_vals; i++) {
		field_name = hist_field_name(hist_data->fields[i], 0);

		if (hist_data->fields[i]->flags & HIST_FIELD_FL_HEX) {
			seq_printf(m, " %s: %10llx", field_name,
				   tracing_map_read_sum(elt, i));
		} else {
			seq_printf(m, " %s: %10llu", field_name,
				   tracing_map_read_sum(elt, i));
		}
	}

	seq_puts(m, "\n");
}
  890. static int print_entries(struct seq_file *m,
  891. struct hist_trigger_data *hist_data)
  892. {
  893. struct tracing_map_sort_entry **sort_entries = NULL;
  894. struct tracing_map *map = hist_data->map;
  895. int i, n_entries;
  896. n_entries = tracing_map_sort_entries(map, hist_data->sort_keys,
  897. hist_data->n_sort_keys,
  898. &sort_entries);
  899. if (n_entries < 0)
  900. return n_entries;
  901. for (i = 0; i < n_entries; i++)
  902. hist_trigger_entry_print(m, hist_data,
  903. sort_entries[i]->key,
  904. sort_entries[i]->elt);
  905. tracing_map_destroy_sort_entries(sort_entries, n_entries);
  906. return n_entries;
  907. }
  908. static void hist_trigger_show(struct seq_file *m,
  909. struct event_trigger_data *data, int n)
  910. {
  911. struct hist_trigger_data *hist_data;
  912. int n_entries;
  913. if (n > 0)
  914. seq_puts(m, "\n\n");
  915. seq_puts(m, "# event histogram\n#\n# trigger info: ");
  916. data->ops->print(m, data->ops, data);
  917. seq_puts(m, "#\n\n");
  918. hist_data = data->private_data;
  919. n_entries = print_entries(m, hist_data);
  920. if (n_entries < 0)
  921. n_entries = 0;
  922. seq_printf(m, "\nTotals:\n Hits: %llu\n Entries: %u\n Dropped: %llu\n",
  923. (u64)atomic64_read(&hist_data->map->hits),
  924. n_entries, (u64)atomic64_read(&hist_data->map->drops));
  925. }
  926. static int hist_show(struct seq_file *m, void *v)
  927. {
  928. struct event_trigger_data *data;
  929. struct trace_event_file *event_file;
  930. int n = 0, ret = 0;
  931. mutex_lock(&event_mutex);
  932. event_file = event_file_data(m->private);
  933. if (unlikely(!event_file)) {
  934. ret = -ENODEV;
  935. goto out_unlock;
  936. }
  937. list_for_each_entry_rcu(data, &event_file->triggers, list) {
  938. if (data->cmd_ops->trigger_type == ETT_EVENT_HIST)
  939. hist_trigger_show(m, data, n++);
  940. }
  941. out_unlock:
  942. mutex_unlock(&event_mutex);
  943. return ret;
  944. }
/* Open handler for the per-event 'hist' file; single-shot seq_file. */
static int event_hist_open(struct inode *inode, struct file *file)
{
	return single_open(file, hist_show, file);
}
/* File operations for the per-event 'hist' tracefs file (read-only). */
const struct file_operations event_hist_fops = {
	.open = event_hist_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
  955. static const char *get_hist_field_flags(struct hist_field *hist_field)
  956. {
  957. const char *flags_str = NULL;
  958. if (hist_field->flags & HIST_FIELD_FL_HEX)
  959. flags_str = "hex";
  960. else if (hist_field->flags & HIST_FIELD_FL_SYM)
  961. flags_str = "sym";
  962. else if (hist_field->flags & HIST_FIELD_FL_SYM_OFFSET)
  963. flags_str = "sym-offset";
  964. else if (hist_field->flags & HIST_FIELD_FL_EXECNAME)
  965. flags_str = "execname";
  966. else if (hist_field->flags & HIST_FIELD_FL_SYSCALL)
  967. flags_str = "syscall";
  968. else if (hist_field->flags & HIST_FIELD_FL_LOG2)
  969. flags_str = "log2";
  970. return flags_str;
  971. }
  972. static void hist_field_print(struct seq_file *m, struct hist_field *hist_field)
  973. {
  974. const char *field_name = hist_field_name(hist_field, 0);
  975. seq_printf(m, "%s", field_name);
  976. if (hist_field->flags) {
  977. const char *flags_str = get_hist_field_flags(hist_field);
  978. if (flags_str)
  979. seq_printf(m, ".%s", flags_str);
  980. }
  981. }
/*
 * Emit the trigger description in the same syntax used to create it:
 * hist:[<name>:]keys=...:vals=...:sort=...:size=N [if <filter>],
 * followed by the [paused]/[active] state.
 */
static int event_hist_trigger_print(struct seq_file *m,
				    struct event_trigger_ops *ops,
				    struct event_trigger_data *data)
{
	struct hist_trigger_data *hist_data = data->private_data;
	struct hist_field *key_field;
	unsigned int i;

	seq_puts(m, "hist:");

	if (data->name)
		seq_printf(m, "%s:", data->name);

	seq_puts(m, "keys=");

	for_each_hist_key_field(i, hist_data) {
		key_field = hist_data->fields[i];

		/* No separator before the first key field */
		if (i > hist_data->n_vals)
			seq_puts(m, ",");

		if (key_field->flags & HIST_FIELD_FL_STACKTRACE)
			seq_puts(m, "stacktrace");
		else
			hist_field_print(m, key_field);
	}

	seq_puts(m, ":vals=");

	for_each_hist_val_field(i, hist_data) {
		if (i == HITCOUNT_IDX)
			seq_puts(m, "hitcount");
		else {
			seq_puts(m, ",");
			hist_field_print(m, hist_data->fields[i]);
		}
	}

	seq_puts(m, ":sort=");

	for (i = 0; i < hist_data->n_sort_keys; i++) {
		struct tracing_map_sort_key *sort_key;

		sort_key = &hist_data->sort_keys[i];

		if (i > 0)
			seq_puts(m, ",");

		if (sort_key->field_idx == HITCOUNT_IDX)
			seq_puts(m, "hitcount");
		else {
			unsigned int idx = sort_key->field_idx;

			/* Corrupt sort key index — bail rather than OOB read */
			if (WARN_ON(idx >= TRACING_MAP_FIELDS_MAX))
				return -EINVAL;

			hist_field_print(m, hist_data->fields[idx]);
		}

		if (sort_key->descending)
			seq_puts(m, ".descending");
	}

	/* map_bits is log2 of the map size */
	seq_printf(m, ":size=%u", (1 << hist_data->map->map_bits));

	if (data->filter_str)
		seq_printf(m, " if %s", data->filter_str);

	if (data->paused)
		seq_puts(m, " [paused]");
	else
		seq_puts(m, " [active]");

	seq_putc(m, '\n');

	return 0;
}
  1038. static int event_hist_trigger_init(struct event_trigger_ops *ops,
  1039. struct event_trigger_data *data)
  1040. {
  1041. struct hist_trigger_data *hist_data = data->private_data;
  1042. if (!data->ref && hist_data->attrs->name)
  1043. save_named_trigger(hist_data->attrs->name, data);
  1044. data->ref++;
  1045. return 0;
  1046. }
/*
 * Drop a reference on a hist trigger.  When the last reference goes
 * away, unregister the name (if any) and free both the trigger data
 * and the histogram data it owns — in that order.
 */
static void event_hist_trigger_free(struct event_trigger_ops *ops,
				    struct event_trigger_data *data)
{
	struct hist_trigger_data *hist_data = data->private_data;

	if (WARN_ON_ONCE(data->ref <= 0))
		return;

	data->ref--;
	if (!data->ref) {
		if (data->name)
			del_named_trigger(data);
		trigger_data_free(data);
		destroy_hist_data(hist_data);
	}
}
/* Ops for a hist trigger that owns its own histogram data. */
static struct event_trigger_ops event_hist_trigger_ops = {
	.func = event_hist_trigger,
	.print = event_hist_trigger_print,
	.init = event_hist_trigger_init,
	.free = event_hist_trigger_free,
};
  1067. static int event_hist_trigger_named_init(struct event_trigger_ops *ops,
  1068. struct event_trigger_data *data)
  1069. {
  1070. data->ref++;
  1071. save_named_trigger(data->named_data->name, data);
  1072. event_hist_trigger_init(ops, data->named_data);
  1073. return 0;
  1074. }
  1075. static void event_hist_trigger_named_free(struct event_trigger_ops *ops,
  1076. struct event_trigger_data *data)
  1077. {
  1078. if (WARN_ON_ONCE(data->ref <= 0))
  1079. return;
  1080. event_hist_trigger_free(ops, data->named_data);
  1081. data->ref--;
  1082. if (!data->ref) {
  1083. del_named_trigger(data);
  1084. trigger_data_free(data);
  1085. }
  1086. }
/* Ops for a hist trigger sharing another (named) trigger's data. */
static struct event_trigger_ops event_hist_trigger_named_ops = {
	.func = event_hist_trigger,
	.print = event_hist_trigger_print,
	.init = event_hist_trigger_named_init,
	.free = event_hist_trigger_named_free,
};
/*
 * hist triggers always start with the plain ops regardless of
 * cmd/param; hist_register_trigger() switches a trigger to the named
 * ops when it attaches to an existing named histogram.
 */
static struct event_trigger_ops *event_hist_get_trigger_ops(char *cmd,
							    char *param)
{
	return &event_hist_trigger_ops;
}
/*
 * Reset a histogram's accumulated data.  A named trigger is paused
 * first, and synchronize_sched() waits out any in-flight trigger
 * callbacks, so no CPU can update the map while it is being cleared.
 */
static void hist_clear(struct event_trigger_data *data)
{
	struct hist_trigger_data *hist_data = data->private_data;

	if (data->name)
		pause_named_trigger(data);

	synchronize_sched();

	tracing_map_clear(hist_data->map);

	if (data->name)
		unpause_named_trigger(data);
}
  1108. static bool compatible_field(struct ftrace_event_field *field,
  1109. struct ftrace_event_field *test_field)
  1110. {
  1111. if (field == test_field)
  1112. return true;
  1113. if (field == NULL || test_field == NULL)
  1114. return false;
  1115. if (strcmp(field->name, test_field->name) != 0)
  1116. return false;
  1117. if (strcmp(field->type, test_field->type) != 0)
  1118. return false;
  1119. if (field->size != test_field->size)
  1120. return false;
  1121. if (field->is_signed != test_field->is_signed)
  1122. return false;
  1123. return true;
  1124. }
/*
 * Decide whether two hist triggers describe the same histogram: same
 * field layout (flags, underlying event field, offset), same sort
 * keys, and — unless @ignore_filter — the same filter string.  When
 * @named_data is set, only triggers tied to that named trigger can
 * match.
 */
static bool hist_trigger_match(struct event_trigger_data *data,
			       struct event_trigger_data *data_test,
			       struct event_trigger_data *named_data,
			       bool ignore_filter)
{
	struct tracing_map_sort_key *sort_key, *sort_key_test;
	struct hist_trigger_data *hist_data, *hist_data_test;
	struct hist_field *key_field, *key_field_test;
	unsigned int i;

	if (named_data && (named_data != data_test) &&
	    (named_data != data_test->named_data))
		return false;

	/* An unnamed trigger never matches a named one. */
	if (!named_data && is_named_trigger(data_test))
		return false;

	hist_data = data->private_data;
	hist_data_test = data_test->private_data;

	if (hist_data->n_vals != hist_data_test->n_vals ||
	    hist_data->n_fields != hist_data_test->n_fields ||
	    hist_data->n_sort_keys != hist_data_test->n_sort_keys)
		return false;

	if (!ignore_filter) {
		/* Either both have a filter, or neither does. */
		if ((data->filter_str && !data_test->filter_str) ||
		    (!data->filter_str && data_test->filter_str))
			return false;
	}

	for_each_hist_field(i, hist_data) {
		key_field = hist_data->fields[i];
		key_field_test = hist_data_test->fields[i];

		if (key_field->flags != key_field_test->flags)
			return false;
		if (!compatible_field(key_field->field, key_field_test->field))
			return false;
		if (key_field->offset != key_field_test->offset)
			return false;
	}

	for (i = 0; i < hist_data->n_sort_keys; i++) {
		sort_key = &hist_data->sort_keys[i];
		sort_key_test = &hist_data_test->sort_keys[i];

		if (sort_key->field_idx != sort_key_test->field_idx ||
		    sort_key->descending != sort_key_test->descending)
			return false;
	}

	/* If data->filter_str is set, data_test's is too (checked above). */
	if (!ignore_filter && data->filter_str &&
	    (strcmp(data->filter_str, data_test->filter_str) != 0))
		return false;

	return true;
}
/*
 * Register a hist trigger on @file, or apply pause/cont/clear to an
 * already-existing matching trigger.  Returns the number of triggers
 * registered (0 or 1) on success, a negative error otherwise.
 */
static int hist_register_trigger(char *glob, struct event_trigger_ops *ops,
				 struct event_trigger_data *data,
				 struct trace_event_file *file)
{
	struct hist_trigger_data *hist_data = data->private_data;
	struct event_trigger_data *test, *named_data = NULL;
	int ret = 0;

	if (hist_data->attrs->name) {
		named_data = find_named_trigger(hist_data->attrs->name);
		if (named_data) {
			/* Reusing a name requires an identical histogram
			 * definition (filters are allowed to differ). */
			if (!hist_trigger_match(data, named_data, named_data,
						true)) {
				ret = -EINVAL;
				goto out;
			}
		}
	}

	if (hist_data->attrs->name && !named_data)
		goto new;

	list_for_each_entry_rcu(test, &file->triggers, list) {
		if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
			if (!hist_trigger_match(data, test, named_data, false))
				continue;
			/* Matching trigger exists: treat the attrs as a
			 * state change on it rather than a new trigger. */
			if (hist_data->attrs->pause)
				test->paused = true;
			else if (hist_data->attrs->cont)
				test->paused = false;
			else if (hist_data->attrs->clear)
				hist_clear(test);
			else
				ret = -EEXIST;
			goto out;
		}
	}
 new:
	/* cont and clear only make sense on an existing trigger */
	if (hist_data->attrs->cont || hist_data->attrs->clear) {
		ret = -ENOENT;
		goto out;
	}

	if (hist_data->attrs->pause)
		data->paused = true;

	if (named_data) {
		/* Share the named trigger's data instead of our own. */
		destroy_hist_data(data->private_data);
		data->private_data = named_data->private_data;
		set_named_trigger_data(data, named_data);
		data->ops = &event_hist_trigger_named_ops;
	}

	if (data->ops->init) {
		ret = data->ops->init(data->ops, data);
		if (ret < 0)
			goto out;
	}

	list_add_rcu(&data->list, &file->triggers);
	ret++;

	update_cond_flag(file);

	if (trace_event_trigger_enable_disable(file, 1) < 0) {
		/* Couldn't enable the event: roll back the registration. */
		list_del_rcu(&data->list);
		update_cond_flag(file);
		ret--;
	}
 out:
	return ret;
}
/*
 * Remove the trigger matching @data from @file's trigger list, if one
 * exists, and drop its reference via its free op.
 */
static void hist_unregister_trigger(char *glob, struct event_trigger_ops *ops,
				    struct event_trigger_data *data,
				    struct trace_event_file *file)
{
	struct hist_trigger_data *hist_data = data->private_data;
	struct event_trigger_data *test, *named_data = NULL;
	bool unregistered = false;

	if (hist_data->attrs->name)
		named_data = find_named_trigger(hist_data->attrs->name);

	list_for_each_entry_rcu(test, &file->triggers, list) {
		if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
			if (!hist_trigger_match(data, test, named_data, false))
				continue;
			unregistered = true;
			list_del_rcu(&test->list);
			trace_event_trigger_enable_disable(file, 0);
			update_cond_flag(file);
			break;
		}
	}

	/* 'unregistered' guards 'test': it only points at a real entry
	 * if the loop above broke out on a match. */
	if (unregistered && test->ops->free)
		test->ops->free(test->ops, test);
}
  1258. static void hist_unreg_all(struct trace_event_file *file)
  1259. {
  1260. struct event_trigger_data *test, *n;
  1261. list_for_each_entry_safe(test, n, &file->triggers, list) {
  1262. if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
  1263. list_del_rcu(&test->list);
  1264. trace_event_trigger_enable_disable(file, 0);
  1265. update_cond_flag(file);
  1266. if (test->ops->free)
  1267. test->ops->free(test->ops, test);
  1268. }
  1269. }
  1270. }
/*
 * Parse and apply a 'hist' trigger command on @file.  @param has the
 * form '<trigger>[ if <filter>]'; a leading '!' in @glob means
 * unregister.  Returns 0 on success, negative error on failure.
 */
static int event_hist_trigger_func(struct event_command *cmd_ops,
				   struct trace_event_file *file,
				   char *glob, char *cmd, char *param)
{
	unsigned int hist_trigger_bits = TRACING_MAP_BITS_DEFAULT;
	struct event_trigger_data *trigger_data;
	struct hist_trigger_attrs *attrs;
	struct event_trigger_ops *trigger_ops;
	struct hist_trigger_data *hist_data;
	char *trigger;
	int ret = 0;

	if (!param)
		return -EINVAL;

	/* separate the trigger from the filter (k:v [if filter]) */
	trigger = strsep(&param, " \t");
	if (!trigger)
		return -EINVAL;

	attrs = parse_hist_trigger_attrs(trigger);
	if (IS_ERR(attrs))
		return PTR_ERR(attrs);

	if (attrs->map_bits)
		hist_trigger_bits = attrs->map_bits;

	/* On success, hist_data owns attrs (hist_data->attrs is used
	 * throughout); on failure we must free attrs ourselves. */
	hist_data = create_hist_data(hist_trigger_bits, attrs, file);
	if (IS_ERR(hist_data)) {
		destroy_hist_trigger_attrs(attrs);
		return PTR_ERR(hist_data);
	}

	trigger_ops = cmd_ops->get_trigger_ops(cmd, trigger);

	ret = -ENOMEM;
	trigger_data = kzalloc(sizeof(*trigger_data), GFP_KERNEL);
	if (!trigger_data)
		goto out_free;

	/* -1 count == unlimited */
	trigger_data->count = -1;
	trigger_data->ops = trigger_ops;
	trigger_data->cmd_ops = cmd_ops;

	INIT_LIST_HEAD(&trigger_data->list);
	RCU_INIT_POINTER(trigger_data->filter, NULL);

	trigger_data->private_data = hist_data;

	/* if param is non-empty, it's supposed to be a filter */
	if (param && cmd_ops->set_filter) {
		ret = cmd_ops->set_filter(param, trigger_data, file);
		if (ret < 0)
			goto out_free;
	}

	if (glob[0] == '!') {
		/* Unregister; the parse-time copies are freed below. */
		cmd_ops->unreg(glob+1, trigger_ops, trigger_data, file);
		ret = 0;
		goto out_free;
	}

	ret = cmd_ops->reg(glob, trigger_ops, trigger_data, file);
	/*
	 * The above returns on success the # of triggers registered,
	 * but if it didn't register any it returns zero.  Consider no
	 * triggers registered a failure too.
	 */
	if (!ret) {
		if (!(attrs->pause || attrs->cont || attrs->clear))
			ret = -ENOENT;
		goto out_free;
	} else if (ret < 0)
		goto out_free;
	/* Just return zero, not the number of registered triggers */
	ret = 0;
 out:
	return ret;
 out_free:
	if (cmd_ops->set_filter)
		cmd_ops->set_filter(NULL, trigger_data, NULL);

	kfree(trigger_data);

	destroy_hist_data(hist_data);

	goto out;
}
/* The 'hist' event trigger command, registered at boot. */
static struct event_command trigger_hist_cmd = {
	.name = "hist",
	.trigger_type = ETT_EVENT_HIST,
	.flags = EVENT_CMD_FL_NEEDS_REC,
	.func = event_hist_trigger_func,
	.reg = hist_register_trigger,
	.unreg = hist_unregister_trigger,
	.unreg_all = hist_unreg_all,
	.get_trigger_ops = event_hist_get_trigger_ops,
	.set_filter = set_trigger_filter,
};
  1354. __init int register_trigger_hist_cmd(void)
  1355. {
  1356. int ret;
  1357. ret = register_event_command(&trigger_hist_cmd);
  1358. WARN_ON(ret < 0);
  1359. return ret;
  1360. }
  1361. static void
  1362. hist_enable_trigger(struct event_trigger_data *data, void *rec)
  1363. {
  1364. struct enable_trigger_data *enable_data = data->private_data;
  1365. struct event_trigger_data *test;
  1366. list_for_each_entry_rcu(test, &enable_data->file->triggers, list) {
  1367. if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
  1368. if (enable_data->enable)
  1369. test->paused = false;
  1370. else
  1371. test->paused = true;
  1372. }
  1373. }
  1374. }
  1375. static void
  1376. hist_enable_count_trigger(struct event_trigger_data *data, void *rec)
  1377. {
  1378. if (!data->count)
  1379. return;
  1380. if (data->count != -1)
  1381. (data->count)--;
  1382. hist_enable_trigger(data, rec);
  1383. }
/* Unlimited enable_hist trigger. */
static struct event_trigger_ops hist_enable_trigger_ops = {
	.func = hist_enable_trigger,
	.print = event_enable_trigger_print,
	.init = event_trigger_init,
	.free = event_enable_trigger_free,
};

/* enable_hist trigger with a :count limit. */
static struct event_trigger_ops hist_enable_count_trigger_ops = {
	.func = hist_enable_count_trigger,
	.print = event_enable_trigger_print,
	.init = event_trigger_init,
	.free = event_enable_trigger_free,
};

/* Unlimited disable_hist trigger (same func; enable flag differs). */
static struct event_trigger_ops hist_disable_trigger_ops = {
	.func = hist_enable_trigger,
	.print = event_enable_trigger_print,
	.init = event_trigger_init,
	.free = event_enable_trigger_free,
};

/* disable_hist trigger with a :count limit. */
static struct event_trigger_ops hist_disable_count_trigger_ops = {
	.func = hist_enable_count_trigger,
	.print = event_enable_trigger_print,
	.init = event_trigger_init,
	.free = event_enable_trigger_free,
};
  1408. static struct event_trigger_ops *
  1409. hist_enable_get_trigger_ops(char *cmd, char *param)
  1410. {
  1411. struct event_trigger_ops *ops;
  1412. bool enable;
  1413. enable = (strcmp(cmd, ENABLE_HIST_STR) == 0);
  1414. if (enable)
  1415. ops = param ? &hist_enable_count_trigger_ops :
  1416. &hist_enable_trigger_ops;
  1417. else
  1418. ops = param ? &hist_disable_count_trigger_ops :
  1419. &hist_disable_trigger_ops;
  1420. return ops;
  1421. }
  1422. static void hist_enable_unreg_all(struct trace_event_file *file)
  1423. {
  1424. struct event_trigger_data *test, *n;
  1425. list_for_each_entry_safe(test, n, &file->triggers, list) {
  1426. if (test->cmd_ops->trigger_type == ETT_HIST_ENABLE) {
  1427. list_del_rcu(&test->list);
  1428. update_cond_flag(file);
  1429. trace_event_trigger_enable_disable(file, 0);
  1430. if (test->ops->free)
  1431. test->ops->free(test->ops, test);
  1432. }
  1433. }
  1434. }
/* The 'enable_hist' event trigger command. */
static struct event_command trigger_hist_enable_cmd = {
	.name = ENABLE_HIST_STR,
	.trigger_type = ETT_HIST_ENABLE,
	.func = event_enable_trigger_func,
	.reg = event_enable_register_trigger,
	.unreg = event_enable_unregister_trigger,
	.unreg_all = hist_enable_unreg_all,
	.get_trigger_ops = hist_enable_get_trigger_ops,
	.set_filter = set_trigger_filter,
};

/* The 'disable_hist' event trigger command (same type as enable). */
static struct event_command trigger_hist_disable_cmd = {
	.name = DISABLE_HIST_STR,
	.trigger_type = ETT_HIST_ENABLE,
	.func = event_enable_trigger_func,
	.reg = event_enable_register_trigger,
	.unreg = event_enable_unregister_trigger,
	.unreg_all = hist_enable_unreg_all,
	.get_trigger_ops = hist_enable_get_trigger_ops,
	.set_filter = set_trigger_filter,
};
/* Back out both enable_hist and disable_hist commands (init error path). */
static __init void unregister_trigger_hist_enable_disable_cmds(void)
{
	unregister_event_command(&trigger_hist_enable_cmd);
	unregister_event_command(&trigger_hist_disable_cmd);
}
  1460. __init int register_trigger_hist_enable_disable_cmds(void)
  1461. {
  1462. int ret;
  1463. ret = register_event_command(&trigger_hist_enable_cmd);
  1464. if (WARN_ON(ret < 0))
  1465. return ret;
  1466. ret = register_event_command(&trigger_hist_disable_cmd);
  1467. if (WARN_ON(ret < 0))
  1468. unregister_trigger_hist_enable_disable_cmds();
  1469. return ret;
  1470. }