trace_events_hist.c 58 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
72278227922802281228222832284228522862287228822892290229122922293229422952296229722982299230023012302230323042305230623072308230923102311231223132314231523162317231823192320232123222323232423252326232723282329233023312332233323342335233623372338233923402341234223432344234523462347234823492350235123522353235423552356235723582359236023612362236323642365236623672368236923702371237223732374237523762377237823792380238123822383238423852386238723882389239023912392239323942395239623972398239924002401240224032404240524062407240824092410241124122413241424152416241724182419242024212422242324242425242624272428242924302431243224332434243524362437243824392440244124422443244424452446244724482449245024512452245324542455245624572458245924602461246224632464246524662467246824692470247124722473247424752476247724782479248024812482248324842485248624872488248924902491249224932494249524962497249824992500250125022503250425052506250725082509251025112512251325142515251625172518251925202521252225232524252525262527252825292530253125322533253425352536253725382539254025412542254325442545
  1. /*
  2. * trace_events_hist - trace event hist triggers
  3. *
  4. * This program is free software; you can redistribute it and/or modify
  5. * it under the terms of the GNU General Public License as published by
  6. * the Free Software Foundation; either version 2 of the License, or
  7. * (at your option) any later version.
  8. *
  9. * This program is distributed in the hope that it will be useful,
  10. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  11. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  12. * GNU General Public License for more details.
  13. *
  14. * Copyright (C) 2015 Tom Zanussi <tom.zanussi@linux.intel.com>
  15. */
  16. #include <linux/module.h>
  17. #include <linux/kallsyms.h>
  18. #include <linux/mutex.h>
  19. #include <linux/slab.h>
  20. #include <linux/stacktrace.h>
  21. #include <linux/rculist.h>
  22. #include "tracing_map.h"
  23. #include "trace.h"
struct hist_field;

/*
 * Per-field value-retrieval callback: extracts the field's value from a
 * raw trace event record (and/or its ring-buffer event) widened to u64.
 */
typedef u64 (*hist_field_fn_t) (struct hist_field *field, void *event,
				struct ring_buffer_event *rbe);

/* An expression node has at most two operands (binary ops). */
#define HIST_FIELD_OPERANDS_MAX	2
/* Upper bound on key + value + variable fields in one trigger. */
#define HIST_FIELDS_MAX		(TRACING_MAP_FIELDS_MAX + TRACING_MAP_VARS_MAX)
/* Operator kinds recognized in a hist trigger expression string. */
enum field_op_id {
	FIELD_OP_NONE,
	FIELD_OP_PLUS,
	FIELD_OP_MINUS,
	FIELD_OP_UNARY_MINUS,
};
/* A named histogram variable, owned by the trigger that defined it. */
struct hist_var {
	char				*name;		/* variable name from the trigger string */
	struct hist_trigger_data	*hist_data;	/* trigger the variable belongs to */
	unsigned int			idx;		/* variable slot index in the tracing map */
};
/*
 * One histogram field: a key, value, variable, or expression node.
 * Expression nodes (HIST_FIELD_FL_EXPR) have no backing event field and
 * instead reference up to two operand hist_fields.
 */
struct hist_field {
	struct ftrace_event_field	*field;		/* backing event field, NULL for synthetic */
	unsigned long			flags;		/* HIST_FIELD_FL_* bits */
	hist_field_fn_t			fn;		/* value-retrieval callback */
	unsigned int			size;		/* value size in bytes */
	unsigned int			offset;		/* offset within the compound map key */
	unsigned int			is_signed;
	struct hist_field		*operands[HIST_FIELD_OPERANDS_MAX];
	struct hist_trigger_data	*hist_data;	/* owning trigger */
	struct hist_var			var;		/* set if this field defines a variable */
	enum field_op_id		operator;	/* for expression nodes */
	char				*name;		/* expression display string */
};
/*
 * hist_field_none - placeholder for fields with no extractable value
 * (e.g. stacktrace keys); always yields 0.
 */
static u64 hist_field_none(struct hist_field *field, void *event,
			   struct ring_buffer_event *rbe)
{
	return 0;
}
/*
 * hist_field_counter - value fn for the implicit hitcount field; each
 * event contributes exactly 1.
 */
static u64 hist_field_counter(struct hist_field *field, void *event,
			      struct ring_buffer_event *rbe)
{
	return 1;
}
  63. static u64 hist_field_string(struct hist_field *hist_field, void *event,
  64. struct ring_buffer_event *rbe)
  65. {
  66. char *addr = (char *)(event + hist_field->field->offset);
  67. return (u64)(unsigned long)addr;
  68. }
  69. static u64 hist_field_dynstring(struct hist_field *hist_field, void *event,
  70. struct ring_buffer_event *rbe)
  71. {
  72. u32 str_item = *(u32 *)(event + hist_field->field->offset);
  73. int str_loc = str_item & 0xffff;
  74. char *addr = (char *)(event + str_loc);
  75. return (u64)(unsigned long)addr;
  76. }
  77. static u64 hist_field_pstring(struct hist_field *hist_field, void *event,
  78. struct ring_buffer_event *rbe)
  79. {
  80. char **addr = (char **)(event + hist_field->field->offset);
  81. return (u64)(unsigned long)*addr;
  82. }
  83. static u64 hist_field_log2(struct hist_field *hist_field, void *event,
  84. struct ring_buffer_event *rbe)
  85. {
  86. struct hist_field *operand = hist_field->operands[0];
  87. u64 val = operand->fn(operand, event, rbe);
  88. return (u64) ilog2(roundup_pow_of_two(val));
  89. }
  90. static u64 hist_field_plus(struct hist_field *hist_field, void *event,
  91. struct ring_buffer_event *rbe)
  92. {
  93. struct hist_field *operand1 = hist_field->operands[0];
  94. struct hist_field *operand2 = hist_field->operands[1];
  95. u64 val1 = operand1->fn(operand1, event, rbe);
  96. u64 val2 = operand2->fn(operand2, event, rbe);
  97. return val1 + val2;
  98. }
  99. static u64 hist_field_minus(struct hist_field *hist_field, void *event,
  100. struct ring_buffer_event *rbe)
  101. {
  102. struct hist_field *operand1 = hist_field->operands[0];
  103. struct hist_field *operand2 = hist_field->operands[1];
  104. u64 val1 = operand1->fn(operand1, event, rbe);
  105. u64 val2 = operand2->fn(operand2, event, rbe);
  106. return val1 - val2;
  107. }
  108. static u64 hist_field_unary_minus(struct hist_field *hist_field, void *event,
  109. struct ring_buffer_event *rbe)
  110. {
  111. struct hist_field *operand = hist_field->operands[0];
  112. s64 sval = (s64)operand->fn(operand, event, rbe);
  113. u64 val = (u64)-sval;
  114. return val;
  115. }
/*
 * DEFINE_HIST_FIELD_FN - generate a typed fetch function that reads a
 * fixed-width field out of the raw event record and widens it to u64.
 *
 * NOTE(review): the (u64)(unsigned long) double cast sign-extends signed
 * values only up to the width of long; presumably correct on 64-bit —
 * confirm signed s8/s16/s32 behavior on 32-bit architectures.
 */
#define DEFINE_HIST_FIELD_FN(type)					\
static u64 hist_field_##type(struct hist_field *hist_field,		\
			     void *event,				\
			     struct ring_buffer_event *rbe)		\
{									\
	type *addr = (type *)(event + hist_field->field->offset);	\
									\
	return (u64)(unsigned long)*addr;				\
}

DEFINE_HIST_FIELD_FN(s64);
DEFINE_HIST_FIELD_FN(u64);
DEFINE_HIST_FIELD_FN(s32);
DEFINE_HIST_FIELD_FN(u32);
DEFINE_HIST_FIELD_FN(s16);
DEFINE_HIST_FIELD_FN(u16);
DEFINE_HIST_FIELD_FN(s8);
DEFINE_HIST_FIELD_FN(u8);
/*
 * Field iteration helpers.  Within fields[], value fields occupy
 * indices [0, n_vals) and key fields [n_vals, n_fields).
 */
#define for_each_hist_field(i, hist_data)	\
	for ((i) = 0; (i) < (hist_data)->n_fields; (i)++)

#define for_each_hist_val_field(i, hist_data)	\
	for ((i) = 0; (i) < (hist_data)->n_vals; (i)++)

#define for_each_hist_key_field(i, hist_data)	\
	for ((i) = (hist_data)->n_vals; (i) < (hist_data)->n_fields; (i)++)

#define HIST_STACKTRACE_DEPTH	16
#define HIST_STACKTRACE_SIZE	(HIST_STACKTRACE_DEPTH * sizeof(unsigned long))
#define HIST_STACKTRACE_SKIP	5	/* skip trigger-internal frames */

#define HITCOUNT_IDX		0	/* hitcount is always value field 0 */
#define HIST_KEY_SIZE_MAX	(MAX_FILTER_STR_VAL + HIST_STACKTRACE_SIZE)
/* Per-field flags: roles (hitcount/key/var/expr) and display modifiers. */
enum hist_field_flags {
	HIST_FIELD_FL_HITCOUNT		= 1 << 0,	/* implicit event counter */
	HIST_FIELD_FL_KEY		= 1 << 1,	/* part of the map key */
	HIST_FIELD_FL_STRING		= 1 << 2,
	HIST_FIELD_FL_HEX		= 1 << 3,	/* .hex modifier */
	HIST_FIELD_FL_SYM		= 1 << 4,	/* .sym modifier */
	HIST_FIELD_FL_SYM_OFFSET	= 1 << 5,	/* .sym-offset modifier */
	HIST_FIELD_FL_EXECNAME		= 1 << 6,	/* .execname (common_pid only) */
	HIST_FIELD_FL_SYSCALL		= 1 << 7,	/* .syscall modifier */
	HIST_FIELD_FL_STACKTRACE	= 1 << 8,	/* stacktrace key */
	HIST_FIELD_FL_LOG2		= 1 << 9,	/* .log2 bucketing */
	HIST_FIELD_FL_TIMESTAMP		= 1 << 10,	/* synthetic common_timestamp */
	HIST_FIELD_FL_TIMESTAMP_USECS	= 1 << 11,	/* .usecs modifier */
	HIST_FIELD_FL_VAR		= 1 << 12,	/* defines a variable */
	HIST_FIELD_FL_EXPR		= 1 << 13,	/* expression node */
};
/* Parsed variable definitions: parallel name/expression string arrays. */
struct var_defs {
	unsigned int	n_vars;
	char		*name[TRACING_MAP_VARS_MAX];
	char		*expr[TRACING_MAP_VARS_MAX];
};
/* Attributes parsed from the hist trigger command string. */
struct hist_trigger_attrs {
	char		*keys_str;	/* "keys=..." clause (required) */
	char		*vals_str;	/* "vals=..." clause */
	char		*sort_key_str;	/* "sort=..." clause */
	char		*name;		/* "name=..." for named triggers */
	bool		pause;
	bool		cont;
	bool		clear;
	bool		ts_in_usecs;	/* common_timestamp.usecs requested */
	unsigned int	map_bits;	/* log2 of requested map size */

	/* raw "lhs=rhs" tokens not matching a known attribute (variables) */
	char		*assignment_str[TRACING_MAP_VARS_MAX];
	unsigned int	n_assignments;

	struct var_defs	var_defs;
};
/* Everything belonging to one instantiated hist trigger. */
struct hist_trigger_data {
	struct hist_field               *fields[HIST_FIELDS_MAX];
	unsigned int			n_vals;		/* # of value fields */
	unsigned int			n_keys;		/* # of key fields */
	unsigned int			n_fields;	/* n_vals + n_keys */
	unsigned int			n_vars;		/* # of variable fields */
	unsigned int			key_size;	/* total compound key size */
	struct tracing_map_sort_key	sort_keys[TRACING_MAP_SORT_KEYS_MAX];
	unsigned int			n_sort_keys;
	struct trace_event_file		*event_file;	/* event this trigger is on */
	struct hist_trigger_attrs	*attrs;
	struct tracing_map		*map;		/* backing lock-free map */
	bool				enable_timestamps;
	bool				remove;
};
/*
 * hist_field_timestamp - resolve the synthetic "common_timestamp" field.
 *
 * Reads the event's ring-buffer timestamp and converts ns -> us when the
 * trigger asked for usecs and the trace clock actually counts in ns.
 */
static u64 hist_field_timestamp(struct hist_field *hist_field, void *event,
				struct ring_buffer_event *rbe)
{
	struct hist_trigger_data *hist_data = hist_field->hist_data;
	struct trace_array *tr = hist_data->event_file->tr;
	u64 ts = ring_buffer_event_time_stamp(rbe);

	if (hist_data->attrs->ts_in_usecs && trace_clock_in_ns(tr))
		ts = ns2usecs(ts);

	return ts;
}
  204. static struct hist_field *find_var_field(struct hist_trigger_data *hist_data,
  205. const char *var_name)
  206. {
  207. struct hist_field *hist_field, *found = NULL;
  208. int i;
  209. for_each_hist_field(i, hist_data) {
  210. hist_field = hist_data->fields[i];
  211. if (hist_field && hist_field->flags & HIST_FIELD_FL_VAR &&
  212. strcmp(hist_field->var.name, var_name) == 0) {
  213. found = hist_field;
  214. break;
  215. }
  216. }
  217. return found;
  218. }
/*
 * find_var - look up variable @var_name, first in @hist_data itself,
 * then in any other hist trigger attached to the same event @file.
 *
 * NOTE(review): walks file->triggers with list_for_each_entry_rcu();
 * presumably callers hold the trigger mutex or RCU read lock — confirm.
 */
static struct hist_field *find_var(struct hist_trigger_data *hist_data,
				   struct trace_event_file *file,
				   const char *var_name)
{
	struct hist_trigger_data *test_data;
	struct event_trigger_data *test;
	struct hist_field *hist_field;

	/* The trigger doing the lookup gets first chance. */
	hist_field = find_var_field(hist_data, var_name);
	if (hist_field)
		return hist_field;

	/* Then every other hist trigger on the same event file. */
	list_for_each_entry_rcu(test, &file->triggers, list) {
		if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
			test_data = test->private_data;
			hist_field = find_var_field(test_data, var_name);
			if (hist_field)
				return hist_field;
		}
	}

	return NULL;
}
/* Per-tracing-map-element private data. */
struct hist_elt_data {
	char *comm;	/* saved task comm; allocated only for .execname keys */
};
/*
 * hist_field_name - return the display name for a hist field
 * @level: recursion depth guard; only one level of indirection (the
 *         log2 operand) is followed, anything deeper yields "".
 *
 * Never returns NULL; unnameable fields map to the empty string.
 */
static const char *hist_field_name(struct hist_field *field,
				   unsigned int level)
{
	const char *field_name = "";

	if (level > 1)
		return field_name;

	if (field->field)
		field_name = field->field->name;
	else if (field->flags & HIST_FIELD_FL_LOG2)
		/* log2 wraps another field; name it after the operand */
		field_name = hist_field_name(field->operands[0], ++level);
	else if (field->flags & HIST_FIELD_FL_TIMESTAMP)
		field_name = "common_timestamp";
	else if (field->flags & HIST_FIELD_FL_EXPR)
		field_name = field->name;

	if (field_name == NULL)
		field_name = "";

	return field_name;
}
  260. static hist_field_fn_t select_value_fn(int field_size, int field_is_signed)
  261. {
  262. hist_field_fn_t fn = NULL;
  263. switch (field_size) {
  264. case 8:
  265. if (field_is_signed)
  266. fn = hist_field_s64;
  267. else
  268. fn = hist_field_u64;
  269. break;
  270. case 4:
  271. if (field_is_signed)
  272. fn = hist_field_s32;
  273. else
  274. fn = hist_field_u32;
  275. break;
  276. case 2:
  277. if (field_is_signed)
  278. fn = hist_field_s16;
  279. else
  280. fn = hist_field_u16;
  281. break;
  282. case 1:
  283. if (field_is_signed)
  284. fn = hist_field_s8;
  285. else
  286. fn = hist_field_u8;
  287. break;
  288. }
  289. return fn;
  290. }
  291. static int parse_map_size(char *str)
  292. {
  293. unsigned long size, map_bits;
  294. int ret;
  295. strsep(&str, "=");
  296. if (!str) {
  297. ret = -EINVAL;
  298. goto out;
  299. }
  300. ret = kstrtoul(str, 0, &size);
  301. if (ret)
  302. goto out;
  303. map_bits = ilog2(roundup_pow_of_two(size));
  304. if (map_bits < TRACING_MAP_BITS_MIN ||
  305. map_bits > TRACING_MAP_BITS_MAX)
  306. ret = -EINVAL;
  307. else
  308. ret = map_bits;
  309. out:
  310. return ret;
  311. }
  312. static void destroy_hist_trigger_attrs(struct hist_trigger_attrs *attrs)
  313. {
  314. unsigned int i;
  315. if (!attrs)
  316. return;
  317. for (i = 0; i < attrs->n_assignments; i++)
  318. kfree(attrs->assignment_str[i]);
  319. kfree(attrs->name);
  320. kfree(attrs->sort_key_str);
  321. kfree(attrs->keys_str);
  322. kfree(attrs->vals_str);
  323. kfree(attrs);
  324. }
/*
 * parse_assignment - handle one "lhs=rhs" token of a hist trigger string
 *
 * Recognized keys (key/keys, val/vals/values, sort, name, size) populate
 * the matching attrs member; any other assignment is stashed verbatim in
 * assignment_str[] for later variable-definition parsing.
 *
 * Returns 0 on success, -ENOMEM on allocation failure, -EINVAL on a bad
 * size or too many assignments.
 */
static int parse_assignment(char *str, struct hist_trigger_attrs *attrs)
{
	int ret = 0;

	if ((strncmp(str, "key=", strlen("key=")) == 0) ||
	    (strncmp(str, "keys=", strlen("keys=")) == 0)) {
		attrs->keys_str = kstrdup(str, GFP_KERNEL);
		if (!attrs->keys_str) {
			ret = -ENOMEM;
			goto out;
		}
	} else if ((strncmp(str, "val=", strlen("val=")) == 0) ||
		   (strncmp(str, "vals=", strlen("vals=")) == 0) ||
		   (strncmp(str, "values=", strlen("values=")) == 0)) {
		attrs->vals_str = kstrdup(str, GFP_KERNEL);
		if (!attrs->vals_str) {
			ret = -ENOMEM;
			goto out;
		}
	} else if (strncmp(str, "sort=", strlen("sort=")) == 0) {
		attrs->sort_key_str = kstrdup(str, GFP_KERNEL);
		if (!attrs->sort_key_str) {
			ret = -ENOMEM;
			goto out;
		}
	} else if (strncmp(str, "name=", strlen("name=")) == 0) {
		attrs->name = kstrdup(str, GFP_KERNEL);
		if (!attrs->name) {
			ret = -ENOMEM;
			goto out;
		}
	} else if (strncmp(str, "size=", strlen("size=")) == 0) {
		int map_bits = parse_map_size(str);

		if (map_bits < 0) {
			ret = map_bits;
			goto out;
		}
		attrs->map_bits = map_bits;
	} else {
		/* Unknown "lhs=rhs": keep it as a variable definition. */
		char *assignment;

		if (attrs->n_assignments == TRACING_MAP_VARS_MAX) {
			ret = -EINVAL;
			goto out;
		}

		assignment = kstrdup(str, GFP_KERNEL);
		if (!assignment) {
			ret = -ENOMEM;
			goto out;
		}

		attrs->assignment_str[attrs->n_assignments++] = assignment;
	}
 out:
	return ret;
}
/*
 * parse_hist_trigger_attrs - split a ':'-separated trigger string into
 * a freshly-allocated hist_trigger_attrs.
 *
 * Tokens containing '=' are assignments; bare tokens must be one of
 * pause/cont/continue/clear.  A keys= clause is mandatory.  Returns the
 * attrs on success or an ERR_PTR; on failure everything allocated so far
 * is freed.
 */
static struct hist_trigger_attrs *parse_hist_trigger_attrs(char *trigger_str)
{
	struct hist_trigger_attrs *attrs;
	int ret = 0;

	attrs = kzalloc(sizeof(*attrs), GFP_KERNEL);
	if (!attrs)
		return ERR_PTR(-ENOMEM);

	while (trigger_str) {
		char *str = strsep(&trigger_str, ":");

		if (strchr(str, '=')) {
			ret = parse_assignment(str, attrs);
			if (ret)
				goto free;
		} else if (strcmp(str, "pause") == 0)
			attrs->pause = true;
		else if ((strcmp(str, "cont") == 0) ||
			 (strcmp(str, "continue") == 0))
			attrs->cont = true;
		else if (strcmp(str, "clear") == 0)
			attrs->clear = true;
		else {
			ret = -EINVAL;
			goto free;
		}
	}

	/* A hist trigger is meaningless without at least one key. */
	if (!attrs->keys_str) {
		ret = -EINVAL;
		goto free;
	}

	return attrs;
 free:
	destroy_hist_trigger_attrs(attrs);
	return ERR_PTR(ret);
}
/*
 * save_comm - copy @task's command name into @comm
 *
 * @comm must be at least TASK_COMM_LEN bytes.  PID 0 is reported as
 * "<idle>"; a negative PID (should never happen, hence the WARN) as
 * "<XXX>".  task->comm is a TASK_COMM_LEN array the kernel keeps
 * NUL-terminated, so copying the full array is safe.
 */
static inline void save_comm(char *comm, struct task_struct *task)
{
	if (!task->pid) {
		strcpy(comm, "<idle>");
		return;
	}

	if (WARN_ON_ONCE(task->pid < 0)) {
		strcpy(comm, "<XXX>");
		return;
	}

	memcpy(comm, task->comm, TASK_COMM_LEN);
}
/* hist_elt_data_free - free per-element data and its optional comm buffer. */
static void hist_elt_data_free(struct hist_elt_data *elt_data)
{
	kfree(elt_data->comm);
	kfree(elt_data);
}
/* tracing_map .elt_free callback: release an element's private data. */
static void hist_trigger_elt_data_free(struct tracing_map_elt *elt)
{
	struct hist_elt_data *elt_data = elt->private_data;

	hist_elt_data_free(elt_data);
}
  434. static int hist_trigger_elt_data_alloc(struct tracing_map_elt *elt)
  435. {
  436. struct hist_trigger_data *hist_data = elt->map->private_data;
  437. unsigned int size = TASK_COMM_LEN;
  438. struct hist_elt_data *elt_data;
  439. struct hist_field *key_field;
  440. unsigned int i;
  441. elt_data = kzalloc(sizeof(*elt_data), GFP_KERNEL);
  442. if (!elt_data)
  443. return -ENOMEM;
  444. for_each_hist_key_field(i, hist_data) {
  445. key_field = hist_data->fields[i];
  446. if (key_field->flags & HIST_FIELD_FL_EXECNAME) {
  447. elt_data->comm = kzalloc(size, GFP_KERNEL);
  448. if (!elt_data->comm) {
  449. kfree(elt_data);
  450. return -ENOMEM;
  451. }
  452. break;
  453. }
  454. }
  455. elt->private_data = elt_data;
  456. return 0;
  457. }
/*
 * tracing_map .elt_init callback: when an element is claimed for a key,
 * snapshot the current task's comm for .execname display.
 */
static void hist_trigger_elt_data_init(struct tracing_map_elt *elt)
{
	struct hist_elt_data *elt_data = elt->private_data;

	if (elt_data->comm)
		save_comm(elt_data->comm, current);
}
/* tracing_map callbacks for per-element lifetime management. */
static const struct tracing_map_ops hist_trigger_elt_data_ops = {
	.elt_alloc	= hist_trigger_elt_data_alloc,
	.elt_free	= hist_trigger_elt_data_free,
	.elt_init	= hist_trigger_elt_data_init,
};
  469. static const char *get_hist_field_flags(struct hist_field *hist_field)
  470. {
  471. const char *flags_str = NULL;
  472. if (hist_field->flags & HIST_FIELD_FL_HEX)
  473. flags_str = "hex";
  474. else if (hist_field->flags & HIST_FIELD_FL_SYM)
  475. flags_str = "sym";
  476. else if (hist_field->flags & HIST_FIELD_FL_SYM_OFFSET)
  477. flags_str = "sym-offset";
  478. else if (hist_field->flags & HIST_FIELD_FL_EXECNAME)
  479. flags_str = "execname";
  480. else if (hist_field->flags & HIST_FIELD_FL_SYSCALL)
  481. flags_str = "syscall";
  482. else if (hist_field->flags & HIST_FIELD_FL_LOG2)
  483. flags_str = "log2";
  484. else if (hist_field->flags & HIST_FIELD_FL_TIMESTAMP_USECS)
  485. flags_str = "usecs";
  486. return flags_str;
  487. }
/*
 * expr_field_str - append "name" or "name.modifier" for one operand to
 * the expression string being built.
 *
 * NOTE(review): strcat() here is unbounded; relies on the caller's
 * MAX_FILTER_STR_VAL buffer being large enough — confirm field-name and
 * expression lengths are capped upstream of this.
 */
static void expr_field_str(struct hist_field *field, char *expr)
{
	strcat(expr, hist_field_name(field, 0));

	if (field->flags) {
		const char *flags_str = get_hist_field_flags(field);

		if (flags_str) {
			strcat(expr, ".");
			strcat(expr, flags_str);
		}
	}
}
/*
 * expr_str - build a display string for an expression field
 * @level: recursion depth; only one nested level ("-(...)" bodies) is
 *         rendered, deeper nesting returns NULL.
 *
 * Returns a kzalloc'd string the caller must free, or NULL on depth
 * overflow, allocation failure, or an unknown operator.
 *
 * NOTE(review): builds via unbounded strcat() into a MAX_FILTER_STR_VAL
 * buffer — same sizing assumption as expr_field_str(); confirm.
 */
static char *expr_str(struct hist_field *field, unsigned int level)
{
	char *expr;

	if (level > 1)
		return NULL;

	expr = kzalloc(MAX_FILTER_STR_VAL, GFP_KERNEL);
	if (!expr)
		return NULL;

	/* No operands: a plain atom, render it directly. */
	if (!field->operands[0]) {
		expr_field_str(field, expr);
		return expr;
	}

	if (field->operator == FIELD_OP_UNARY_MINUS) {
		char *subexpr;

		strcat(expr, "-(");
		subexpr = expr_str(field->operands[0], ++level);
		if (!subexpr) {
			kfree(expr);
			return NULL;
		}
		strcat(expr, subexpr);
		strcat(expr, ")");

		kfree(subexpr);

		return expr;
	}

	expr_field_str(field->operands[0], expr);

	switch (field->operator) {
	case FIELD_OP_MINUS:
		strcat(expr, "-");
		break;
	case FIELD_OP_PLUS:
		strcat(expr, "+");
		break;
	default:
		kfree(expr);
		return NULL;
	}

	expr_field_str(field->operands[1], expr);

	return expr;
}
/*
 * contains_operator - classify the first '+'/'-' found in an expression
 *
 * Returns a FIELD_OP_* value; FIELD_OP_NONE when no operator is present.
 * A '-' is unary only when it is also the first character of the string;
 * note "-a+b" therefore classifies as unary minus, since strpbrk() hits
 * the leading '-' before the '+'.
 */
static int contains_operator(char *str)
{
	enum field_op_id field_op = FIELD_OP_NONE;
	char *op;

	op = strpbrk(str, "+-");
	if (!op)
		return FIELD_OP_NONE;

	switch (*op) {
	case '-':
		if (*str == '-')
			field_op = FIELD_OP_UNARY_MINUS;
		else
			field_op = FIELD_OP_MINUS;
		break;
	case '+':
		field_op = FIELD_OP_PLUS;
		break;
	default:
		break;
	}

	return field_op;
}
/*
 * destroy_hist_field - recursively free a hist field and its operands
 * @level: recursion depth; anything beyond 3 is silently dropped,
 *         bounding expression tree depth.
 *
 * Frees the variable name, the expression display name, and the field
 * itself.  Safe to call with NULL.
 */
static void destroy_hist_field(struct hist_field *hist_field,
			       unsigned int level)
{
	unsigned int i;

	if (level > 3)
		return;

	if (!hist_field)
		return;

	for (i = 0; i < HIST_FIELD_OPERANDS_MAX; i++)
		destroy_hist_field(hist_field->operands[i], level + 1);

	kfree(hist_field->var.name);
	kfree(hist_field->name);

	kfree(hist_field);
}
  575. static struct hist_field *create_hist_field(struct hist_trigger_data *hist_data,
  576. struct ftrace_event_field *field,
  577. unsigned long flags,
  578. char *var_name)
  579. {
  580. struct hist_field *hist_field;
  581. if (field && is_function_field(field))
  582. return NULL;
  583. hist_field = kzalloc(sizeof(struct hist_field), GFP_KERNEL);
  584. if (!hist_field)
  585. return NULL;
  586. hist_field->hist_data = hist_data;
  587. if (flags & HIST_FIELD_FL_EXPR)
  588. goto out; /* caller will populate */
  589. if (flags & HIST_FIELD_FL_HITCOUNT) {
  590. hist_field->fn = hist_field_counter;
  591. goto out;
  592. }
  593. if (flags & HIST_FIELD_FL_STACKTRACE) {
  594. hist_field->fn = hist_field_none;
  595. goto out;
  596. }
  597. if (flags & HIST_FIELD_FL_LOG2) {
  598. unsigned long fl = flags & ~HIST_FIELD_FL_LOG2;
  599. hist_field->fn = hist_field_log2;
  600. hist_field->operands[0] = create_hist_field(hist_data, field, fl, NULL);
  601. hist_field->size = hist_field->operands[0]->size;
  602. goto out;
  603. }
  604. if (flags & HIST_FIELD_FL_TIMESTAMP) {
  605. hist_field->fn = hist_field_timestamp;
  606. hist_field->size = sizeof(u64);
  607. goto out;
  608. }
  609. if (WARN_ON_ONCE(!field))
  610. goto out;
  611. if (is_string_field(field)) {
  612. flags |= HIST_FIELD_FL_STRING;
  613. if (field->filter_type == FILTER_STATIC_STRING)
  614. hist_field->fn = hist_field_string;
  615. else if (field->filter_type == FILTER_DYN_STRING)
  616. hist_field->fn = hist_field_dynstring;
  617. else
  618. hist_field->fn = hist_field_pstring;
  619. } else {
  620. hist_field->fn = select_value_fn(field->size,
  621. field->is_signed);
  622. if (!hist_field->fn) {
  623. destroy_hist_field(hist_field, 0);
  624. return NULL;
  625. }
  626. }
  627. out:
  628. hist_field->field = field;
  629. hist_field->flags = flags;
  630. if (var_name) {
  631. hist_field->var.name = kstrdup(var_name, GFP_KERNEL);
  632. if (!hist_field->var.name)
  633. goto free;
  634. }
  635. return hist_field;
  636. free:
  637. destroy_hist_field(hist_field, 0);
  638. return NULL;
  639. }
  640. static void destroy_hist_fields(struct hist_trigger_data *hist_data)
  641. {
  642. unsigned int i;
  643. for (i = 0; i < HIST_FIELDS_MAX; i++) {
  644. if (hist_data->fields[i]) {
  645. destroy_hist_field(hist_data->fields[i], 0);
  646. hist_data->fields[i] = NULL;
  647. }
  648. }
  649. }
/*
 * parse_field - resolve "field[.modifier]" to an ftrace_event_field
 *
 * Splits off an optional '.' modifier and sets the matching flag bit in
 * *flags.  "common_timestamp" is synthetic: no struct field is returned
 * (NULL), HIST_FIELD_FL_TIMESTAMP is set, and ring-buffer timestamps are
 * enabled for the trigger.  Any other name must exist on the event.
 *
 * Returns the field (or NULL for common_timestamp), ERR_PTR(-EINVAL) on
 * an unknown modifier or field, ERR_PTR(-ENOMEM) if the working copy of
 * the string can't be allocated.
 */
static struct ftrace_event_field *
parse_field(struct hist_trigger_data *hist_data, struct trace_event_file *file,
	    char *field_str, unsigned long *flags)
{
	struct ftrace_event_field *field = NULL;
	char *field_name, *modifier, *str;

	/* Work on a copy: strsep() mutates the string. */
	modifier = str = kstrdup(field_str, GFP_KERNEL);
	if (!modifier)
		return ERR_PTR(-ENOMEM);

	field_name = strsep(&modifier, ".");
	if (modifier) {
		if (strcmp(modifier, "hex") == 0)
			*flags |= HIST_FIELD_FL_HEX;
		else if (strcmp(modifier, "sym") == 0)
			*flags |= HIST_FIELD_FL_SYM;
		else if (strcmp(modifier, "sym-offset") == 0)
			*flags |= HIST_FIELD_FL_SYM_OFFSET;
		else if ((strcmp(modifier, "execname") == 0) &&
			 (strcmp(field_name, "common_pid") == 0))
			*flags |= HIST_FIELD_FL_EXECNAME;
		else if (strcmp(modifier, "syscall") == 0)
			*flags |= HIST_FIELD_FL_SYSCALL;
		else if (strcmp(modifier, "log2") == 0)
			*flags |= HIST_FIELD_FL_LOG2;
		else if (strcmp(modifier, "usecs") == 0)
			*flags |= HIST_FIELD_FL_TIMESTAMP_USECS;
		else {
			field = ERR_PTR(-EINVAL);
			goto out;
		}
	}

	if (strcmp(field_name, "common_timestamp") == 0) {
		*flags |= HIST_FIELD_FL_TIMESTAMP;
		hist_data->enable_timestamps = true;
		if (*flags & HIST_FIELD_FL_TIMESTAMP_USECS)
			hist_data->attrs->ts_in_usecs = true;
	} else {
		field = trace_find_event_field(file->event_call, field_name);
		if (!field || !field->size) {
			field = ERR_PTR(-EINVAL);
			goto out;
		}
	}
 out:
	kfree(str);

	return field;
}
  697. static struct hist_field *parse_atom(struct hist_trigger_data *hist_data,
  698. struct trace_event_file *file, char *str,
  699. unsigned long *flags, char *var_name)
  700. {
  701. struct ftrace_event_field *field = NULL;
  702. struct hist_field *hist_field = NULL;
  703. int ret = 0;
  704. field = parse_field(hist_data, file, str, flags);
  705. if (IS_ERR(field)) {
  706. ret = PTR_ERR(field);
  707. goto out;
  708. }
  709. hist_field = create_hist_field(hist_data, field, *flags, var_name);
  710. if (!hist_field) {
  711. ret = -ENOMEM;
  712. goto out;
  713. }
  714. return hist_field;
  715. out:
  716. return ERR_PTR(ret);
  717. }
  718. static struct hist_field *parse_expr(struct hist_trigger_data *hist_data,
  719. struct trace_event_file *file,
  720. char *str, unsigned long flags,
  721. char *var_name, unsigned int level);
/*
 * Parse a unary-minus expression of the form "-(subexpr)".
 *
 * Only the explicitly parenthesized form is accepted; the leading '-'
 * must be the first character of @str.  Returns the new expression
 * hist_field or an ERR_PTR() on failure.
 */
static struct hist_field *parse_unary(struct hist_trigger_data *hist_data,
				      struct trace_event_file *file,
				      char *str, unsigned long flags,
				      char *var_name, unsigned int level)
{
	struct hist_field *operand1, *expr = NULL;
	unsigned long operand_flags;
	int ret = 0;
	char *s;

	/* we support only -(xxx) i.e. explicit parens required */

	/* Cap nesting depth of recursive expression parsing. */
	if (level > 3) {
		ret = -EINVAL;
		goto free;
	}

	str++; /* skip leading '-' */

	/*
	 * NOTE(review): this checks that a '(' exists somewhere, then
	 * skips exactly one character - i.e. it assumes the '(' directly
	 * follows the '-'.  Confirm contains_operator() only reports
	 * FIELD_OP_UNARY_MINUS for a leading minus.
	 */
	s = strchr(str, '(');
	if (s)
		str++;
	else {
		ret = -EINVAL;
		goto free;
	}

	/* Terminate the operand at the last ')' (modifies str in place). */
	s = strrchr(str, ')');
	if (s)
		*s = '\0';
	else {
		ret = -EINVAL; /* no closing ')' */
		goto free;
	}

	flags |= HIST_FIELD_FL_EXPR;
	expr = create_hist_field(hist_data, NULL, flags, var_name);
	if (!expr) {
		ret = -ENOMEM;
		goto free;
	}

	/* Recursively parse what was inside the parens. */
	operand_flags = 0;
	operand1 = parse_expr(hist_data, file, str, operand_flags, NULL, ++level);
	if (IS_ERR(operand1)) {
		ret = PTR_ERR(operand1);
		goto free;
	}

	/* Propagate timestamp-ness so later conversion/printing is right. */
	expr->flags |= operand1->flags &
		(HIST_FIELD_FL_TIMESTAMP | HIST_FIELD_FL_TIMESTAMP_USECS);
	expr->fn = hist_field_unary_minus;
	expr->operands[0] = operand1;
	expr->operator = FIELD_OP_UNARY_MINUS;
	expr->name = expr_str(expr, 0);

	return expr;
 free:
	destroy_hist_field(expr, 0);
	return ERR_PTR(ret);
}
  774. static int check_expr_operands(struct hist_field *operand1,
  775. struct hist_field *operand2)
  776. {
  777. unsigned long operand1_flags = operand1->flags;
  778. unsigned long operand2_flags = operand2->flags;
  779. if ((operand1_flags & HIST_FIELD_FL_TIMESTAMP_USECS) !=
  780. (operand2_flags & HIST_FIELD_FL_TIMESTAMP_USECS))
  781. return -EINVAL;
  782. return 0;
  783. }
  784. static struct hist_field *parse_expr(struct hist_trigger_data *hist_data,
  785. struct trace_event_file *file,
  786. char *str, unsigned long flags,
  787. char *var_name, unsigned int level)
  788. {
  789. struct hist_field *operand1 = NULL, *operand2 = NULL, *expr = NULL;
  790. unsigned long operand_flags;
  791. int field_op, ret = -EINVAL;
  792. char *sep, *operand1_str;
  793. if (level > 3)
  794. return ERR_PTR(-EINVAL);
  795. field_op = contains_operator(str);
  796. if (field_op == FIELD_OP_NONE)
  797. return parse_atom(hist_data, file, str, &flags, var_name);
  798. if (field_op == FIELD_OP_UNARY_MINUS)
  799. return parse_unary(hist_data, file, str, flags, var_name, ++level);
  800. switch (field_op) {
  801. case FIELD_OP_MINUS:
  802. sep = "-";
  803. break;
  804. case FIELD_OP_PLUS:
  805. sep = "+";
  806. break;
  807. default:
  808. goto free;
  809. }
  810. operand1_str = strsep(&str, sep);
  811. if (!operand1_str || !str)
  812. goto free;
  813. operand_flags = 0;
  814. operand1 = parse_atom(hist_data, file, operand1_str,
  815. &operand_flags, NULL);
  816. if (IS_ERR(operand1)) {
  817. ret = PTR_ERR(operand1);
  818. operand1 = NULL;
  819. goto free;
  820. }
  821. /* rest of string could be another expression e.g. b+c in a+b+c */
  822. operand_flags = 0;
  823. operand2 = parse_expr(hist_data, file, str, operand_flags, NULL, ++level);
  824. if (IS_ERR(operand2)) {
  825. ret = PTR_ERR(operand2);
  826. operand2 = NULL;
  827. goto free;
  828. }
  829. ret = check_expr_operands(operand1, operand2);
  830. if (ret)
  831. goto free;
  832. flags |= HIST_FIELD_FL_EXPR;
  833. flags |= operand1->flags &
  834. (HIST_FIELD_FL_TIMESTAMP | HIST_FIELD_FL_TIMESTAMP_USECS);
  835. expr = create_hist_field(hist_data, NULL, flags, var_name);
  836. if (!expr) {
  837. ret = -ENOMEM;
  838. goto free;
  839. }
  840. expr->operands[0] = operand1;
  841. expr->operands[1] = operand2;
  842. expr->operator = field_op;
  843. expr->name = expr_str(expr, 0);
  844. switch (field_op) {
  845. case FIELD_OP_MINUS:
  846. expr->fn = hist_field_minus;
  847. break;
  848. case FIELD_OP_PLUS:
  849. expr->fn = hist_field_plus;
  850. break;
  851. default:
  852. goto free;
  853. }
  854. return expr;
  855. free:
  856. destroy_hist_field(operand1, 0);
  857. destroy_hist_field(operand2, 0);
  858. destroy_hist_field(expr, 0);
  859. return ERR_PTR(ret);
  860. }
  861. static int create_hitcount_val(struct hist_trigger_data *hist_data)
  862. {
  863. hist_data->fields[HITCOUNT_IDX] =
  864. create_hist_field(hist_data, NULL, HIST_FIELD_FL_HITCOUNT, NULL);
  865. if (!hist_data->fields[HITCOUNT_IDX])
  866. return -ENOMEM;
  867. hist_data->n_vals++;
  868. hist_data->n_fields++;
  869. if (WARN_ON(hist_data->n_vals > TRACING_MAP_VALS_MAX))
  870. return -EINVAL;
  871. return 0;
  872. }
  873. static int __create_val_field(struct hist_trigger_data *hist_data,
  874. unsigned int val_idx,
  875. struct trace_event_file *file,
  876. char *var_name, char *field_str,
  877. unsigned long flags)
  878. {
  879. struct hist_field *hist_field;
  880. int ret = 0;
  881. hist_field = parse_expr(hist_data, file, field_str, flags, var_name, 0);
  882. if (IS_ERR(hist_field)) {
  883. ret = PTR_ERR(hist_field);
  884. goto out;
  885. }
  886. hist_data->fields[val_idx] = hist_field;
  887. ++hist_data->n_vals;
  888. ++hist_data->n_fields;
  889. if (WARN_ON(hist_data->n_vals > TRACING_MAP_VALS_MAX + TRACING_MAP_VARS_MAX))
  890. ret = -EINVAL;
  891. out:
  892. return ret;
  893. }
/*
 * Create an ordinary (non-variable) value field from one "vals=" entry.
 * val_idx must fall within the plain-values area of fields[].
 */
static int create_val_field(struct hist_trigger_data *hist_data,
			    unsigned int val_idx,
			    struct trace_event_file *file,
			    char *field_str)
{
	if (WARN_ON(val_idx >= TRACING_MAP_VALS_MAX))
		return -EINVAL;

	return __create_val_field(hist_data, val_idx, file, NULL, field_str, 0);
}
  903. static int create_var_field(struct hist_trigger_data *hist_data,
  904. unsigned int val_idx,
  905. struct trace_event_file *file,
  906. char *var_name, char *expr_str)
  907. {
  908. unsigned long flags = 0;
  909. if (WARN_ON(val_idx >= TRACING_MAP_VALS_MAX + TRACING_MAP_VARS_MAX))
  910. return -EINVAL;
  911. if (find_var(hist_data, file, var_name) && !hist_data->remove) {
  912. return -EINVAL;
  913. }
  914. flags |= HIST_FIELD_FL_VAR;
  915. hist_data->n_vars++;
  916. if (WARN_ON(hist_data->n_vars > TRACING_MAP_VARS_MAX))
  917. return -EINVAL;
  918. return __create_val_field(hist_data, val_idx, file, var_name, expr_str, flags);
  919. }
  920. static int create_val_fields(struct hist_trigger_data *hist_data,
  921. struct trace_event_file *file)
  922. {
  923. char *fields_str, *field_str;
  924. unsigned int i, j = 1;
  925. int ret;
  926. ret = create_hitcount_val(hist_data);
  927. if (ret)
  928. goto out;
  929. fields_str = hist_data->attrs->vals_str;
  930. if (!fields_str)
  931. goto out;
  932. strsep(&fields_str, "=");
  933. if (!fields_str)
  934. goto out;
  935. for (i = 0, j = 1; i < TRACING_MAP_VALS_MAX &&
  936. j < TRACING_MAP_VALS_MAX; i++) {
  937. field_str = strsep(&fields_str, ",");
  938. if (!field_str)
  939. break;
  940. if (strcmp(field_str, "hitcount") == 0)
  941. continue;
  942. ret = create_val_field(hist_data, j++, file, field_str);
  943. if (ret)
  944. goto out;
  945. }
  946. if (fields_str && (strcmp(fields_str, "hitcount") != 0))
  947. ret = -EINVAL;
  948. out:
  949. return ret;
  950. }
  951. static int create_key_field(struct hist_trigger_data *hist_data,
  952. unsigned int key_idx,
  953. unsigned int key_offset,
  954. struct trace_event_file *file,
  955. char *field_str)
  956. {
  957. struct hist_field *hist_field = NULL;
  958. unsigned long flags = 0;
  959. unsigned int key_size;
  960. int ret = 0;
  961. if (WARN_ON(key_idx >= HIST_FIELDS_MAX))
  962. return -EINVAL;
  963. flags |= HIST_FIELD_FL_KEY;
  964. if (strcmp(field_str, "stacktrace") == 0) {
  965. flags |= HIST_FIELD_FL_STACKTRACE;
  966. key_size = sizeof(unsigned long) * HIST_STACKTRACE_DEPTH;
  967. hist_field = create_hist_field(hist_data, NULL, flags, NULL);
  968. } else {
  969. hist_field = parse_expr(hist_data, file, field_str, flags,
  970. NULL, 0);
  971. if (IS_ERR(hist_field)) {
  972. ret = PTR_ERR(hist_field);
  973. goto out;
  974. }
  975. key_size = hist_field->size;
  976. }
  977. hist_data->fields[key_idx] = hist_field;
  978. key_size = ALIGN(key_size, sizeof(u64));
  979. hist_data->fields[key_idx]->size = key_size;
  980. hist_data->fields[key_idx]->offset = key_offset;
  981. hist_data->key_size += key_size;
  982. if (hist_data->key_size > HIST_KEY_SIZE_MAX) {
  983. ret = -EINVAL;
  984. goto out;
  985. }
  986. hist_data->n_keys++;
  987. hist_data->n_fields++;
  988. if (WARN_ON(hist_data->n_keys > TRACING_MAP_KEYS_MAX))
  989. return -EINVAL;
  990. ret = key_size;
  991. out:
  992. return ret;
  993. }
  994. static int create_key_fields(struct hist_trigger_data *hist_data,
  995. struct trace_event_file *file)
  996. {
  997. unsigned int i, key_offset = 0, n_vals = hist_data->n_vals;
  998. char *fields_str, *field_str;
  999. int ret = -EINVAL;
  1000. fields_str = hist_data->attrs->keys_str;
  1001. if (!fields_str)
  1002. goto out;
  1003. strsep(&fields_str, "=");
  1004. if (!fields_str)
  1005. goto out;
  1006. for (i = n_vals; i < n_vals + TRACING_MAP_KEYS_MAX; i++) {
  1007. field_str = strsep(&fields_str, ",");
  1008. if (!field_str)
  1009. break;
  1010. ret = create_key_field(hist_data, i, key_offset,
  1011. file, field_str);
  1012. if (ret < 0)
  1013. goto out;
  1014. key_offset += ret;
  1015. }
  1016. if (fields_str) {
  1017. ret = -EINVAL;
  1018. goto out;
  1019. }
  1020. ret = 0;
  1021. out:
  1022. return ret;
  1023. }
  1024. static int create_var_fields(struct hist_trigger_data *hist_data,
  1025. struct trace_event_file *file)
  1026. {
  1027. unsigned int i, j = hist_data->n_vals;
  1028. int ret = 0;
  1029. unsigned int n_vars = hist_data->attrs->var_defs.n_vars;
  1030. for (i = 0; i < n_vars; i++) {
  1031. char *var_name = hist_data->attrs->var_defs.name[i];
  1032. char *expr = hist_data->attrs->var_defs.expr[i];
  1033. ret = create_var_field(hist_data, j++, file, var_name, expr);
  1034. if (ret)
  1035. goto out;
  1036. }
  1037. out:
  1038. return ret;
  1039. }
  1040. static void free_var_defs(struct hist_trigger_data *hist_data)
  1041. {
  1042. unsigned int i;
  1043. for (i = 0; i < hist_data->attrs->var_defs.n_vars; i++) {
  1044. kfree(hist_data->attrs->var_defs.name[i]);
  1045. kfree(hist_data->attrs->var_defs.expr[i]);
  1046. }
  1047. hist_data->attrs->var_defs.n_vars = 0;
  1048. }
/*
 * Split the trigger's "name=expr[,name2=expr2...]" assignment strings
 * into separate variable-name / expression strings and stash
 * kstrdup()ed copies in hist_data->attrs->var_defs for
 * create_var_fields().
 *
 * Returns 0 on success; on any error every already-saved def is freed
 * and a negative errno is returned.
 */
static int parse_var_defs(struct hist_trigger_data *hist_data)
{
	char *s, *str, *var_name, *field_str;
	unsigned int i, j, n_vars = 0;
	int ret = 0;

	for (i = 0; i < hist_data->attrs->n_assignments; i++) {
		str = hist_data->attrs->assignment_str[i];
		/* One assignment string may hold several comma-separated defs. */
		for (j = 0; j < TRACING_MAP_VARS_MAX; j++) {
			field_str = strsep(&str, ",");
			if (!field_str)
				break;

			var_name = strsep(&field_str, "=");
			if (!var_name || !field_str) {
				/* malformed: missing name or missing "=expr" */
				ret = -EINVAL;
				goto free;
			}

			if (n_vars == TRACING_MAP_VARS_MAX) {
				ret = -EINVAL;
				goto free;
			}

			s = kstrdup(var_name, GFP_KERNEL);
			if (!s) {
				ret = -ENOMEM;
				goto free;
			}
			hist_data->attrs->var_defs.name[n_vars] = s;

			s = kstrdup(field_str, GFP_KERNEL);
			if (!s) {
				/* drop the half-saved name before bailing */
				kfree(hist_data->attrs->var_defs.name[n_vars]);
				ret = -ENOMEM;
				goto free;
			}
			hist_data->attrs->var_defs.expr[n_vars++] = s;

			/* keep count current so free_var_defs() frees all on error */
			hist_data->attrs->var_defs.n_vars = n_vars;
		}
	}

	return ret;
 free:
	free_var_defs(hist_data);

	return ret;
}
  1090. static int create_hist_fields(struct hist_trigger_data *hist_data,
  1091. struct trace_event_file *file)
  1092. {
  1093. int ret;
  1094. ret = parse_var_defs(hist_data);
  1095. if (ret)
  1096. goto out;
  1097. ret = create_val_fields(hist_data, file);
  1098. if (ret)
  1099. goto out;
  1100. ret = create_var_fields(hist_data, file);
  1101. if (ret)
  1102. goto out;
  1103. ret = create_key_fields(hist_data, file);
  1104. if (ret)
  1105. goto out;
  1106. out:
  1107. free_var_defs(hist_data);
  1108. return ret;
  1109. }
  1110. static int is_descending(const char *str)
  1111. {
  1112. if (!str)
  1113. return 0;
  1114. if (strcmp(str, "descending") == 0)
  1115. return 1;
  1116. if (strcmp(str, "ascending") == 0)
  1117. return 0;
  1118. return -EINVAL;
  1119. }
/*
 * Parse the trigger's "sort=" string into hist_data->sort_keys[].
 *
 * A sort spec is "sort=key1[.descending],key2..." where each key is
 * either "hitcount" or the name of one of the histogram's value
 * fields.  At least one sort key (hitcount, via zeroed field_idx 0)
 * always exists.
 */
static int create_sort_keys(struct hist_trigger_data *hist_data)
{
	char *fields_str = hist_data->attrs->sort_key_str;
	struct tracing_map_sort_key *sort_key;
	int descending, ret = 0;
	unsigned int i, j, k;

	hist_data->n_sort_keys = 1; /* we always have at least one, hitcount */

	if (!fields_str)
		goto out;

	/* Skip the "sort=" prefix. */
	strsep(&fields_str, "=");
	if (!fields_str) {
		ret = -EINVAL;
		goto out;
	}

	for (i = 0; i < TRACING_MAP_SORT_KEYS_MAX; i++) {
		struct hist_field *hist_field;
		char *field_str, *field_name;
		const char *test_name;

		sort_key = &hist_data->sort_keys[i];

		field_str = strsep(&fields_str, ",");
		if (!field_str) {
			if (i == 0)
				ret = -EINVAL;	/* empty sort spec */
			break;
		}

		/* more keys specified than sort_keys[] can hold */
		if ((i == TRACING_MAP_SORT_KEYS_MAX - 1) && fields_str) {
			ret = -EINVAL;
			break;
		}

		/* split off an optional ".ascending"/".descending" suffix */
		field_name = strsep(&field_str, ".");
		if (!field_name) {
			ret = -EINVAL;
			break;
		}

		if (strcmp(field_name, "hitcount") == 0) {
			/* field_idx stays 0 == HITCOUNT_IDX here */
			descending = is_descending(field_str);
			if (descending < 0) {
				ret = descending;
				break;
			}
			sort_key->descending = descending;
			continue;
		}

		/*
		 * Match against the value fields, skipping hitcount (0).
		 * k tracks the sort index separately because variables
		 * are not sortable and don't consume a sort slot.
		 */
		for (j = 1, k = 1; j < hist_data->n_fields; j++) {
			unsigned int idx;

			hist_field = hist_data->fields[j];
			if (hist_field->flags & HIST_FIELD_FL_VAR)
				continue;

			idx = k++;

			test_name = hist_field_name(hist_field, 0);

			if (strcmp(field_name, test_name) == 0) {
				sort_key->field_idx = idx;
				descending = is_descending(field_str);
				if (descending < 0) {
					ret = descending;
					goto out;
				}
				sort_key->descending = descending;
				break;
			}
		}
		if (j == hist_data->n_fields) {
			ret = -EINVAL;	/* no field by that name */
			break;
		}
	}

	/*
	 * On the normal break (input exhausted) i equals the number of
	 * keys parsed.  NOTE(review): on error-breaks i may undercount,
	 * but ret != 0 makes the caller discard the trigger anyway.
	 */
	hist_data->n_sort_keys = i;
 out:
	return ret;
}
/*
 * Free a hist_trigger_data and everything it owns: the parsed attrs,
 * all hist_fields, the tracing_map, and finally the struct itself.
 * (create_hist_data()'s error path NULLs ->attrs before calling this,
 * because the caller still owns the attrs in that case.)
 */
static void destroy_hist_data(struct hist_trigger_data *hist_data)
{
	destroy_hist_trigger_attrs(hist_data->attrs);
	destroy_hist_fields(hist_data);
	tracing_map_destroy(hist_data->map);
	kfree(hist_data);
}
  1197. static int create_tracing_map_fields(struct hist_trigger_data *hist_data)
  1198. {
  1199. struct tracing_map *map = hist_data->map;
  1200. struct ftrace_event_field *field;
  1201. struct hist_field *hist_field;
  1202. int i, idx;
  1203. for_each_hist_field(i, hist_data) {
  1204. hist_field = hist_data->fields[i];
  1205. if (hist_field->flags & HIST_FIELD_FL_KEY) {
  1206. tracing_map_cmp_fn_t cmp_fn;
  1207. field = hist_field->field;
  1208. if (hist_field->flags & HIST_FIELD_FL_STACKTRACE)
  1209. cmp_fn = tracing_map_cmp_none;
  1210. else if (!field)
  1211. cmp_fn = tracing_map_cmp_num(hist_field->size,
  1212. hist_field->is_signed);
  1213. else if (is_string_field(field))
  1214. cmp_fn = tracing_map_cmp_string;
  1215. else
  1216. cmp_fn = tracing_map_cmp_num(field->size,
  1217. field->is_signed);
  1218. idx = tracing_map_add_key_field(map,
  1219. hist_field->offset,
  1220. cmp_fn);
  1221. } else if (!(hist_field->flags & HIST_FIELD_FL_VAR))
  1222. idx = tracing_map_add_sum_field(map);
  1223. if (idx < 0)
  1224. return idx;
  1225. if (hist_field->flags & HIST_FIELD_FL_VAR) {
  1226. idx = tracing_map_add_var(map);
  1227. if (idx < 0)
  1228. return idx;
  1229. hist_field->var.idx = idx;
  1230. hist_field->var.hist_data = hist_data;
  1231. }
  1232. }
  1233. return 0;
  1234. }
  1235. static struct hist_trigger_data *
  1236. create_hist_data(unsigned int map_bits,
  1237. struct hist_trigger_attrs *attrs,
  1238. struct trace_event_file *file,
  1239. bool remove)
  1240. {
  1241. const struct tracing_map_ops *map_ops = NULL;
  1242. struct hist_trigger_data *hist_data;
  1243. int ret = 0;
  1244. hist_data = kzalloc(sizeof(*hist_data), GFP_KERNEL);
  1245. if (!hist_data)
  1246. return ERR_PTR(-ENOMEM);
  1247. hist_data->attrs = attrs;
  1248. hist_data->remove = remove;
  1249. ret = create_hist_fields(hist_data, file);
  1250. if (ret)
  1251. goto free;
  1252. ret = create_sort_keys(hist_data);
  1253. if (ret)
  1254. goto free;
  1255. map_ops = &hist_trigger_elt_data_ops;
  1256. hist_data->map = tracing_map_create(map_bits, hist_data->key_size,
  1257. map_ops, hist_data);
  1258. if (IS_ERR(hist_data->map)) {
  1259. ret = PTR_ERR(hist_data->map);
  1260. hist_data->map = NULL;
  1261. goto free;
  1262. }
  1263. ret = create_tracing_map_fields(hist_data);
  1264. if (ret)
  1265. goto free;
  1266. ret = tracing_map_init(hist_data->map);
  1267. if (ret)
  1268. goto free;
  1269. hist_data->event_file = file;
  1270. out:
  1271. return hist_data;
  1272. free:
  1273. hist_data->attrs = NULL;
  1274. destroy_hist_data(hist_data);
  1275. hist_data = ERR_PTR(ret);
  1276. goto out;
  1277. }
  1278. static void hist_trigger_elt_update(struct hist_trigger_data *hist_data,
  1279. struct tracing_map_elt *elt, void *rec,
  1280. struct ring_buffer_event *rbe)
  1281. {
  1282. struct hist_field *hist_field;
  1283. unsigned int i, var_idx;
  1284. u64 hist_val;
  1285. for_each_hist_val_field(i, hist_data) {
  1286. hist_field = hist_data->fields[i];
  1287. hist_val = hist_field->fn(hist_field, rec, rbe);
  1288. if (hist_field->flags & HIST_FIELD_FL_VAR) {
  1289. var_idx = hist_field->var.idx;
  1290. tracing_map_set_var(elt, var_idx, hist_val);
  1291. continue;
  1292. }
  1293. tracing_map_update_sum(elt, i, hist_val);
  1294. }
  1295. for_each_hist_key_field(i, hist_data) {
  1296. hist_field = hist_data->fields[i];
  1297. if (hist_field->flags & HIST_FIELD_FL_VAR) {
  1298. hist_val = hist_field->fn(hist_field, rec, rbe);
  1299. var_idx = hist_field->var.idx;
  1300. tracing_map_set_var(elt, var_idx, hist_val);
  1301. }
  1302. }
  1303. }
  1304. static inline void add_to_key(char *compound_key, void *key,
  1305. struct hist_field *key_field, void *rec)
  1306. {
  1307. size_t size = key_field->size;
  1308. if (key_field->flags & HIST_FIELD_FL_STRING) {
  1309. struct ftrace_event_field *field;
  1310. field = key_field->field;
  1311. if (field->filter_type == FILTER_DYN_STRING)
  1312. size = *(u32 *)(rec + field->offset) >> 16;
  1313. else if (field->filter_type == FILTER_PTR_STRING)
  1314. size = strlen(key);
  1315. else if (field->filter_type == FILTER_STATIC_STRING)
  1316. size = field->size;
  1317. /* ensure NULL-termination */
  1318. if (size > key_field->size - 1)
  1319. size = key_field->size - 1;
  1320. }
  1321. memcpy(compound_key + key_field->offset, key, size);
  1322. }
/*
 * Per-event hot path: runs for every event hit that passes the
 * trigger's filter.  Builds the key (simple or compound), inserts or
 * looks up the map element, and updates its sums and variables.
 */
static void event_hist_trigger(struct event_trigger_data *data, void *rec,
			       struct ring_buffer_event *rbe)
{
	struct hist_trigger_data *hist_data = data->private_data;
	bool use_compound_key = (hist_data->n_keys > 1);
	unsigned long entries[HIST_STACKTRACE_DEPTH];
	char compound_key[HIST_KEY_SIZE_MAX];
	struct stack_trace stacktrace;
	struct hist_field *key_field;
	struct tracing_map_elt *elt;
	u64 field_contents;
	void *key = NULL;
	unsigned int i;

	memset(compound_key, 0, hist_data->key_size);

	for_each_hist_key_field(i, hist_data) {
		key_field = hist_data->fields[i];

		if (key_field->flags & HIST_FIELD_FL_STACKTRACE) {
			/* Capture the current kernel stack as the key. */
			stacktrace.max_entries = HIST_STACKTRACE_DEPTH;
			stacktrace.entries = entries;
			stacktrace.nr_entries = 0;
			stacktrace.skip = HIST_STACKTRACE_SKIP;

			/* zero first so the unused tail compares equal */
			memset(stacktrace.entries, 0, HIST_STACKTRACE_SIZE);
			save_stack_trace(&stacktrace);

			key = entries;
		} else {
			field_contents = key_field->fn(key_field, rec, rbe);
			if (key_field->flags & HIST_FIELD_FL_STRING) {
				/* string keys always use the compound buffer */
				key = (void *)(unsigned long)field_contents;
				use_compound_key = true;
			} else
				key = (void *)&field_contents;
		}

		if (use_compound_key)
			add_to_key(compound_key, key, key_field, rec);
	}

	if (use_compound_key)
		key = compound_key;

	elt = tracing_map_insert(hist_data->map, key);
	if (elt)
		hist_trigger_elt_update(hist_data, elt, rec, rbe);
}
  1364. static void hist_trigger_stacktrace_print(struct seq_file *m,
  1365. unsigned long *stacktrace_entries,
  1366. unsigned int max_entries)
  1367. {
  1368. char str[KSYM_SYMBOL_LEN];
  1369. unsigned int spaces = 8;
  1370. unsigned int i;
  1371. for (i = 0; i < max_entries; i++) {
  1372. if (stacktrace_entries[i] == ULONG_MAX)
  1373. return;
  1374. seq_printf(m, "%*c", 1 + spaces, ' ');
  1375. sprint_symbol(str, stacktrace_entries[i]);
  1376. seq_printf(m, "%s\n", str);
  1377. }
  1378. }
/*
 * Print one histogram entry: "{ key1: v, key2: v } hitcount: N ..."
 * followed by the remaining value sums.  How each key renders depends
 * on its modifier flags (.hex, .sym, .execname, ...).
 */
static void
hist_trigger_entry_print(struct seq_file *m,
			 struct hist_trigger_data *hist_data, void *key,
			 struct tracing_map_elt *elt)
{
	struct hist_field *key_field;
	char str[KSYM_SYMBOL_LEN];
	bool multiline = false;
	const char *field_name;
	unsigned int i;
	u64 uval;

	seq_puts(m, "{ ");

	for_each_hist_key_field(i, hist_data) {
		key_field = hist_data->fields[i];

		/* key fields start at n_vals, so the first gets no comma */
		if (i > hist_data->n_vals)
			seq_puts(m, ", ");

		field_name = hist_field_name(key_field, 0);

		if (key_field->flags & HIST_FIELD_FL_HEX) {
			uval = *(u64 *)(key + key_field->offset);
			seq_printf(m, "%s: %llx", field_name, uval);
		} else if (key_field->flags & HIST_FIELD_FL_SYM) {
			uval = *(u64 *)(key + key_field->offset);
			sprint_symbol_no_offset(str, uval);
			seq_printf(m, "%s: [%llx] %-45s", field_name,
				   uval, str);
		} else if (key_field->flags & HIST_FIELD_FL_SYM_OFFSET) {
			uval = *(u64 *)(key + key_field->offset);
			sprint_symbol(str, uval);
			seq_printf(m, "%s: [%llx] %-55s", field_name,
				   uval, str);
		} else if (key_field->flags & HIST_FIELD_FL_EXECNAME) {
			/* the task comm was saved per-element at insert */
			struct hist_elt_data *elt_data = elt->private_data;
			char *comm;

			if (WARN_ON_ONCE(!elt_data))
				return;

			comm = elt_data->comm;

			uval = *(u64 *)(key + key_field->offset);
			seq_printf(m, "%s: %-16s[%10llu]", field_name,
				   comm, uval);
		} else if (key_field->flags & HIST_FIELD_FL_SYSCALL) {
			const char *syscall_name;

			uval = *(u64 *)(key + key_field->offset);
			syscall_name = get_syscall_name(uval);
			if (!syscall_name)
				syscall_name = "unknown_syscall";

			seq_printf(m, "%s: %-30s[%3llu]", field_name,
				   syscall_name, uval);
		} else if (key_field->flags & HIST_FIELD_FL_STACKTRACE) {
			seq_puts(m, "stacktrace:\n");
			hist_trigger_stacktrace_print(m,
						      key + key_field->offset,
						      HIST_STACKTRACE_DEPTH);
			multiline = true;
		} else if (key_field->flags & HIST_FIELD_FL_LOG2) {
			seq_printf(m, "%s: ~ 2^%-2llu", field_name,
				   *(u64 *)(key + key_field->offset));
		} else if (key_field->flags & HIST_FIELD_FL_STRING) {
			seq_printf(m, "%s: %-50s", field_name,
				   (char *)(key + key_field->offset));
		} else {
			uval = *(u64 *)(key + key_field->offset);
			seq_printf(m, "%s: %10llu", field_name, uval);
		}
	}

	if (!multiline)
		seq_puts(m, " ");

	seq_puts(m, "}");

	seq_printf(m, " hitcount: %10llu",
		   tracing_map_read_sum(elt, HITCOUNT_IDX));

	/* remaining value fields; variables and bare exprs aren't shown */
	for (i = 1; i < hist_data->n_vals; i++) {
		field_name = hist_field_name(hist_data->fields[i], 0);

		if (hist_data->fields[i]->flags & HIST_FIELD_FL_VAR ||
		    hist_data->fields[i]->flags & HIST_FIELD_FL_EXPR)
			continue;

		if (hist_data->fields[i]->flags & HIST_FIELD_FL_HEX) {
			seq_printf(m, " %s: %10llx", field_name,
				   tracing_map_read_sum(elt, i));
		} else {
			seq_printf(m, " %s: %10llu", field_name,
				   tracing_map_read_sum(elt, i));
		}
	}

	seq_puts(m, "\n");
}
  1463. static int print_entries(struct seq_file *m,
  1464. struct hist_trigger_data *hist_data)
  1465. {
  1466. struct tracing_map_sort_entry **sort_entries = NULL;
  1467. struct tracing_map *map = hist_data->map;
  1468. int i, n_entries;
  1469. n_entries = tracing_map_sort_entries(map, hist_data->sort_keys,
  1470. hist_data->n_sort_keys,
  1471. &sort_entries);
  1472. if (n_entries < 0)
  1473. return n_entries;
  1474. for (i = 0; i < n_entries; i++)
  1475. hist_trigger_entry_print(m, hist_data,
  1476. sort_entries[i]->key,
  1477. sort_entries[i]->elt);
  1478. tracing_map_destroy_sort_entries(sort_entries, n_entries);
  1479. return n_entries;
  1480. }
  1481. static void hist_trigger_show(struct seq_file *m,
  1482. struct event_trigger_data *data, int n)
  1483. {
  1484. struct hist_trigger_data *hist_data;
  1485. int n_entries;
  1486. if (n > 0)
  1487. seq_puts(m, "\n\n");
  1488. seq_puts(m, "# event histogram\n#\n# trigger info: ");
  1489. data->ops->print(m, data->ops, data);
  1490. seq_puts(m, "#\n\n");
  1491. hist_data = data->private_data;
  1492. n_entries = print_entries(m, hist_data);
  1493. if (n_entries < 0)
  1494. n_entries = 0;
  1495. seq_printf(m, "\nTotals:\n Hits: %llu\n Entries: %u\n Dropped: %llu\n",
  1496. (u64)atomic64_read(&hist_data->map->hits),
  1497. n_entries, (u64)atomic64_read(&hist_data->map->drops));
  1498. }
  1499. static int hist_show(struct seq_file *m, void *v)
  1500. {
  1501. struct event_trigger_data *data;
  1502. struct trace_event_file *event_file;
  1503. int n = 0, ret = 0;
  1504. mutex_lock(&event_mutex);
  1505. event_file = event_file_data(m->private);
  1506. if (unlikely(!event_file)) {
  1507. ret = -ENODEV;
  1508. goto out_unlock;
  1509. }
  1510. list_for_each_entry_rcu(data, &event_file->triggers, list) {
  1511. if (data->cmd_ops->trigger_type == ETT_EVENT_HIST)
  1512. hist_trigger_show(m, data, n++);
  1513. }
  1514. out_unlock:
  1515. mutex_unlock(&event_mutex);
  1516. return ret;
  1517. }
/* Open handler for the per-event "hist" file; dumps via hist_show(). */
static int event_hist_open(struct inode *inode, struct file *file)
{
	return single_open(file, hist_show, file);
}
/* File ops for the read-only per-event "hist" file (seq_file based). */
const struct file_operations event_hist_fops = {
	.open = event_hist_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
  1528. static void hist_field_print(struct seq_file *m, struct hist_field *hist_field)
  1529. {
  1530. const char *field_name = hist_field_name(hist_field, 0);
  1531. if (hist_field->var.name)
  1532. seq_printf(m, "%s=", hist_field->var.name);
  1533. if (hist_field->flags & HIST_FIELD_FL_TIMESTAMP)
  1534. seq_puts(m, "common_timestamp");
  1535. else if (field_name)
  1536. seq_printf(m, "%s", field_name);
  1537. if (hist_field->flags) {
  1538. const char *flags_str = get_hist_field_flags(hist_field);
  1539. if (flags_str)
  1540. seq_printf(m, ".%s", flags_str);
  1541. }
  1542. }
/*
 * Print the trigger description shown in the event's "trigger" file
 * and in the "hist" file header:
 *   hist:[name:]keys=...:vals=...[:var=expr...]:sort=...:size=N [if ...]
 */
static int event_hist_trigger_print(struct seq_file *m,
				    struct event_trigger_ops *ops,
				    struct event_trigger_data *data)
{
	struct hist_trigger_data *hist_data = data->private_data;
	struct hist_field *field;
	bool have_var = false;
	unsigned int i;

	seq_puts(m, "hist:");

	if (data->name)
		seq_printf(m, "%s:", data->name);

	seq_puts(m, "keys=");

	for_each_hist_key_field(i, hist_data) {
		field = hist_data->fields[i];

		/* keys start at n_vals, so the first key gets no comma */
		if (i > hist_data->n_vals)
			seq_puts(m, ",");

		if (field->flags & HIST_FIELD_FL_STACKTRACE)
			seq_puts(m, "stacktrace");
		else
			hist_field_print(m, field);
	}

	seq_puts(m, ":vals=");

	for_each_hist_val_field(i, hist_data) {
		field = hist_data->fields[i];
		if (field->flags & HIST_FIELD_FL_VAR) {
			/* variables print in their own section below */
			have_var = true;
			continue;
		}

		if (i == HITCOUNT_IDX)
			seq_puts(m, "hitcount");
		else {
			seq_puts(m, ",");
			hist_field_print(m, field);
		}
	}

	if (have_var) {
		unsigned int n = 0;

		seq_puts(m, ":");

		for_each_hist_val_field(i, hist_data) {
			field = hist_data->fields[i];

			if (field->flags & HIST_FIELD_FL_VAR) {
				if (n++)
					seq_puts(m, ",");
				hist_field_print(m, field);
			}
		}
	}

	seq_puts(m, ":sort=");

	for (i = 0; i < hist_data->n_sort_keys; i++) {
		struct tracing_map_sort_key *sort_key;
		unsigned int idx, first_key_idx;

		/* skip VAR vals */
		first_key_idx = hist_data->n_vals - hist_data->n_vars;

		sort_key = &hist_data->sort_keys[i];
		idx = sort_key->field_idx;

		if (WARN_ON(idx >= HIST_FIELDS_MAX))
			return -EINVAL;

		if (i > 0)
			seq_puts(m, ",");

		if (idx == HITCOUNT_IDX)
			seq_puts(m, "hitcount");
		else {
			/* sort indices skip vars; map back to fields[] */
			if (idx >= first_key_idx)
				idx += hist_data->n_vars;
			hist_field_print(m, hist_data->fields[idx]);
		}

		if (sort_key->descending)
			seq_puts(m, ".descending");
	}

	seq_printf(m, ":size=%u", (1 << hist_data->map->map_bits));

	if (data->filter_str)
		seq_printf(m, " if %s", data->filter_str);

	if (data->paused)
		seq_puts(m, " [paused]");
	else
		seq_puts(m, " [active]");

	seq_putc(m, '\n');

	return 0;
}
  1622. static int event_hist_trigger_init(struct event_trigger_ops *ops,
  1623. struct event_trigger_data *data)
  1624. {
  1625. struct hist_trigger_data *hist_data = data->private_data;
  1626. if (!data->ref && hist_data->attrs->name)
  1627. save_named_trigger(hist_data->attrs->name, data);
  1628. data->ref++;
  1629. return 0;
  1630. }
  1631. static void event_hist_trigger_free(struct event_trigger_ops *ops,
  1632. struct event_trigger_data *data)
  1633. {
  1634. struct hist_trigger_data *hist_data = data->private_data;
  1635. if (WARN_ON_ONCE(data->ref <= 0))
  1636. return;
  1637. data->ref--;
  1638. if (!data->ref) {
  1639. if (data->name)
  1640. del_named_trigger(data);
  1641. trigger_data_free(data);
  1642. destroy_hist_data(hist_data);
  1643. }
  1644. }
/* Ops for an ordinary (non-name-referencing) hist trigger instance. */
static struct event_trigger_ops event_hist_trigger_ops = {
	.func = event_hist_trigger,
	.print = event_hist_trigger_print,
	.init = event_hist_trigger_init,
	.free = event_hist_trigger_free,
};
/*
 * init for a trigger that references a named trigger: take a local
 * ref, register this instance under the shared name, then forward the
 * init to the shared named_data so its refcount rises too.
 */
static int event_hist_trigger_named_init(struct event_trigger_ops *ops,
					 struct event_trigger_data *data)
{
	data->ref++;

	save_named_trigger(data->named_data->name, data);

	event_hist_trigger_init(ops, data->named_data);

	return 0;
}
/*
 * Free for a trigger attached to a named trigger: drop the reference
 * on the shared (named) data first, then drop this instance's own
 * reference, freeing the instance when it reaches zero.  Note the
 * shared histogram itself is only destroyed by event_hist_trigger_free()
 * when the named data's own refcount drops to zero.
 */
static void event_hist_trigger_named_free(struct event_trigger_ops *ops,
					  struct event_trigger_data *data)
{
	if (WARN_ON_ONCE(data->ref <= 0))
		return;

	event_hist_trigger_free(ops, data->named_data);

	data->ref--;
	if (!data->ref) {
		del_named_trigger(data);
		trigger_data_free(data);
	}
}
/* ops used by a trigger that shares another (named) trigger's data */
static struct event_trigger_ops event_hist_trigger_named_ops = {
	.func = event_hist_trigger,
	.print = event_hist_trigger_print,
	.init = event_hist_trigger_named_init,
	.free = event_hist_trigger_named_free,
};
/*
 * All hist triggers start out with the unnamed ops; registration
 * switches to event_hist_trigger_named_ops if the trigger turns out
 * to attach to an existing named trigger (see hist_register_trigger()).
 */
static struct event_trigger_ops *event_hist_get_trigger_ops(char *cmd,
							    char *param)
{
	return &event_hist_trigger_ops;
}
/*
 * Reset a hist trigger's accumulated map.  A named trigger is paused
 * across the clear so no instance updates the shared map while it's
 * being wiped; synchronize_sched() waits out any updater already
 * running before the map is cleared.
 */
static void hist_clear(struct event_trigger_data *data)
{
	struct hist_trigger_data *hist_data = data->private_data;

	if (data->name)
		pause_named_trigger(data);

	/* wait for in-flight trigger invocations to finish */
	synchronize_sched();

	tracing_map_clear(hist_data->map);

	if (data->name)
		unpause_named_trigger(data);
}
  1692. static bool compatible_field(struct ftrace_event_field *field,
  1693. struct ftrace_event_field *test_field)
  1694. {
  1695. if (field == test_field)
  1696. return true;
  1697. if (field == NULL || test_field == NULL)
  1698. return false;
  1699. if (strcmp(field->name, test_field->name) != 0)
  1700. return false;
  1701. if (strcmp(field->type, test_field->type) != 0)
  1702. return false;
  1703. if (field->size != test_field->size)
  1704. return false;
  1705. if (field->is_signed != test_field->is_signed)
  1706. return false;
  1707. return true;
  1708. }
/*
 * Decide whether two hist triggers describe the same histogram:
 * same counts of vals/fields/sort keys, field-by-field identical
 * flags/offsets/sizes/signedness/var names, identical sort keys,
 * and (unless ignore_filter) identical filter strings.  Used both to
 * find an existing trigger to pause/continue/clear/remove and to
 * match an instance against a named trigger's definition.
 */
static bool hist_trigger_match(struct event_trigger_data *data,
			       struct event_trigger_data *data_test,
			       struct event_trigger_data *named_data,
			       bool ignore_filter)
{
	struct tracing_map_sort_key *sort_key, *sort_key_test;
	struct hist_trigger_data *hist_data, *hist_data_test;
	struct hist_field *key_field, *key_field_test;
	unsigned int i;

	/* if matching against a name, the candidate must be that named
	 * trigger or one of its attached instances */
	if (named_data && (named_data != data_test) &&
	    (named_data != data_test->named_data))
		return false;

	/* an unnamed trigger never matches a named one */
	if (!named_data && is_named_trigger(data_test))
		return false;

	hist_data = data->private_data;
	hist_data_test = data_test->private_data;

	if (hist_data->n_vals != hist_data_test->n_vals ||
	    hist_data->n_fields != hist_data_test->n_fields ||
	    hist_data->n_sort_keys != hist_data_test->n_sort_keys)
		return false;

	if (!ignore_filter) {
		/* one has a filter and the other doesn't: no match */
		if ((data->filter_str && !data_test->filter_str) ||
		    (!data->filter_str && data_test->filter_str))
			return false;
	}

	for_each_hist_field(i, hist_data) {
		key_field = hist_data->fields[i];
		key_field_test = hist_data_test->fields[i];

		if (key_field->flags != key_field_test->flags)
			return false;
		if (!compatible_field(key_field->field, key_field_test->field))
			return false;
		if (key_field->offset != key_field_test->offset)
			return false;
		if (key_field->size != key_field_test->size)
			return false;
		if (key_field->is_signed != key_field_test->is_signed)
			return false;
		/* both must name a variable, or neither */
		if (!!key_field->var.name != !!key_field_test->var.name)
			return false;
		if (key_field->var.name &&
		    strcmp(key_field->var.name, key_field_test->var.name) != 0)
			return false;
	}

	for (i = 0; i < hist_data->n_sort_keys; i++) {
		sort_key = &hist_data->sort_keys[i];
		sort_key_test = &hist_data_test->sort_keys[i];

		if (sort_key->field_idx != sort_key_test->field_idx ||
		    sort_key->descending != sort_key_test->descending)
			return false;
	}

	/* both have filters (checked above), so compare the text */
	if (!ignore_filter && data->filter_str &&
	    (strcmp(data->filter_str, data_test->filter_str) != 0))
		return false;

	return true;
}
/*
 * Register a hist trigger on an event file.
 *
 * If the command named an existing named trigger, this instance must
 * match its definition exactly (filters aside) and ends up sharing its
 * data.  If an equivalent trigger already exists on the file, the
 * pause/cont/clear attrs operate on that trigger instead of creating a
 * new one; otherwise a duplicate is an error (-EEXIST).
 *
 * Returns the number of triggers registered (1) on success, 0 when an
 * existing trigger was paused/continued/cleared, negative on error.
 */
static int hist_register_trigger(char *glob, struct event_trigger_ops *ops,
				 struct event_trigger_data *data,
				 struct trace_event_file *file)
{
	struct hist_trigger_data *hist_data = data->private_data;
	struct event_trigger_data *test, *named_data = NULL;
	int ret = 0;

	if (hist_data->attrs->name) {
		named_data = find_named_trigger(hist_data->attrs->name);
		if (named_data) {
			/* definition must match the existing named trigger */
			if (!hist_trigger_match(data, named_data, named_data,
						true)) {
				ret = -EINVAL;
				goto out;
			}
		}
	}

	/* first use of this name: nothing on the file can match yet */
	if (hist_data->attrs->name && !named_data)
		goto new;

	list_for_each_entry_rcu(test, &file->triggers, list) {
		if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
			if (!hist_trigger_match(data, test, named_data, false))
				continue;
			/* matched: apply the requested state change */
			if (hist_data->attrs->pause)
				test->paused = true;
			else if (hist_data->attrs->cont)
				test->paused = false;
			else if (hist_data->attrs->clear)
				hist_clear(test);
			else
				ret = -EEXIST;
			goto out;
		}
	}
 new:
	/* :cont and :clear only make sense on an existing trigger */
	if (hist_data->attrs->cont || hist_data->attrs->clear) {
		ret = -ENOENT;
		goto out;
	}

	if (hist_data->attrs->pause)
		data->paused = true;

	if (named_data) {
		/* share the named trigger's data; drop our own copy */
		destroy_hist_data(data->private_data);
		data->private_data = named_data->private_data;
		set_named_trigger_data(data, named_data);
		data->ops = &event_hist_trigger_named_ops;
	}

	if (data->ops->init) {
		ret = data->ops->init(data->ops, data);
		if (ret < 0)
			goto out;
	}

	list_add_rcu(&data->list, &file->triggers);
	ret++;

	update_cond_flag(file);

	if (hist_data->enable_timestamps)
		tracing_set_time_stamp_abs(file->tr, true);

	/* roll back the registration if enabling the event fails */
	if (trace_event_trigger_enable_disable(file, 1) < 0) {
		list_del_rcu(&data->list);
		update_cond_flag(file);
		ret--;
	}
 out:
	return ret;
}
/*
 * Unregister the hist trigger on @file that matches @data, if any.
 * Disables the event's trigger path, and turns absolute timestamps
 * back off when this histogram had enabled them.
 */
static void hist_unregister_trigger(char *glob, struct event_trigger_ops *ops,
				    struct event_trigger_data *data,
				    struct trace_event_file *file)
{
	struct hist_trigger_data *hist_data = data->private_data;
	struct event_trigger_data *test, *named_data = NULL;
	bool unregistered = false;

	if (hist_data->attrs->name)
		named_data = find_named_trigger(hist_data->attrs->name);

	list_for_each_entry_rcu(test, &file->triggers, list) {
		if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
			if (!hist_trigger_match(data, test, named_data, false))
				continue;
			unregistered = true;
			list_del_rcu(&test->list);
			trace_event_trigger_enable_disable(file, 0);
			update_cond_flag(file);
			break;
		}
	}

	/*
	 * NOTE(review): test is only valid here when unregistered is set
	 * (loop broke with test pointing at the matched entry); the
	 * short-circuit on unregistered guards the dereference.
	 */
	if (unregistered && test->ops->free)
		test->ops->free(test->ops, test);

	if (hist_data->enable_timestamps) {
		if (!hist_data->remove || unregistered)
			tracing_set_time_stamp_abs(file->tr, false);
	}
}
/*
 * Remove every hist trigger from @file (used when the trigger file is
 * truncated).  Safe iteration is required since entries are deleted
 * and freed while walking the list.
 */
static void hist_unreg_all(struct trace_event_file *file)
{
	struct event_trigger_data *test, *n;
	struct hist_trigger_data *hist_data;

	list_for_each_entry_safe(test, n, &file->triggers, list) {
		if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
			hist_data = test->private_data;
			list_del_rcu(&test->list);
			trace_event_trigger_enable_disable(file, 0);
			update_cond_flag(file);

			if (hist_data->enable_timestamps)
				tracing_set_time_stamp_abs(file->tr, false);

			if (test->ops->free)
				test->ops->free(test->ops, test);
		}
	}
}
/*
 * Parse and apply a 'hist' trigger command written to an event's
 * trigger file.  @glob is the full command (a leading '!' means
 * remove), @param is the text after "hist": the key/val/sort spec,
 * optionally followed by " if <filter>".
 *
 * Returns 0 on success, negative error otherwise.  On any failure (and
 * on removal) trigger_data and hist_data are freed here; on successful
 * registration ownership passes to the trigger list.
 */
static int event_hist_trigger_func(struct event_command *cmd_ops,
				   struct trace_event_file *file,
				   char *glob, char *cmd, char *param)
{
	unsigned int hist_trigger_bits = TRACING_MAP_BITS_DEFAULT;
	struct event_trigger_data *trigger_data;
	struct hist_trigger_attrs *attrs;
	struct event_trigger_ops *trigger_ops;
	struct hist_trigger_data *hist_data;
	bool remove = false;
	char *trigger;
	int ret = 0;

	if (!param)
		return -EINVAL;

	if (glob[0] == '!')
		remove = true;

	/* separate the trigger from the filter (k:v [if filter]) */
	trigger = strsep(&param, " \t");
	if (!trigger)
		return -EINVAL;

	attrs = parse_hist_trigger_attrs(trigger);
	if (IS_ERR(attrs))
		return PTR_ERR(attrs);

	/* user-specified :size= overrides the default map size */
	if (attrs->map_bits)
		hist_trigger_bits = attrs->map_bits;

	/* hist_data takes ownership of attrs from here on */
	hist_data = create_hist_data(hist_trigger_bits, attrs, file, remove);
	if (IS_ERR(hist_data)) {
		destroy_hist_trigger_attrs(attrs);
		return PTR_ERR(hist_data);
	}

	trigger_ops = cmd_ops->get_trigger_ops(cmd, trigger);

	ret = -ENOMEM;
	trigger_data = kzalloc(sizeof(*trigger_data), GFP_KERNEL);
	if (!trigger_data)
		goto out_free;

	trigger_data->count = -1;	/* -1 == unlimited */
	trigger_data->ops = trigger_ops;
	trigger_data->cmd_ops = cmd_ops;

	INIT_LIST_HEAD(&trigger_data->list);
	RCU_INIT_POINTER(trigger_data->filter, NULL);

	trigger_data->private_data = hist_data;

	/* if param is non-empty, it's supposed to be a filter */
	if (param && cmd_ops->set_filter) {
		ret = cmd_ops->set_filter(param, trigger_data, file);
		if (ret < 0)
			goto out_free;
	}

	if (remove) {
		cmd_ops->unreg(glob+1, trigger_ops, trigger_data, file);
		ret = 0;
		/* the temporary parse objects are always freed on remove */
		goto out_free;
	}

	ret = cmd_ops->reg(glob, trigger_ops, trigger_data, file);
	/*
	 * The above returns on success the # of triggers registered,
	 * but if it didn't register any it returns zero.  Consider no
	 * triggers registered a failure too.
	 */
	if (!ret) {
		/* pause/cont/clear legitimately register nothing new */
		if (!(attrs->pause || attrs->cont || attrs->clear))
			ret = -ENOENT;
		goto out_free;
	} else if (ret < 0)
		goto out_free;
	/* Just return zero, not the number of registered triggers */
	ret = 0;
 out:
	return ret;
 out_free:
	/* releases any filter installed by set_filter above */
	if (cmd_ops->set_filter)
		cmd_ops->set_filter(NULL, trigger_data, NULL);

	kfree(trigger_data);

	destroy_hist_data(hist_data);

	goto out;
}
/* the 'hist' trigger command itself */
static struct event_command trigger_hist_cmd = {
	.name = "hist",
	.trigger_type = ETT_EVENT_HIST,
	.flags = EVENT_CMD_FL_NEEDS_REC,	/* needs the traced record */
	.func = event_hist_trigger_func,
	.reg = hist_register_trigger,
	.unreg = hist_unregister_trigger,
	.unreg_all = hist_unreg_all,
	.get_trigger_ops = event_hist_get_trigger_ops,
	.set_filter = set_trigger_filter,
};
  1960. __init int register_trigger_hist_cmd(void)
  1961. {
  1962. int ret;
  1963. ret = register_event_command(&trigger_hist_cmd);
  1964. WARN_ON(ret < 0);
  1965. return ret;
  1966. }
  1967. static void
  1968. hist_enable_trigger(struct event_trigger_data *data, void *rec,
  1969. struct ring_buffer_event *event)
  1970. {
  1971. struct enable_trigger_data *enable_data = data->private_data;
  1972. struct event_trigger_data *test;
  1973. list_for_each_entry_rcu(test, &enable_data->file->triggers, list) {
  1974. if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
  1975. if (enable_data->enable)
  1976. test->paused = false;
  1977. else
  1978. test->paused = true;
  1979. }
  1980. }
  1981. }
  1982. static void
  1983. hist_enable_count_trigger(struct event_trigger_data *data, void *rec,
  1984. struct ring_buffer_event *event)
  1985. {
  1986. if (!data->count)
  1987. return;
  1988. if (data->count != -1)
  1989. (data->count)--;
  1990. hist_enable_trigger(data, rec, event);
  1991. }
/* enable_hist with no :count parameter */
static struct event_trigger_ops hist_enable_trigger_ops = {
	.func = hist_enable_trigger,
	.print = event_enable_trigger_print,
	.init = event_trigger_init,
	.free = event_enable_trigger_free,
};
/* enable_hist with a :count parameter */
static struct event_trigger_ops hist_enable_count_trigger_ops = {
	.func = hist_enable_count_trigger,
	.print = event_enable_trigger_print,
	.init = event_trigger_init,
	.free = event_enable_trigger_free,
};
/* disable_hist with no :count parameter (same handler; the enable
 * flag in the trigger's private data selects the direction) */
static struct event_trigger_ops hist_disable_trigger_ops = {
	.func = hist_enable_trigger,
	.print = event_enable_trigger_print,
	.init = event_trigger_init,
	.free = event_enable_trigger_free,
};
/* disable_hist with a :count parameter */
static struct event_trigger_ops hist_disable_count_trigger_ops = {
	.func = hist_enable_count_trigger,
	.print = event_enable_trigger_print,
	.init = event_trigger_init,
	.free = event_enable_trigger_free,
};
  2016. static struct event_trigger_ops *
  2017. hist_enable_get_trigger_ops(char *cmd, char *param)
  2018. {
  2019. struct event_trigger_ops *ops;
  2020. bool enable;
  2021. enable = (strcmp(cmd, ENABLE_HIST_STR) == 0);
  2022. if (enable)
  2023. ops = param ? &hist_enable_count_trigger_ops :
  2024. &hist_enable_trigger_ops;
  2025. else
  2026. ops = param ? &hist_disable_count_trigger_ops :
  2027. &hist_disable_trigger_ops;
  2028. return ops;
  2029. }
/*
 * Remove every enable_hist/disable_hist trigger from @file.
 *
 * NOTE(review): update_cond_flag() is called before
 * trace_event_trigger_enable_disable() here, the opposite order from
 * hist_unreg_all() above — looks inconsistent; confirm whether the
 * ordering matters before changing either.
 */
static void hist_enable_unreg_all(struct trace_event_file *file)
{
	struct event_trigger_data *test, *n;

	list_for_each_entry_safe(test, n, &file->triggers, list) {
		if (test->cmd_ops->trigger_type == ETT_HIST_ENABLE) {
			list_del_rcu(&test->list);
			update_cond_flag(file);
			trace_event_trigger_enable_disable(file, 0);
			if (test->ops->free)
				test->ops->free(test->ops, test);
		}
	}
}
/* the 'enable_hist' trigger command */
static struct event_command trigger_hist_enable_cmd = {
	.name = ENABLE_HIST_STR,
	.trigger_type = ETT_HIST_ENABLE,
	.func = event_enable_trigger_func,
	.reg = event_enable_register_trigger,
	.unreg = event_enable_unregister_trigger,
	.unreg_all = hist_enable_unreg_all,
	.get_trigger_ops = hist_enable_get_trigger_ops,
	.set_filter = set_trigger_filter,
};
/* the 'disable_hist' trigger command (shares everything but the name) */
static struct event_command trigger_hist_disable_cmd = {
	.name = DISABLE_HIST_STR,
	.trigger_type = ETT_HIST_ENABLE,
	.func = event_enable_trigger_func,
	.reg = event_enable_register_trigger,
	.unreg = event_enable_unregister_trigger,
	.unreg_all = hist_enable_unreg_all,
	.get_trigger_ops = hist_enable_get_trigger_ops,
	.set_filter = set_trigger_filter,
};
/* undo register_trigger_hist_enable_disable_cmds() on partial failure */
static __init void unregister_trigger_hist_enable_disable_cmds(void)
{
	unregister_event_command(&trigger_hist_enable_cmd);
	unregister_event_command(&trigger_hist_disable_cmd);
}
  2068. __init int register_trigger_hist_enable_disable_cmds(void)
  2069. {
  2070. int ret;
  2071. ret = register_event_command(&trigger_hist_enable_cmd);
  2072. if (WARN_ON(ret < 0))
  2073. return ret;
  2074. ret = register_event_command(&trigger_hist_disable_cmd);
  2075. if (WARN_ON(ret < 0))
  2076. unregister_trigger_hist_enable_disable_cmds();
  2077. return ret;
  2078. }