trace_events_trigger.c 40 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
712781279128012811282128312841285128612871288128912901291129212931294129512961297129812991300130113021303130413051306130713081309131013111312131313141315131613171318131913201321132213231324132513261327132813291330133113321333133413351336133713381339134013411342134313441345134613471348134913501351135213531354135513561357135813591360136113621363136413651366136713681369137013711372137313741375137613771378137913801381138213831384138513861387138813891390139113921393139413951396139713981399140014011402140314041405140614071408140914101411141214131414141514161417141814191420142114221423142414251426142714281429143014311432143314341435143614371438143914401441144214431444144514461447144814491450145114521453145414551456145714581459146014611462146314641465146614671468146914701471147214731474147514761477147814791480148114821483148414851486148714881489149014911492149314941495149614971498149915001501150215031504150515061507150815091510151115121513151415151516151715181519152015211522152315241525152615271528152915301531153215331534153515361537153815391540154115421543154415451546154715481549155015511552155315541555155615571558155915601561156215631564156515661567156815691570157115721573157415751576157715781579158015811582158315841585158615871588158915901591159215931594159515961597159815991600160116021603160416051606160716081609161016111612161316141615161616171618161916201621162216231624162516261627162816291630163116321633
  1. /*
  2. * trace_events_trigger - trace event triggers
  3. *
  4. * This program is free software; you can redistribute it and/or modify
  5. * it under the terms of the GNU General Public License as published by
  6. * the Free Software Foundation; either version 2 of the License, or
  7. * (at your option) any later version.
  8. *
  9. * This program is distributed in the hope that it will be useful,
  10. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  11. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  12. * GNU General Public License for more details.
  13. *
  14. * You should have received a copy of the GNU General Public License
  15. * along with this program; if not, write to the Free Software
  16. * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
  17. *
  18. * Copyright (C) 2013 Tom Zanussi <tom.zanussi@linux.intel.com>
  19. */
  20. #include <linux/module.h>
  21. #include <linux/ctype.h>
  22. #include <linux/mutex.h>
  23. #include <linux/slab.h>
  24. #include <linux/rculist.h>
  25. #include "trace.h"
/* Registered event commands (traceon, snapshot, ...), guarded by trigger_cmd_mutex. */
static LIST_HEAD(trigger_commands);
static DEFINE_MUTEX(trigger_cmd_mutex);
/*
 * Free an event_trigger_data instance.  Any attached filter is
 * cleared first, then we wait out current RCU-sched readers
 * (tracepoint handlers run under rcu_read_lock_sched()) before the
 * actual kfree().
 */
void trigger_data_free(struct event_trigger_data *data)
{
	if (data->cmd_ops->set_filter)
		data->cmd_ops->set_filter(NULL, data, NULL);

	synchronize_sched(); /* make sure current triggers exit before free */

	kfree(data);
}
/**
 * event_triggers_call - Call triggers associated with a trace event
 * @file: The trace_event_file associated with the event
 * @rec: The trace entry for the event, NULL for unconditional invocation
 * @event: The ring buffer event associated with @rec
 *
 * For each trigger associated with an event, invoke the trigger
 * function registered with the associated trigger command.  If rec is
 * non-NULL, it means that the trigger requires further processing and
 * shouldn't be unconditionally invoked.  If rec is non-NULL and the
 * trigger has a filter associated with it, rec will be checked against
 * the filter and if the record matches the trigger will be invoked.
 * If the trigger is a 'post_trigger', meaning it shouldn't be invoked
 * in any case until the current event is written, the trigger
 * function isn't invoked but the bit associated with the deferred
 * trigger is set in the return value.
 *
 * Called from tracepoint handlers (with rcu_read_lock_sched() held).
 *
 * Return: an enum event_trigger_type value containing a set bit for
 * any trigger that should be deferred, ETT_NONE if nothing to defer.
 */
enum event_trigger_type
event_triggers_call(struct trace_event_file *file, void *rec,
		    struct ring_buffer_event *event)
{
	struct event_trigger_data *data;
	enum event_trigger_type tt = ETT_NONE;
	struct event_filter *filter;

	if (list_empty(&file->triggers))
		return tt;

	list_for_each_entry_rcu(data, &file->triggers, list) {
		if (data->paused)
			continue;
		if (!rec) {
			/* No record: invoke unconditionally, no filtering. */
			data->ops->func(data, rec, event);
			continue;
		}
		filter = rcu_dereference_sched(data->filter);
		if (filter && !filter_match_preds(filter, rec))
			continue;
		if (event_command_post_trigger(data->cmd_ops)) {
			/* Defer via return value rather than calling now. */
			tt |= data->cmd_ops->trigger_type;
			continue;
		}
		data->ops->func(data, rec, event);
	}
	return tt;
}
EXPORT_SYMBOL_GPL(event_triggers_call);
  87. /**
  88. * event_triggers_post_call - Call 'post_triggers' for a trace event
  89. * @file: The trace_event_file associated with the event
  90. * @tt: enum event_trigger_type containing a set bit for each trigger to invoke
  91. * @rec: The trace entry for the event
  92. *
  93. * For each trigger associated with an event, invoke the trigger
  94. * function registered with the associated trigger command, if the
  95. * corresponding bit is set in the tt enum passed into this function.
  96. * See @event_triggers_call for details on how those bits are set.
  97. *
  98. * Called from tracepoint handlers (with rcu_read_lock_sched() held).
  99. */
  100. void
  101. event_triggers_post_call(struct trace_event_file *file,
  102. enum event_trigger_type tt,
  103. void *rec, struct ring_buffer_event *event)
  104. {
  105. struct event_trigger_data *data;
  106. list_for_each_entry_rcu(data, &file->triggers, list) {
  107. if (data->paused)
  108. continue;
  109. if (data->cmd_ops->trigger_type & tt)
  110. data->ops->func(data, rec, event);
  111. }
  112. }
  113. EXPORT_SYMBOL_GPL(event_triggers_post_call);
/* Sentinel iterator value meaning "show the available-triggers banner". */
#define SHOW_AVAILABLE_TRIGGERS (void *)(1UL)

/* seq_file ->next: advance through the event file's trigger list. */
static void *trigger_next(struct seq_file *m, void *t, loff_t *pos)
{
	struct trace_event_file *event_file = event_file_data(m->private);

	/* The banner is a lone pseudo-entry; nothing follows it. */
	if (t == SHOW_AVAILABLE_TRIGGERS)
		return NULL;

	return seq_list_next(t, &event_file->triggers, pos);
}
/*
 * seq_file ->start: take event_mutex and position the iterator.  When
 * no triggers are registered, returns the SHOW_AVAILABLE_TRIGGERS
 * sentinel so ->show prints the available commands instead.
 */
static void *trigger_start(struct seq_file *m, loff_t *pos)
{
	struct trace_event_file *event_file;

	/* ->stop() is called even if ->start() fails */
	mutex_lock(&event_mutex);
	event_file = event_file_data(m->private);
	if (unlikely(!event_file))
		return ERR_PTR(-ENODEV);

	if (list_empty(&event_file->triggers))
		return *pos == 0 ? SHOW_AVAILABLE_TRIGGERS : NULL;

	return seq_list_start(&event_file->triggers, *pos);
}
/* seq_file ->stop: drop the mutex taken by trigger_start(). */
static void trigger_stop(struct seq_file *m, void *t)
{
	mutex_unlock(&event_mutex);
}
  138. static int trigger_show(struct seq_file *m, void *v)
  139. {
  140. struct event_trigger_data *data;
  141. struct event_command *p;
  142. if (v == SHOW_AVAILABLE_TRIGGERS) {
  143. seq_puts(m, "# Available triggers:\n");
  144. seq_putc(m, '#');
  145. mutex_lock(&trigger_cmd_mutex);
  146. list_for_each_entry_reverse(p, &trigger_commands, list)
  147. seq_printf(m, " %s", p->name);
  148. seq_putc(m, '\n');
  149. mutex_unlock(&trigger_cmd_mutex);
  150. return 0;
  151. }
  152. data = list_entry(v, struct event_trigger_data, list);
  153. data->ops->print(m, data->ops, data);
  154. return 0;
  155. }
/* seq_file operations backing reads of a trace event's 'trigger' file. */
static const struct seq_operations event_triggers_seq_ops = {
	.start = trigger_start,
	.next = trigger_next,
	.stop = trigger_stop,
	.show = trigger_show,
};
/*
 * Open a trace event 'trigger' file.  Opening for write with O_TRUNC
 * removes all existing triggers on the event (via each command's
 * unreg_all); opening for read sets up the seq_file iterator.
 */
static int event_trigger_regex_open(struct inode *inode, struct file *file)
{
	int ret = 0;

	mutex_lock(&event_mutex);

	if (unlikely(!event_file_data(file))) {
		/* The event went away before we could open it. */
		mutex_unlock(&event_mutex);
		return -ENODEV;
	}

	if ((file->f_mode & FMODE_WRITE) &&
	    (file->f_flags & O_TRUNC)) {
		struct trace_event_file *event_file;
		struct event_command *p;

		event_file = event_file_data(file);

		list_for_each_entry(p, &trigger_commands, list) {
			if (p->unreg_all)
				p->unreg_all(event_file);
		}
	}

	if (file->f_mode & FMODE_READ) {
		ret = seq_open(file, &event_triggers_seq_ops);
		if (!ret) {
			struct seq_file *m = file->private_data;
			/* Let trigger_start() find the event file later. */
			m->private = file;
		}
	}

	mutex_unlock(&event_mutex);

	return ret;
}
  190. static int trigger_process_regex(struct trace_event_file *file, char *buff)
  191. {
  192. char *command, *next = buff;
  193. struct event_command *p;
  194. int ret = -EINVAL;
  195. command = strsep(&next, ": \t");
  196. command = (command[0] != '!') ? command : command + 1;
  197. mutex_lock(&trigger_cmd_mutex);
  198. list_for_each_entry(p, &trigger_commands, list) {
  199. if (strcmp(p->name, command) == 0) {
  200. ret = p->func(p, file, buff, command, next);
  201. goto out_unlock;
  202. }
  203. }
  204. out_unlock:
  205. mutex_unlock(&trigger_cmd_mutex);
  206. return ret;
  207. }
/*
 * Write handler for a trace event 'trigger' file: copy the user
 * buffer (bounded by PAGE_SIZE), trim surrounding whitespace, and
 * hand the line to trigger_process_regex() under event_mutex.
 */
static ssize_t event_trigger_regex_write(struct file *file,
					 const char __user *ubuf,
					 size_t cnt, loff_t *ppos)
{
	struct trace_event_file *event_file;
	ssize_t ret;
	char *buf;

	if (!cnt)
		return 0;

	if (cnt >= PAGE_SIZE)
		return -EINVAL;

	buf = memdup_user_nul(ubuf, cnt);
	if (IS_ERR(buf))
		return PTR_ERR(buf);

	strim(buf);

	mutex_lock(&event_mutex);
	event_file = event_file_data(file);
	if (unlikely(!event_file)) {
		/* The event was removed after the file was opened. */
		mutex_unlock(&event_mutex);
		kfree(buf);
		return -ENODEV;
	}
	ret = trigger_process_regex(event_file, buf);
	mutex_unlock(&event_mutex);

	kfree(buf);
	if (ret < 0)
		goto out;

	/* Report the whole write as consumed on success. */
	*ppos += cnt;
	ret = cnt;
 out:
	return ret;
}
/* Release handler: tear down the seq_file iterator if one was created. */
static int event_trigger_regex_release(struct inode *inode, struct file *file)
{
	mutex_lock(&event_mutex);

	if (file->f_mode & FMODE_READ)
		seq_release(inode, file);

	mutex_unlock(&event_mutex);

	return 0;
}
/* 'trigger' file write: thin wrapper around the regex write handler. */
static ssize_t
event_trigger_write(struct file *filp, const char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	return event_trigger_regex_write(filp, ubuf, cnt, ppos);
}
/* 'trigger' file open: thin wrapper around the regex open handler. */
static int
event_trigger_open(struct inode *inode, struct file *filp)
{
	return event_trigger_regex_open(inode, filp);
}
/* 'trigger' file release: thin wrapper around the regex release handler. */
static int
event_trigger_release(struct inode *inode, struct file *file)
{
	return event_trigger_regex_release(inode, file);
}
/* File operations for the per-event 'trigger' control file. */
const struct file_operations event_trigger_fops = {
	.open = event_trigger_open,
	.read = seq_read,
	.write = event_trigger_write,
	.llseek = tracing_lseek,
	.release = event_trigger_release,
};
  271. /*
  272. * Currently we only register event commands from __init, so mark this
  273. * __init too.
  274. */
  275. __init int register_event_command(struct event_command *cmd)
  276. {
  277. struct event_command *p;
  278. int ret = 0;
  279. mutex_lock(&trigger_cmd_mutex);
  280. list_for_each_entry(p, &trigger_commands, list) {
  281. if (strcmp(cmd->name, p->name) == 0) {
  282. ret = -EBUSY;
  283. goto out_unlock;
  284. }
  285. }
  286. list_add(&cmd->list, &trigger_commands);
  287. out_unlock:
  288. mutex_unlock(&trigger_cmd_mutex);
  289. return ret;
  290. }
  291. /*
  292. * Currently we only unregister event commands from __init, so mark
  293. * this __init too.
  294. */
  295. __init int unregister_event_command(struct event_command *cmd)
  296. {
  297. struct event_command *p, *n;
  298. int ret = -ENODEV;
  299. mutex_lock(&trigger_cmd_mutex);
  300. list_for_each_entry_safe(p, n, &trigger_commands, list) {
  301. if (strcmp(cmd->name, p->name) == 0) {
  302. ret = 0;
  303. list_del_init(&p->list);
  304. goto out_unlock;
  305. }
  306. }
  307. out_unlock:
  308. mutex_unlock(&trigger_cmd_mutex);
  309. return ret;
  310. }
  311. /**
  312. * event_trigger_print - Generic event_trigger_ops @print implementation
  313. * @name: The name of the event trigger
  314. * @m: The seq_file being printed to
  315. * @data: Trigger-specific data
  316. * @filter_str: filter_str to print, if present
  317. *
  318. * Common implementation for event triggers to print themselves.
  319. *
  320. * Usually wrapped by a function that simply sets the @name of the
  321. * trigger command and then invokes this.
  322. *
  323. * Return: 0 on success, errno otherwise
  324. */
  325. static int
  326. event_trigger_print(const char *name, struct seq_file *m,
  327. void *data, char *filter_str)
  328. {
  329. long count = (long)data;
  330. seq_puts(m, name);
  331. if (count == -1)
  332. seq_puts(m, ":unlimited");
  333. else
  334. seq_printf(m, ":count=%ld", count);
  335. if (filter_str)
  336. seq_printf(m, " if %s\n", filter_str);
  337. else
  338. seq_putc(m, '\n');
  339. return 0;
  340. }
/**
 * event_trigger_init - Generic event_trigger_ops @init implementation
 * @ops: The trigger ops associated with the trigger
 * @data: Trigger-specific data
 *
 * Common implementation of event trigger initialization.  Takes a
 * reference on @data; event_trigger_free() drops it.
 *
 * Usually used directly as the @init method in event trigger
 * implementations.
 *
 * Return: 0 on success, errno otherwise
 */
int event_trigger_init(struct event_trigger_ops *ops,
		       struct event_trigger_data *data)
{
	data->ref++;
	return 0;
}
/**
 * event_trigger_free - Generic event_trigger_ops @free implementation
 * @ops: The trigger ops associated with the trigger
 * @data: Trigger-specific data
 *
 * Common implementation of event trigger de-initialization.  Drops
 * the reference taken by event_trigger_init() and frees @data when
 * the last reference goes away.
 *
 * Usually used directly as the @free method in event trigger
 * implementations.
 */
static void
event_trigger_free(struct event_trigger_ops *ops,
		   struct event_trigger_data *data)
{
	if (WARN_ON_ONCE(data->ref <= 0))
		return;

	data->ref--;
	if (!data->ref)
		trigger_data_free(data);
}
/*
 * Enable or disable "trigger mode" on a trace event.  A per-file
 * reference count (tm_ref) lets several triggers share the mode: only
 * the first enable and the last disable flip the TRIGGER_MODE flag
 * and the event's enabled state.
 */
int trace_event_trigger_enable_disable(struct trace_event_file *file,
				       int trigger_enable)
{
	int ret = 0;

	if (trigger_enable) {
		if (atomic_inc_return(&file->tm_ref) > 1)
			return ret;
		set_bit(EVENT_FILE_FL_TRIGGER_MODE_BIT, &file->flags);
		ret = trace_event_enable_disable(file, 1, 1);
	} else {
		if (atomic_dec_return(&file->tm_ref) > 0)
			return ret;
		clear_bit(EVENT_FILE_FL_TRIGGER_MODE_BIT, &file->flags);
		ret = trace_event_enable_disable(file, 0, 1);
	}

	return ret;
}
  396. /**
  397. * clear_event_triggers - Clear all triggers associated with a trace array
  398. * @tr: The trace array to clear
  399. *
  400. * For each trigger, the triggering event has its tm_ref decremented
  401. * via trace_event_trigger_enable_disable(), and any associated event
  402. * (in the case of enable/disable_event triggers) will have its sm_ref
  403. * decremented via free()->trace_event_enable_disable(). That
  404. * combination effectively reverses the soft-mode/trigger state added
  405. * by trigger registration.
  406. *
  407. * Must be called with event_mutex held.
  408. */
  409. void
  410. clear_event_triggers(struct trace_array *tr)
  411. {
  412. struct trace_event_file *file;
  413. list_for_each_entry(file, &tr->events, list) {
  414. struct event_trigger_data *data;
  415. list_for_each_entry_rcu(data, &file->triggers, list) {
  416. trace_event_trigger_enable_disable(file, 0);
  417. if (data->ops->free)
  418. data->ops->free(data->ops, data);
  419. }
  420. }
  421. }
  422. /**
  423. * update_cond_flag - Set or reset the TRIGGER_COND bit
  424. * @file: The trace_event_file associated with the event
  425. *
  426. * If an event has triggers and any of those triggers has a filter or
  427. * a post_trigger, trigger invocation needs to be deferred until after
  428. * the current event has logged its data, and the event should have
  429. * its TRIGGER_COND bit set, otherwise the TRIGGER_COND bit should be
  430. * cleared.
  431. */
  432. void update_cond_flag(struct trace_event_file *file)
  433. {
  434. struct event_trigger_data *data;
  435. bool set_cond = false;
  436. list_for_each_entry_rcu(data, &file->triggers, list) {
  437. if (data->filter || event_command_post_trigger(data->cmd_ops) ||
  438. event_command_needs_rec(data->cmd_ops)) {
  439. set_cond = true;
  440. break;
  441. }
  442. }
  443. if (set_cond)
  444. set_bit(EVENT_FILE_FL_TRIGGER_COND_BIT, &file->flags);
  445. else
  446. clear_bit(EVENT_FILE_FL_TRIGGER_COND_BIT, &file->flags);
  447. }
/**
 * register_trigger - Generic event_command @reg implementation
 * @glob: The raw string used to register the trigger
 * @ops: The trigger ops associated with the trigger
 * @data: Trigger-specific data to associate with the trigger
 * @file: The trace_event_file associated with the event
 *
 * Common implementation for event trigger registration.
 *
 * Usually used directly as the @reg method in event command
 * implementations.
 *
 * Return: the number of triggers registered (0 or 1 here; callers
 * treat 0 as failure), errno otherwise
 */
static int register_trigger(char *glob, struct event_trigger_ops *ops,
			    struct event_trigger_data *data,
			    struct trace_event_file *file)
{
	struct event_trigger_data *test;
	int ret = 0;

	/* Only one trigger of a given type may be attached to an event. */
	list_for_each_entry_rcu(test, &file->triggers, list) {
		if (test->cmd_ops->trigger_type == data->cmd_ops->trigger_type) {
			ret = -EEXIST;
			goto out;
		}
	}

	if (data->ops->init) {
		ret = data->ops->init(data->ops, data);
		if (ret < 0)
			goto out;
	}

	list_add_rcu(&data->list, &file->triggers);
	ret++;	/* report one trigger registered */

	update_cond_flag(file);
	if (trace_event_trigger_enable_disable(file, 1) < 0) {
		/* Enable failed: roll the registration back. */
		list_del_rcu(&data->list);
		update_cond_flag(file);
		ret--;
	}
out:
	return ret;
}
/**
 * unregister_trigger - Generic event_command @unreg implementation
 * @glob: The raw string used to register the trigger
 * @ops: The trigger ops associated with the trigger
 * @test: Trigger-specific data used to find the trigger to remove
 * @file: The trace_event_file associated with the event
 *
 * Common implementation for event trigger unregistration.  The
 * trigger is matched by trigger type only; on a match it is unlinked,
 * trigger mode is dropped, and its data is freed via its @free method.
 *
 * Usually used directly as the @unreg method in event command
 * implementations.
 */
void unregister_trigger(char *glob, struct event_trigger_ops *ops,
			struct event_trigger_data *test,
			struct trace_event_file *file)
{
	struct event_trigger_data *data;
	bool unregistered = false;

	list_for_each_entry_rcu(data, &file->triggers, list) {
		if (data->cmd_ops->trigger_type == test->cmd_ops->trigger_type) {
			unregistered = true;
			list_del_rcu(&data->list);
			trace_event_trigger_enable_disable(file, 0);
			update_cond_flag(file);
			break;
		}
	}

	/* 'data' is only valid here when the loop broke with a match. */
	if (unregistered && data->ops->free)
		data->ops->free(data->ops, data);
}
/**
 * event_trigger_callback - Generic event_command @func implementation
 * @cmd_ops: The command ops, used for trigger registration
 * @file: The trace_event_file associated with the event
 * @glob: The raw string used to register the trigger
 * @cmd: The cmd portion of the string used to register the trigger
 * @param: The params portion of the string used to register the trigger
 *
 * Common implementation for event command parsing and trigger
 * instantiation.
 *
 * Usually used directly as the @func method in event command
 * implementations.
 *
 * Return: 0 on success, errno otherwise
 */
static int
event_trigger_callback(struct event_command *cmd_ops,
		       struct trace_event_file *file,
		       char *glob, char *cmd, char *param)
{
	struct event_trigger_data *trigger_data;
	struct event_trigger_ops *trigger_ops;
	char *trigger = NULL;
	char *number;
	int ret;

	/* separate the trigger from the filter (t:n [if filter]) */
	if (param && isdigit(param[0]))
		trigger = strsep(&param, " \t");

	trigger_ops = cmd_ops->get_trigger_ops(cmd, trigger);

	ret = -ENOMEM;
	trigger_data = kzalloc(sizeof(*trigger_data), GFP_KERNEL);
	if (!trigger_data)
		goto out;

	/* count == -1 means "unlimited" (see event_trigger_print()) */
	trigger_data->count = -1;
	trigger_data->ops = trigger_ops;
	trigger_data->cmd_ops = cmd_ops;
	INIT_LIST_HEAD(&trigger_data->list);
	INIT_LIST_HEAD(&trigger_data->named_list);

	if (glob[0] == '!') {
		/*
		 * Removal request: trigger_data is only a lookup key
		 * for unreg and can be freed immediately.
		 */
		cmd_ops->unreg(glob+1, trigger_ops, trigger_data, file);
		kfree(trigger_data);
		ret = 0;
		goto out;
	}

	if (trigger) {
		/* Parse the ":n" count following the trigger name. */
		number = strsep(&trigger, ":");

		ret = -EINVAL;
		if (!strlen(number))
			goto out_free;

		/*
		 * We use the callback data field (which is a pointer)
		 * as our counter.
		 */
		ret = kstrtoul(number, 0, &trigger_data->count);
		if (ret)
			goto out_free;
	}

	if (!param) /* if param is non-empty, it's supposed to be a filter */
		goto out_reg;

	if (!cmd_ops->set_filter)
		goto out_reg;

	ret = cmd_ops->set_filter(param, trigger_data, file);
	if (ret < 0)
		goto out_free;

 out_reg:
	ret = cmd_ops->reg(glob, trigger_ops, trigger_data, file);
	/*
	 * The above returns on success the # of functions enabled,
	 * but if it didn't find any functions it returns zero.
	 * Consider no functions a failure too.
	 */
	if (!ret) {
		ret = -ENOENT;
		goto out_free;
	} else if (ret < 0)
		goto out_free;
	ret = 0;
 out:
	return ret;

 out_free:
	/* Undo any filter set above, then release the unregistered data. */
	if (cmd_ops->set_filter)
		cmd_ops->set_filter(NULL, trigger_data, NULL);
	kfree(trigger_data);
	goto out;
}
/**
 * set_trigger_filter - Generic event_command @set_filter implementation
 * @filter_str: The filter string for the trigger, NULL to remove filter
 * @trigger_data: Trigger-specific data
 * @file: The trace_event_file associated with the event
 *
 * Common implementation for event command filter parsing and filter
 * instantiation.
 *
 * Usually used directly as the @set_filter method in event command
 * implementations.
 *
 * Also used to remove a filter (if filter_str = NULL).
 *
 * NOTE(review): the removal path (filter_str == NULL) falls through
 * to 'assign' with ret still -EINVAL and returns that; callers that
 * clear the filter ignore the return value — confirm before relying
 * on it.
 *
 * Return: 0 on success, errno otherwise
 */
int set_trigger_filter(char *filter_str,
		       struct event_trigger_data *trigger_data,
		       struct trace_event_file *file)
{
	struct event_trigger_data *data = trigger_data;
	struct event_filter *filter = NULL, *tmp;
	int ret = -EINVAL;
	char *s;

	if (!filter_str) /* clear the current filter */
		goto assign;

	/* The filter clause must begin with the literal token "if". */
	s = strsep(&filter_str, " \t");

	if (!strlen(s) || strcmp(s, "if") != 0)
		goto out;

	if (!filter_str)
		goto out;

	/* The filter is for the 'trigger' event, not the triggered event */
	ret = create_event_filter(file->event_call, filter_str, false, &filter);
	if (ret)
		goto out;
 assign:
	/* Publish the new filter, then retire the old one after readers. */
	tmp = rcu_access_pointer(data->filter);

	rcu_assign_pointer(data->filter, filter);

	if (tmp) {
		/* Make sure the call is done with the filter */
		synchronize_sched();
		free_event_filter(tmp);
	}

	kfree(data->filter_str);
	data->filter_str = NULL;

	if (filter_str) {
		/* Keep the textual form for later display. */
		data->filter_str = kstrdup(filter_str, GFP_KERNEL);
		if (!data->filter_str) {
			free_event_filter(rcu_access_pointer(data->filter));
			data->filter = NULL;
			ret = -ENOMEM;
		}
	}
 out:
	return ret;
}
/* All named event triggers, linked via event_trigger_data->named_list. */
static LIST_HEAD(named_triggers);
  663. /**
  664. * find_named_trigger - Find the common named trigger associated with @name
  665. * @name: The name of the set of named triggers to find the common data for
  666. *
  667. * Named triggers are sets of triggers that share a common set of
  668. * trigger data. The first named trigger registered with a given name
  669. * owns the common trigger data that the others subsequently
  670. * registered with the same name will reference. This function
  671. * returns the common trigger data associated with that first
  672. * registered instance.
  673. *
  674. * Return: the common trigger data for the given named trigger on
  675. * success, NULL otherwise.
  676. */
  677. struct event_trigger_data *find_named_trigger(const char *name)
  678. {
  679. struct event_trigger_data *data;
  680. if (!name)
  681. return NULL;
  682. list_for_each_entry(data, &named_triggers, named_list) {
  683. if (data->named_data)
  684. continue;
  685. if (strcmp(data->name, name) == 0)
  686. return data;
  687. }
  688. return NULL;
  689. }
  690. /**
  691. * is_named_trigger - determine if a given trigger is a named trigger
  692. * @test: The trigger data to test
  693. *
  694. * Return: true if 'test' is a named trigger, false otherwise.
  695. */
  696. bool is_named_trigger(struct event_trigger_data *test)
  697. {
  698. struct event_trigger_data *data;
  699. list_for_each_entry(data, &named_triggers, named_list) {
  700. if (test == data)
  701. return true;
  702. }
  703. return false;
  704. }
/**
 * save_named_trigger - save the trigger in the named trigger list
 * @name: The name of the named trigger set
 * @data: The trigger data to save
 *
 * Duplicates @name into @data->name and links @data onto the global
 * named trigger list.
 *
 * Return: 0 if successful, negative error otherwise.
 */
int save_named_trigger(const char *name, struct event_trigger_data *data)
{
	data->name = kstrdup(name, GFP_KERNEL);
	if (!data->name)
		return -ENOMEM;

	list_add(&data->named_list, &named_triggers);

	return 0;
}
/**
 * del_named_trigger - delete a trigger from the named trigger list
 * @data: The trigger data to delete
 *
 * Frees the name duplicated by save_named_trigger() and unlinks
 * @data from the named trigger list.
 */
void del_named_trigger(struct event_trigger_data *data)
{
	kfree(data->name);
	data->name = NULL;

	list_del(&data->named_list);
}
  730. static void __pause_named_trigger(struct event_trigger_data *data, bool pause)
  731. {
  732. struct event_trigger_data *test;
  733. list_for_each_entry(test, &named_triggers, named_list) {
  734. if (strcmp(test->name, data->name) == 0) {
  735. if (pause) {
  736. test->paused_tmp = test->paused;
  737. test->paused = true;
  738. } else {
  739. test->paused = test->paused_tmp;
  740. }
  741. }
  742. }
  743. }
/**
 * pause_named_trigger - Pause all named triggers with the same name
 * @data: The trigger data of a named trigger to pause
 *
 * Pauses a named trigger along with all other triggers having the
 * same name.  Because named triggers share a common set of data,
 * pausing only one is meaningless, so pausing one named trigger needs
 * to pause all triggers with the same name.
 */
void pause_named_trigger(struct event_trigger_data *data)
{
	__pause_named_trigger(data, true);
}
  757. /**
  758. * unpause_named_trigger - Un-pause all named triggers with the same name
  759. * @data: The trigger data of a named trigger to unpause
  760. *
  761. * Un-pauses a named trigger along with all other triggers having the
  762. * same name. Because named triggers share a common set of data,
  763. * unpausing only one is meaningless, so unpausing one named trigger
  764. * needs to unpause all triggers with the same name.
  765. */
  766. void unpause_named_trigger(struct event_trigger_data *data)
  767. {
  768. __pause_named_trigger(data, false);
  769. }
/**
 * set_named_trigger_data - Associate common named trigger data
 * @data: The trigger data to associate
 * @named_data: The first named trigger data owning the common data
 *
 * Named triggers are sets of triggers that share a common set of
 * trigger data. The first named trigger registered with a given name
 * owns the common trigger data that the others subsequently
 * registered with the same name will reference. This function
 * associates the common trigger data from the first trigger with the
 * given trigger.
 */
void set_named_trigger_data(struct event_trigger_data *data,
			    struct event_trigger_data *named_data)
{
	data->named_data = named_data;
}
/**
 * get_named_trigger_data - Get the common named trigger data
 * @data: The trigger data to query
 *
 * Return: the common trigger data associated by set_named_trigger_data(),
 * or NULL if none was set.
 */
struct event_trigger_data *
get_named_trigger_data(struct event_trigger_data *data)
{
	return data->named_data;
}
  791. static void
  792. traceon_trigger(struct event_trigger_data *data, void *rec,
  793. struct ring_buffer_event *event)
  794. {
  795. if (tracing_is_on())
  796. return;
  797. tracing_on();
  798. }
  799. static void
  800. traceon_count_trigger(struct event_trigger_data *data, void *rec,
  801. struct ring_buffer_event *event)
  802. {
  803. if (tracing_is_on())
  804. return;
  805. if (!data->count)
  806. return;
  807. if (data->count != -1)
  808. (data->count)--;
  809. tracing_on();
  810. }
  811. static void
  812. traceoff_trigger(struct event_trigger_data *data, void *rec,
  813. struct ring_buffer_event *event)
  814. {
  815. if (!tracing_is_on())
  816. return;
  817. tracing_off();
  818. }
  819. static void
  820. traceoff_count_trigger(struct event_trigger_data *data, void *rec,
  821. struct ring_buffer_event *event)
  822. {
  823. if (!tracing_is_on())
  824. return;
  825. if (!data->count)
  826. return;
  827. if (data->count != -1)
  828. (data->count)--;
  829. tracing_off();
  830. }
  831. static int
  832. traceon_trigger_print(struct seq_file *m, struct event_trigger_ops *ops,
  833. struct event_trigger_data *data)
  834. {
  835. return event_trigger_print("traceon", m, (void *)data->count,
  836. data->filter_str);
  837. }
  838. static int
  839. traceoff_trigger_print(struct seq_file *m, struct event_trigger_ops *ops,
  840. struct event_trigger_data *data)
  841. {
  842. return event_trigger_print("traceoff", m, (void *)data->count,
  843. data->filter_str);
  844. }
/* Ops for "traceon" with no count: fires on every matching event. */
static struct event_trigger_ops traceon_trigger_ops = {
	.func			= traceon_trigger,
	.print			= traceon_trigger_print,
	.init			= event_trigger_init,
	.free			= event_trigger_free,
};

/* Ops for "traceon:N": fires at most N times. */
static struct event_trigger_ops traceon_count_trigger_ops = {
	.func			= traceon_count_trigger,
	.print			= traceon_trigger_print,
	.init			= event_trigger_init,
	.free			= event_trigger_free,
};

/* Ops for "traceoff" with no count: fires on every matching event. */
static struct event_trigger_ops traceoff_trigger_ops = {
	.func			= traceoff_trigger,
	.print			= traceoff_trigger_print,
	.init			= event_trigger_init,
	.free			= event_trigger_free,
};

/* Ops for "traceoff:N": fires at most N times. */
static struct event_trigger_ops traceoff_count_trigger_ops = {
	.func			= traceoff_count_trigger,
	.print			= traceoff_trigger_print,
	.init			= event_trigger_init,
	.free			= event_trigger_free,
};
  869. static struct event_trigger_ops *
  870. onoff_get_trigger_ops(char *cmd, char *param)
  871. {
  872. struct event_trigger_ops *ops;
  873. /* we register both traceon and traceoff to this callback */
  874. if (strcmp(cmd, "traceon") == 0)
  875. ops = param ? &traceon_count_trigger_ops :
  876. &traceon_trigger_ops;
  877. else
  878. ops = param ? &traceoff_count_trigger_ops :
  879. &traceoff_trigger_ops;
  880. return ops;
  881. }
/* Command definition for the "traceon" event trigger. */
static struct event_command trigger_traceon_cmd = {
	.name			= "traceon",
	.trigger_type		= ETT_TRACE_ONOFF,
	.func			= event_trigger_callback,
	.reg			= register_trigger,
	.unreg			= unregister_trigger,
	.get_trigger_ops	= onoff_get_trigger_ops,
	.set_filter		= set_trigger_filter,
};

/*
 * Command definition for the "traceoff" event trigger. POST_TRIGGER
 * so the event that fires the trigger is itself still recorded before
 * tracing is turned off.
 */
static struct event_command trigger_traceoff_cmd = {
	.name			= "traceoff",
	.trigger_type		= ETT_TRACE_ONOFF,
	.flags			= EVENT_CMD_FL_POST_TRIGGER,
	.func			= event_trigger_callback,
	.reg			= register_trigger,
	.unreg			= unregister_trigger,
	.get_trigger_ops	= onoff_get_trigger_ops,
	.set_filter		= set_trigger_filter,
};
  901. #ifdef CONFIG_TRACER_SNAPSHOT
/* Trigger callback: take a snapshot of the current trace buffer. */
static void
snapshot_trigger(struct event_trigger_data *data, void *rec,
		 struct ring_buffer_event *event)
{
	tracing_snapshot();
}
  908. static void
  909. snapshot_count_trigger(struct event_trigger_data *data, void *rec,
  910. struct ring_buffer_event *event)
  911. {
  912. if (!data->count)
  913. return;
  914. if (data->count != -1)
  915. (data->count)--;
  916. snapshot_trigger(data, rec, event);
  917. }
  918. static int
  919. register_snapshot_trigger(char *glob, struct event_trigger_ops *ops,
  920. struct event_trigger_data *data,
  921. struct trace_event_file *file)
  922. {
  923. int ret = register_trigger(glob, ops, data, file);
  924. if (ret > 0 && tracing_alloc_snapshot() != 0) {
  925. unregister_trigger(glob, ops, data, file);
  926. ret = 0;
  927. }
  928. return ret;
  929. }
  930. static int
  931. snapshot_trigger_print(struct seq_file *m, struct event_trigger_ops *ops,
  932. struct event_trigger_data *data)
  933. {
  934. return event_trigger_print("snapshot", m, (void *)data->count,
  935. data->filter_str);
  936. }
/* Ops for "snapshot" with no count: fires on every matching event. */
static struct event_trigger_ops snapshot_trigger_ops = {
	.func			= snapshot_trigger,
	.print			= snapshot_trigger_print,
	.init			= event_trigger_init,
	.free			= event_trigger_free,
};

/* Ops for "snapshot:N": fires at most N times. */
static struct event_trigger_ops snapshot_count_trigger_ops = {
	.func			= snapshot_count_trigger,
	.print			= snapshot_trigger_print,
	.init			= event_trigger_init,
	.free			= event_trigger_free,
};
  949. static struct event_trigger_ops *
  950. snapshot_get_trigger_ops(char *cmd, char *param)
  951. {
  952. return param ? &snapshot_count_trigger_ops : &snapshot_trigger_ops;
  953. }
/* Command definition for the "snapshot" event trigger. */
static struct event_command trigger_snapshot_cmd = {
	.name			= "snapshot",
	.trigger_type		= ETT_SNAPSHOT,
	.func			= event_trigger_callback,
	.reg			= register_snapshot_trigger,
	.unreg			= unregister_trigger,
	.get_trigger_ops	= snapshot_get_trigger_ops,
	.set_filter		= set_trigger_filter,
};
  963. static __init int register_trigger_snapshot_cmd(void)
  964. {
  965. int ret;
  966. ret = register_event_command(&trigger_snapshot_cmd);
  967. WARN_ON(ret < 0);
  968. return ret;
  969. }
  970. #else
/* CONFIG_TRACER_SNAPSHOT disabled: nothing to register. */
static __init int register_trigger_snapshot_cmd(void) { return 0; }
  972. #endif /* CONFIG_TRACER_SNAPSHOT */
  973. #ifdef CONFIG_STACKTRACE
/*
 * Number of stack frames to skip so the dump starts at the event that
 * fired the trigger rather than in the trigger machinery itself. The
 * ORC unwinder produces a shorter call chain than frame pointers.
 */
#ifdef CONFIG_UNWINDER_ORC
/* Skip 2:
 *   event_triggers_post_call()
 *   trace_event_raw_event_xxx()
 */
# define STACK_SKIP 2
#else
/*
 * Skip 4:
 *   stacktrace_trigger()
 *   event_triggers_post_call()
 *   trace_event_buffer_commit()
 *   trace_event_raw_event_xxx()
 */
#define STACK_SKIP 4
#endif
/* Trigger callback: dump the current stack trace into the trace buffer. */
static void
stacktrace_trigger(struct event_trigger_data *data, void *rec,
		   struct ring_buffer_event *event)
{
	trace_dump_stack(STACK_SKIP);
}
  996. static void
  997. stacktrace_count_trigger(struct event_trigger_data *data, void *rec,
  998. struct ring_buffer_event *event)
  999. {
  1000. if (!data->count)
  1001. return;
  1002. if (data->count != -1)
  1003. (data->count)--;
  1004. stacktrace_trigger(data, rec, event);
  1005. }
  1006. static int
  1007. stacktrace_trigger_print(struct seq_file *m, struct event_trigger_ops *ops,
  1008. struct event_trigger_data *data)
  1009. {
  1010. return event_trigger_print("stacktrace", m, (void *)data->count,
  1011. data->filter_str);
  1012. }
/* Ops for "stacktrace" with no count: fires on every matching event. */
static struct event_trigger_ops stacktrace_trigger_ops = {
	.func			= stacktrace_trigger,
	.print			= stacktrace_trigger_print,
	.init			= event_trigger_init,
	.free			= event_trigger_free,
};

/* Ops for "stacktrace:N": fires at most N times. */
static struct event_trigger_ops stacktrace_count_trigger_ops = {
	.func			= stacktrace_count_trigger,
	.print			= stacktrace_trigger_print,
	.init			= event_trigger_init,
	.free			= event_trigger_free,
};
  1025. static struct event_trigger_ops *
  1026. stacktrace_get_trigger_ops(char *cmd, char *param)
  1027. {
  1028. return param ? &stacktrace_count_trigger_ops : &stacktrace_trigger_ops;
  1029. }
/*
 * Command definition for the "stacktrace" event trigger. POST_TRIGGER
 * so the stack dump happens after the triggering event is recorded.
 */
static struct event_command trigger_stacktrace_cmd = {
	.name			= "stacktrace",
	.trigger_type		= ETT_STACKTRACE,
	.flags			= EVENT_CMD_FL_POST_TRIGGER,
	.func			= event_trigger_callback,
	.reg			= register_trigger,
	.unreg			= unregister_trigger,
	.get_trigger_ops	= stacktrace_get_trigger_ops,
	.set_filter		= set_trigger_filter,
};
  1040. static __init int register_trigger_stacktrace_cmd(void)
  1041. {
  1042. int ret;
  1043. ret = register_event_command(&trigger_stacktrace_cmd);
  1044. WARN_ON(ret < 0);
  1045. return ret;
  1046. }
  1047. #else
/* CONFIG_STACKTRACE disabled: nothing to register. */
static __init int register_trigger_stacktrace_cmd(void) { return 0; }
  1049. #endif /* CONFIG_STACKTRACE */
/* Undo register_trigger_traceon_traceoff_cmds() on partial-init failure. */
static __init void unregister_trigger_traceon_traceoff_cmds(void)
{
	unregister_event_command(&trigger_traceon_cmd);
	unregister_event_command(&trigger_traceoff_cmd);
}
  1055. static void
  1056. event_enable_trigger(struct event_trigger_data *data, void *rec,
  1057. struct ring_buffer_event *event)
  1058. {
  1059. struct enable_trigger_data *enable_data = data->private_data;
  1060. if (enable_data->enable)
  1061. clear_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &enable_data->file->flags);
  1062. else
  1063. set_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &enable_data->file->flags);
  1064. }
  1065. static void
  1066. event_enable_count_trigger(struct event_trigger_data *data, void *rec,
  1067. struct ring_buffer_event *event)
  1068. {
  1069. struct enable_trigger_data *enable_data = data->private_data;
  1070. if (!data->count)
  1071. return;
  1072. /* Skip if the event is in a state we want to switch to */
  1073. if (enable_data->enable == !(enable_data->file->flags & EVENT_FILE_FL_SOFT_DISABLED))
  1074. return;
  1075. if (data->count != -1)
  1076. (data->count)--;
  1077. event_enable_trigger(data, rec, event);
  1078. }
  1079. int event_enable_trigger_print(struct seq_file *m,
  1080. struct event_trigger_ops *ops,
  1081. struct event_trigger_data *data)
  1082. {
  1083. struct enable_trigger_data *enable_data = data->private_data;
  1084. seq_printf(m, "%s:%s:%s",
  1085. enable_data->hist ?
  1086. (enable_data->enable ? ENABLE_HIST_STR : DISABLE_HIST_STR) :
  1087. (enable_data->enable ? ENABLE_EVENT_STR : DISABLE_EVENT_STR),
  1088. enable_data->file->event_call->class->system,
  1089. trace_event_name(enable_data->file->event_call));
  1090. if (data->count == -1)
  1091. seq_puts(m, ":unlimited");
  1092. else
  1093. seq_printf(m, ":count=%ld", data->count);
  1094. if (data->filter_str)
  1095. seq_printf(m, " if %s\n", data->filter_str);
  1096. else
  1097. seq_putc(m, '\n');
  1098. return 0;
  1099. }
/*
 * Drop a reference on an enable/disable trigger. When the last
 * reference goes away, take the target event out of SOFT_MODE,
 * release the module pin taken at registration, and free both the
 * trigger data and its private enable data.
 */
void event_enable_trigger_free(struct event_trigger_ops *ops,
			       struct event_trigger_data *data)
{
	struct enable_trigger_data *enable_data = data->private_data;

	/* Underflow would indicate a ref-counting bug elsewhere. */
	if (WARN_ON_ONCE(data->ref <= 0))
		return;

	data->ref--;
	if (!data->ref) {
		/* Remove the SOFT_MODE flag */
		trace_event_enable_disable(enable_data->file, 0, 1);
		/* Pairs with try_module_get() in event_enable_trigger_func(). */
		module_put(enable_data->file->event_call->mod);
		trigger_data_free(data);
		kfree(enable_data);
	}
}
/* Ops for "enable_event" with no count. */
static struct event_trigger_ops event_enable_trigger_ops = {
	.func			= event_enable_trigger,
	.print			= event_enable_trigger_print,
	.init			= event_trigger_init,
	.free			= event_enable_trigger_free,
};

/* Ops for "enable_event:...:N" (counted). */
static struct event_trigger_ops event_enable_count_trigger_ops = {
	.func			= event_enable_count_trigger,
	.print			= event_enable_trigger_print,
	.init			= event_trigger_init,
	.free			= event_enable_trigger_free,
};

/*
 * Ops for "disable_event" with no count. Same callbacks as the enable
 * variant; the enable/disable direction lives in the private data.
 */
static struct event_trigger_ops event_disable_trigger_ops = {
	.func			= event_enable_trigger,
	.print			= event_enable_trigger_print,
	.init			= event_trigger_init,
	.free			= event_enable_trigger_free,
};

/* Ops for "disable_event:...:N" (counted). */
static struct event_trigger_ops event_disable_count_trigger_ops = {
	.func			= event_enable_count_trigger,
	.print			= event_enable_trigger_print,
	.init			= event_trigger_init,
	.free			= event_enable_trigger_free,
};
/*
 * event_enable_trigger_func - parse and (un)register an enable/disable
 * event trigger of the form "system:event[:count] [if filter]".
 *
 * @cmd_ops: the event_command for this trigger
 * @file:    the trace_event_file the trigger is attached to
 * @glob:    the raw command; a leading '!' means removal
 * @cmd:     the command name (enable_event/disable_event/hist variants)
 * @param:   everything after the command name
 *
 * Returns 0 on success, negative error otherwise. On success the
 * target event's module is pinned and the event is put in SOFT_MODE;
 * both are undone by event_enable_trigger_free() on removal.
 */
int event_enable_trigger_func(struct event_command *cmd_ops,
			      struct trace_event_file *file,
			      char *glob, char *cmd, char *param)
{
	struct trace_event_file *event_enable_file;
	struct enable_trigger_data *enable_data;
	struct event_trigger_data *trigger_data;
	struct event_trigger_ops *trigger_ops;
	struct trace_array *tr = file->tr;
	const char *system;
	const char *event;
	bool hist = false;
	char *trigger;
	char *number;
	bool enable;
	int ret;

	if (!param)
		return -EINVAL;

	/* separate the trigger from the filter (s:e:n [if filter]) */
	trigger = strsep(&param, " \t");
	if (!trigger)
		return -EINVAL;

	/* Split "system:event[:count]" — trigger advances past each ':'. */
	system = strsep(&trigger, ":");
	if (!trigger)
		return -EINVAL;

	event = strsep(&trigger, ":");

	ret = -EINVAL;
	event_enable_file = find_event_file(tr, system, event);
	if (!event_enable_file)
		goto out;

	/* Derive direction (and hist flavor) from the command name. */
#ifdef CONFIG_HIST_TRIGGERS
	hist = ((strcmp(cmd, ENABLE_HIST_STR) == 0) ||
		(strcmp(cmd, DISABLE_HIST_STR) == 0));

	enable = ((strcmp(cmd, ENABLE_EVENT_STR) == 0) ||
		  (strcmp(cmd, ENABLE_HIST_STR) == 0));
#else
	enable = strcmp(cmd, ENABLE_EVENT_STR) == 0;
#endif
	trigger_ops = cmd_ops->get_trigger_ops(cmd, trigger);

	ret = -ENOMEM;
	trigger_data = kzalloc(sizeof(*trigger_data), GFP_KERNEL);
	if (!trigger_data)
		goto out;

	enable_data = kzalloc(sizeof(*enable_data), GFP_KERNEL);
	if (!enable_data) {
		kfree(trigger_data);
		goto out;
	}

	trigger_data->count = -1;	/* default: unlimited */
	trigger_data->ops = trigger_ops;
	trigger_data->cmd_ops = cmd_ops;
	INIT_LIST_HEAD(&trigger_data->list);
	RCU_INIT_POINTER(trigger_data->filter, NULL);

	enable_data->hist = hist;
	enable_data->enable = enable;
	enable_data->file = event_enable_file;
	trigger_data->private_data = enable_data;

	/* '!' prefix: unregister the matching trigger and bail out. */
	if (glob[0] == '!') {
		cmd_ops->unreg(glob+1, trigger_ops, trigger_data, file);
		kfree(trigger_data);
		kfree(enable_data);
		ret = 0;
		goto out;
	}

	/* Optional ":count" suffix. */
	if (trigger) {
		number = strsep(&trigger, ":");

		ret = -EINVAL;
		if (!strlen(number))
			goto out_free;

		/*
		 * We use the callback data field (which is a pointer)
		 * as our counter.
		 */
		ret = kstrtoul(number, 0, &trigger_data->count);
		if (ret)
			goto out_free;
	}

	if (!param) /* if param is non-empty, it's supposed to be a filter */
		goto out_reg;

	if (!cmd_ops->set_filter)
		goto out_reg;

	ret = cmd_ops->set_filter(param, trigger_data, file);
	if (ret < 0)
		goto out_free;

 out_reg:
	/* Don't let event modules unload while probe registered */
	ret = try_module_get(event_enable_file->event_call->mod);
	if (!ret) {
		ret = -EBUSY;
		goto out_free;
	}

	/* Put the target event in SOFT_MODE; undone on out_disable. */
	ret = trace_event_enable_disable(event_enable_file, 1, 1);
	if (ret < 0)
		goto out_put;
	ret = cmd_ops->reg(glob, trigger_ops, trigger_data, file);
	/*
	 * The above returns on success the # of functions enabled,
	 * but if it didn't find any functions it returns zero.
	 * Consider no functions a failure too.
	 */
	if (!ret) {
		ret = -ENOENT;
		goto out_disable;
	} else if (ret < 0)
		goto out_disable;
	/* Just return zero, not the number of enabled functions */
	ret = 0;
 out:
	return ret;

	/* Error unwinding: each label undoes one step above it. */
 out_disable:
	trace_event_enable_disable(event_enable_file, 0, 1);
 out_put:
	module_put(event_enable_file->event_call->mod);
 out_free:
	if (cmd_ops->set_filter)
		cmd_ops->set_filter(NULL, trigger_data, NULL);
	kfree(trigger_data);
	kfree(enable_data);
	goto out;
}
/*
 * Register an enable/disable trigger on @file, rejecting duplicates
 * that target the same event with the same trigger type.
 *
 * Returns the number of triggers registered (1) on success, 0 if
 * enabling the trigger on the file failed, negative error otherwise
 * (-EEXIST for a duplicate, or the ops->init() error).
 */
int event_enable_register_trigger(char *glob,
				  struct event_trigger_ops *ops,
				  struct event_trigger_data *data,
				  struct trace_event_file *file)
{
	struct enable_trigger_data *enable_data = data->private_data;
	struct enable_trigger_data *test_enable_data;
	struct event_trigger_data *test;
	int ret = 0;

	/* NOTE(review): caller presumably holds the trigger mutex — verify. */
	list_for_each_entry_rcu(test, &file->triggers, list) {
		test_enable_data = test->private_data;
		if (test_enable_data &&
		    (test->cmd_ops->trigger_type ==
		     data->cmd_ops->trigger_type) &&
		    (test_enable_data->file == enable_data->file)) {
			ret = -EEXIST;
			goto out;
		}
	}

	if (data->ops->init) {
		ret = data->ops->init(data->ops, data);
		if (ret < 0)
			goto out;
	}

	list_add_rcu(&data->list, &file->triggers);
	ret++;

	update_cond_flag(file);
	/* Roll back the list insertion if the file can't take the trigger. */
	if (trace_event_trigger_enable_disable(file, 1) < 0) {
		list_del_rcu(&data->list);
		update_cond_flag(file);
		ret--;
	}
 out:
	return ret;
}
/*
 * Unregister the enable/disable trigger on @file that matches @test's
 * trigger type and target event, if one is registered. The matched
 * trigger is unlinked and its last reference dropped via ops->free().
 */
void event_enable_unregister_trigger(char *glob,
				     struct event_trigger_ops *ops,
				     struct event_trigger_data *test,
				     struct trace_event_file *file)
{
	struct enable_trigger_data *test_enable_data = test->private_data;
	struct enable_trigger_data *enable_data;
	struct event_trigger_data *data;
	bool unregistered = false;

	/* NOTE(review): caller presumably holds the trigger mutex — verify. */
	list_for_each_entry_rcu(data, &file->triggers, list) {
		enable_data = data->private_data;
		if (enable_data &&
		    (data->cmd_ops->trigger_type ==
		     test->cmd_ops->trigger_type) &&
		    (enable_data->file == test_enable_data->file)) {
			unregistered = true;
			list_del_rcu(&data->list);
			trace_event_trigger_enable_disable(file, 0);
			update_cond_flag(file);
			break;
		}
	}

	/* 'data' is only valid here if the loop matched (unregistered). */
	if (unregistered && data->ops->free)
		data->ops->free(data->ops, data);
}
  1319. static struct event_trigger_ops *
  1320. event_enable_get_trigger_ops(char *cmd, char *param)
  1321. {
  1322. struct event_trigger_ops *ops;
  1323. bool enable;
  1324. #ifdef CONFIG_HIST_TRIGGERS
  1325. enable = ((strcmp(cmd, ENABLE_EVENT_STR) == 0) ||
  1326. (strcmp(cmd, ENABLE_HIST_STR) == 0));
  1327. #else
  1328. enable = strcmp(cmd, ENABLE_EVENT_STR) == 0;
  1329. #endif
  1330. if (enable)
  1331. ops = param ? &event_enable_count_trigger_ops :
  1332. &event_enable_trigger_ops;
  1333. else
  1334. ops = param ? &event_disable_count_trigger_ops :
  1335. &event_disable_trigger_ops;
  1336. return ops;
  1337. }
/* Command definition for the "enable_event" trigger. */
static struct event_command trigger_enable_cmd = {
	.name			= ENABLE_EVENT_STR,
	.trigger_type		= ETT_EVENT_ENABLE,
	.func			= event_enable_trigger_func,
	.reg			= event_enable_register_trigger,
	.unreg			= event_enable_unregister_trigger,
	.get_trigger_ops	= event_enable_get_trigger_ops,
	.set_filter		= set_trigger_filter,
};

/* Command definition for the "disable_event" trigger. */
static struct event_command trigger_disable_cmd = {
	.name			= DISABLE_EVENT_STR,
	.trigger_type		= ETT_EVENT_ENABLE,
	.func			= event_enable_trigger_func,
	.reg			= event_enable_register_trigger,
	.unreg			= event_enable_unregister_trigger,
	.get_trigger_ops	= event_enable_get_trigger_ops,
	.set_filter		= set_trigger_filter,
};
/* Undo register_trigger_enable_disable_cmds() on partial-init failure. */
static __init void unregister_trigger_enable_disable_cmds(void)
{
	unregister_event_command(&trigger_enable_cmd);
	unregister_event_command(&trigger_disable_cmd);
}
  1361. static __init int register_trigger_enable_disable_cmds(void)
  1362. {
  1363. int ret;
  1364. ret = register_event_command(&trigger_enable_cmd);
  1365. if (WARN_ON(ret < 0))
  1366. return ret;
  1367. ret = register_event_command(&trigger_disable_cmd);
  1368. if (WARN_ON(ret < 0))
  1369. unregister_trigger_enable_disable_cmds();
  1370. return ret;
  1371. }
  1372. static __init int register_trigger_traceon_traceoff_cmds(void)
  1373. {
  1374. int ret;
  1375. ret = register_event_command(&trigger_traceon_cmd);
  1376. if (WARN_ON(ret < 0))
  1377. return ret;
  1378. ret = register_event_command(&trigger_traceoff_cmd);
  1379. if (WARN_ON(ret < 0))
  1380. unregister_trigger_traceon_traceoff_cmds();
  1381. return ret;
  1382. }
/*
 * Register every built-in event trigger command at boot. Individual
 * registration failures are reported via WARN_ON inside the helpers;
 * this always returns 0 so the rest of tracing init proceeds.
 */
__init int register_trigger_cmds(void)
{
	register_trigger_traceon_traceoff_cmds();
	register_trigger_snapshot_cmd();
	register_trigger_stacktrace_cmd();
	register_trigger_enable_disable_cmds();
	register_trigger_hist_enable_disable_cmds();
	register_trigger_hist_cmd();

	return 0;
}