trace_events_trigger.c

/*
 * trace_events_trigger - trace event triggers
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) 2013 Tom Zanussi <tom.zanussi@linux.intel.com>
 */

#include <linux/module.h>
#include <linux/ctype.h>
#include <linux/mutex.h>
#include <linux/slab.h>

#include "trace.h"

static LIST_HEAD(trigger_commands);
static DEFINE_MUTEX(trigger_cmd_mutex);

static void
trigger_data_free(struct event_trigger_data *data)
{
	if (data->cmd_ops->set_filter)
		data->cmd_ops->set_filter(NULL, data, NULL);

	synchronize_sched(); /* make sure current triggers exit before free */

	kfree(data);
}
/**
 * event_triggers_call - Call triggers associated with a trace event
 * @file: The ftrace_event_file associated with the event
 * @rec: The trace entry for the event, NULL for unconditional invocation
 *
 * For each trigger associated with an event, invoke the trigger
 * function registered with the associated trigger command.  If rec is
 * non-NULL, it means that the trigger requires further processing and
 * shouldn't be unconditionally invoked.  If rec is non-NULL and the
 * trigger has a filter associated with it, rec will be checked against
 * the filter and, if the record matches, the trigger will be invoked.
 * If the trigger is a 'post_trigger', meaning it shouldn't be invoked
 * in any case until the current event is written, the trigger
 * function isn't invoked but the bit associated with the deferred
 * trigger is set in the return value.
 *
 * Called from tracepoint handlers (with rcu_read_lock_sched() held).
 *
 * Return: an enum event_trigger_type value containing a set bit for
 * any trigger that should be deferred, ETT_NONE if nothing to defer.
 */
enum event_trigger_type
event_triggers_call(struct ftrace_event_file *file, void *rec)
{
	struct event_trigger_data *data;
	enum event_trigger_type tt = ETT_NONE;
	struct event_filter *filter;

	if (list_empty(&file->triggers))
		return tt;

	list_for_each_entry_rcu(data, &file->triggers, list) {
		if (!rec) {
			data->ops->func(data);
			continue;
		}
		filter = rcu_dereference(data->filter);
		if (filter && !filter_match_preds(filter, rec))
			continue;
		if (data->cmd_ops->post_trigger) {
			tt |= data->cmd_ops->trigger_type;
			continue;
		}
		data->ops->func(data);
	}
	return tt;
}
EXPORT_SYMBOL_GPL(event_triggers_call);

/**
 * event_triggers_post_call - Call 'post_triggers' for a trace event
 * @file: The ftrace_event_file associated with the event
 * @tt: enum event_trigger_type containing a set bit for each trigger to invoke
 *
 * For each trigger associated with an event, invoke the trigger
 * function registered with the associated trigger command, if the
 * corresponding bit is set in the tt enum passed into this function.
 * See @event_triggers_call for details on how those bits are set.
 *
 * Called from tracepoint handlers (with rcu_read_lock_sched() held).
 */
void
event_triggers_post_call(struct ftrace_event_file *file,
			 enum event_trigger_type tt)
{
	struct event_trigger_data *data;

	list_for_each_entry_rcu(data, &file->triggers, list) {
		if (data->cmd_ops->trigger_type & tt)
			data->ops->func(data);
	}
}
EXPORT_SYMBOL_GPL(event_triggers_post_call);
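/*
 * A hedged sketch (not part of this file) of how a tracepoint handler
 * is expected to combine the two entry points above: run the
 * non-deferred triggers before the event is committed, commit the
 * event, then run any deferred 'post' triggers.  Names other than the
 * two exported functions are illustrative assumptions.
 *
 *	enum event_trigger_type tt;
 *
 *	tt = event_triggers_call(file, entry);
 *	... write 'entry' to the ring buffer and commit it ...
 *	if (tt != ETT_NONE)
 *		event_triggers_post_call(file, tt);
 */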
static void *trigger_next(struct seq_file *m, void *t, loff_t *pos)
{
	struct ftrace_event_file *event_file = event_file_data(m->private);

	return seq_list_next(t, &event_file->triggers, pos);
}

static void *trigger_start(struct seq_file *m, loff_t *pos)
{
	struct ftrace_event_file *event_file;

	/* ->stop() is called even if ->start() fails */
	mutex_lock(&event_mutex);
	event_file = event_file_data(m->private);
	if (unlikely(!event_file))
		return ERR_PTR(-ENODEV);

	return seq_list_start(&event_file->triggers, *pos);
}

static void trigger_stop(struct seq_file *m, void *t)
{
	mutex_unlock(&event_mutex);
}

static int trigger_show(struct seq_file *m, void *v)
{
	struct event_trigger_data *data;

	data = list_entry(v, struct event_trigger_data, list);
	data->ops->print(m, data->ops, data);

	return 0;
}

static const struct seq_operations event_triggers_seq_ops = {
	.start = trigger_start,
	.next = trigger_next,
	.stop = trigger_stop,
	.show = trigger_show,
};

static int event_trigger_regex_open(struct inode *inode, struct file *file)
{
	int ret = 0;

	mutex_lock(&event_mutex);

	if (unlikely(!event_file_data(file))) {
		mutex_unlock(&event_mutex);
		return -ENODEV;
	}

	if (file->f_mode & FMODE_READ) {
		ret = seq_open(file, &event_triggers_seq_ops);
		if (!ret) {
			struct seq_file *m = file->private_data;
			m->private = file;
		}
	}

	mutex_unlock(&event_mutex);

	return ret;
}
static int trigger_process_regex(struct ftrace_event_file *file, char *buff)
{
	char *command, *next = buff;
	struct event_command *p;
	int ret = -EINVAL;

	command = strsep(&next, ": \t");
	command = (command[0] != '!') ? command : command + 1;

	mutex_lock(&trigger_cmd_mutex);
	list_for_each_entry(p, &trigger_commands, list) {
		if (strcmp(p->name, command) == 0) {
			ret = p->func(p, file, buff, command, next);
			goto out_unlock;
		}
	}
 out_unlock:
	mutex_unlock(&trigger_cmd_mutex);

	return ret;
}
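/*
 * Illustrative examples (derived from the parsing above, not part of
 * the original file): for an input of "traceon:5 if pid == 1",
 * strsep() splits at the first ':' or whitespace, so the command
 * lookup sees "traceon" and the matching command's func is handed
 * param = "5 if pid == 1".  A leading '!' ("!traceon") is skipped for
 * the lookup but preserved in buff, so the command can detect that the
 * trigger is being removed.
 */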
static ssize_t event_trigger_regex_write(struct file *file,
					 const char __user *ubuf,
					 size_t cnt, loff_t *ppos)
{
	struct ftrace_event_file *event_file;
	ssize_t ret;
	char *buf;

	if (!cnt)
		return 0;

	if (cnt >= PAGE_SIZE)
		return -EINVAL;

	buf = (char *)__get_free_page(GFP_TEMPORARY);
	if (!buf)
		return -ENOMEM;

	if (copy_from_user(buf, ubuf, cnt)) {
		free_page((unsigned long)buf);
		return -EFAULT;
	}
	buf[cnt] = '\0';
	strim(buf);

	mutex_lock(&event_mutex);
	event_file = event_file_data(file);
	if (unlikely(!event_file)) {
		mutex_unlock(&event_mutex);
		free_page((unsigned long)buf);
		return -ENODEV;
	}
	ret = trigger_process_regex(event_file, buf);
	mutex_unlock(&event_mutex);

	free_page((unsigned long)buf);
	if (ret < 0)
		goto out;

	*ppos += cnt;
	ret = cnt;
 out:
	return ret;
}

static int event_trigger_regex_release(struct inode *inode, struct file *file)
{
	mutex_lock(&event_mutex);

	if (file->f_mode & FMODE_READ)
		seq_release(inode, file);

	mutex_unlock(&event_mutex);

	return 0;
}

static ssize_t
event_trigger_write(struct file *filp, const char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	return event_trigger_regex_write(filp, ubuf, cnt, ppos);
}

static int
event_trigger_open(struct inode *inode, struct file *filp)
{
	return event_trigger_regex_open(inode, filp);
}

static int
event_trigger_release(struct inode *inode, struct file *file)
{
	return event_trigger_regex_release(inode, file);
}

const struct file_operations event_trigger_fops = {
	.open = event_trigger_open,
	.read = seq_read,
	.write = event_trigger_write,
	.llseek = tracing_lseek,
	.release = event_trigger_release,
};
/*
 * Currently we only register event commands from __init, so mark this
 * __init too.
 */
static __init int register_event_command(struct event_command *cmd)
{
	struct event_command *p;
	int ret = 0;

	mutex_lock(&trigger_cmd_mutex);
	list_for_each_entry(p, &trigger_commands, list) {
		if (strcmp(cmd->name, p->name) == 0) {
			ret = -EBUSY;
			goto out_unlock;
		}
	}
	list_add(&cmd->list, &trigger_commands);
 out_unlock:
	mutex_unlock(&trigger_cmd_mutex);

	return ret;
}

/*
 * Currently we only unregister event commands from __init, so mark
 * this __init too.
 */
static __init int unregister_event_command(struct event_command *cmd)
{
	struct event_command *p, *n;
	int ret = -ENODEV;

	mutex_lock(&trigger_cmd_mutex);
	list_for_each_entry_safe(p, n, &trigger_commands, list) {
		if (strcmp(cmd->name, p->name) == 0) {
			ret = 0;
			list_del_init(&p->list);
			goto out_unlock;
		}
	}
 out_unlock:
	mutex_unlock(&trigger_cmd_mutex);

	return ret;
}

/**
 * event_trigger_print - Generic event_trigger_ops @print implementation
 * @name: The name of the event trigger
 * @m: The seq_file being printed to
 * @data: Trigger-specific data
 * @filter_str: filter_str to print, if present
 *
 * Common implementation for event triggers to print themselves.
 *
 * Usually wrapped by a function that simply sets the @name of the
 * trigger command and then invokes this.
 *
 * Return: 0 on success, errno otherwise
 */
static int
event_trigger_print(const char *name, struct seq_file *m,
		    void *data, char *filter_str)
{
	long count = (long)data;

	seq_printf(m, "%s", name);

	if (count == -1)
		seq_puts(m, ":unlimited");
	else
		seq_printf(m, ":count=%ld", count);

	if (filter_str)
		seq_printf(m, " if %s\n", filter_str);
	else
		seq_puts(m, "\n");

	return 0;
}
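/*
 * For illustration (derived from the function above, not in the
 * original file), reading an event's 'trigger' file would show lines
 * such as:
 *
 *   traceoff:unlimited
 *   traceon:count=5 if pid == 123
 */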
/**
 * event_trigger_init - Generic event_trigger_ops @init implementation
 * @ops: The trigger ops associated with the trigger
 * @data: Trigger-specific data
 *
 * Common implementation of event trigger initialization.
 *
 * Usually used directly as the @init method in event trigger
 * implementations.
 *
 * Return: 0 on success, errno otherwise
 */
static int
event_trigger_init(struct event_trigger_ops *ops,
		   struct event_trigger_data *data)
{
	data->ref++;
	return 0;
}

/**
 * event_trigger_free - Generic event_trigger_ops @free implementation
 * @ops: The trigger ops associated with the trigger
 * @data: Trigger-specific data
 *
 * Common implementation of event trigger de-initialization.
 *
 * Usually used directly as the @free method in event trigger
 * implementations.
 */
static void
event_trigger_free(struct event_trigger_ops *ops,
		   struct event_trigger_data *data)
{
	if (WARN_ON_ONCE(data->ref <= 0))
		return;

	data->ref--;
	if (!data->ref)
		trigger_data_free(data);
}

static int trace_event_trigger_enable_disable(struct ftrace_event_file *file,
					      int trigger_enable)
{
	int ret = 0;

	if (trigger_enable) {
		if (atomic_inc_return(&file->tm_ref) > 1)
			return ret;
		set_bit(FTRACE_EVENT_FL_TRIGGER_MODE_BIT, &file->flags);
		ret = trace_event_enable_disable(file, 1, 1);
	} else {
		if (atomic_dec_return(&file->tm_ref) > 0)
			return ret;
		clear_bit(FTRACE_EVENT_FL_TRIGGER_MODE_BIT, &file->flags);
		ret = trace_event_enable_disable(file, 0, 1);
	}

	return ret;
}
/**
 * clear_event_triggers - Clear all triggers associated with a trace array
 * @tr: The trace array to clear
 *
 * For each trigger, the triggering event has its tm_ref decremented
 * via trace_event_trigger_enable_disable(), and any associated event
 * (in the case of enable/disable_event triggers) will have its sm_ref
 * decremented via free()->trace_event_enable_disable().  That
 * combination effectively reverses the soft-mode/trigger state added
 * by trigger registration.
 *
 * Must be called with event_mutex held.
 */
void
clear_event_triggers(struct trace_array *tr)
{
	struct ftrace_event_file *file;

	list_for_each_entry(file, &tr->events, list) {
		struct event_trigger_data *data;
		list_for_each_entry_rcu(data, &file->triggers, list) {
			trace_event_trigger_enable_disable(file, 0);
			if (data->ops->free)
				data->ops->free(data->ops, data);
		}
	}
}
/**
 * update_cond_flag - Set or reset the TRIGGER_COND bit
 * @file: The ftrace_event_file associated with the event
 *
 * If an event has triggers and any of those triggers has a filter or
 * a post_trigger, trigger invocation needs to be deferred until after
 * the current event has logged its data; in that case the event should
 * have its TRIGGER_COND bit set.  Otherwise the TRIGGER_COND bit should
 * be cleared.
 */
static void update_cond_flag(struct ftrace_event_file *file)
{
	struct event_trigger_data *data;
	bool set_cond = false;

	list_for_each_entry_rcu(data, &file->triggers, list) {
		if (data->filter || data->cmd_ops->post_trigger) {
			set_cond = true;
			break;
		}
	}

	if (set_cond)
		set_bit(FTRACE_EVENT_FL_TRIGGER_COND_BIT, &file->flags);
	else
		clear_bit(FTRACE_EVENT_FL_TRIGGER_COND_BIT, &file->flags);
}
/**
 * register_trigger - Generic event_command @reg implementation
 * @glob: The raw string used to register the trigger
 * @ops: The trigger ops associated with the trigger
 * @data: Trigger-specific data to associate with the trigger
 * @file: The ftrace_event_file associated with the event
 *
 * Common implementation for event trigger registration.
 *
 * Usually used directly as the @reg method in event command
 * implementations.
 *
 * Return: the number of triggers registered, errno otherwise
 */
static int register_trigger(char *glob, struct event_trigger_ops *ops,
			    struct event_trigger_data *data,
			    struct ftrace_event_file *file)
{
	struct event_trigger_data *test;
	int ret = 0;

	list_for_each_entry_rcu(test, &file->triggers, list) {
		if (test->cmd_ops->trigger_type == data->cmd_ops->trigger_type) {
			ret = -EEXIST;
			goto out;
		}
	}

	if (data->ops->init) {
		ret = data->ops->init(data->ops, data);
		if (ret < 0)
			goto out;
	}

	list_add_rcu(&data->list, &file->triggers);
	ret++;

	if (trace_event_trigger_enable_disable(file, 1) < 0) {
		list_del_rcu(&data->list);
		ret--;
	}
	update_cond_flag(file);
 out:
	return ret;
}

/**
 * unregister_trigger - Generic event_command @unreg implementation
 * @glob: The raw string used to register the trigger
 * @ops: The trigger ops associated with the trigger
 * @test: Trigger-specific data used to find the trigger to remove
 * @file: The ftrace_event_file associated with the event
 *
 * Common implementation for event trigger unregistration.
 *
 * Usually used directly as the @unreg method in event command
 * implementations.
 */
static void unregister_trigger(char *glob, struct event_trigger_ops *ops,
			       struct event_trigger_data *test,
			       struct ftrace_event_file *file)
{
	struct event_trigger_data *data;
	bool unregistered = false;

	list_for_each_entry_rcu(data, &file->triggers, list) {
		if (data->cmd_ops->trigger_type == test->cmd_ops->trigger_type) {
			unregistered = true;
			list_del_rcu(&data->list);
			update_cond_flag(file);
			trace_event_trigger_enable_disable(file, 0);
			break;
		}
	}

	if (unregistered && data->ops->free)
		data->ops->free(data->ops, data);
}
/**
 * event_trigger_callback - Generic event_command @func implementation
 * @cmd_ops: The command ops, used for trigger registration
 * @file: The ftrace_event_file associated with the event
 * @glob: The raw string used to register the trigger
 * @cmd: The cmd portion of the string used to register the trigger
 * @param: The params portion of the string used to register the trigger
 *
 * Common implementation for event command parsing and trigger
 * instantiation.
 *
 * Usually used directly as the @func method in event command
 * implementations.
 *
 * Return: 0 on success, errno otherwise
 */
static int
event_trigger_callback(struct event_command *cmd_ops,
		       struct ftrace_event_file *file,
		       char *glob, char *cmd, char *param)
{
	struct event_trigger_data *trigger_data;
	struct event_trigger_ops *trigger_ops;
	char *trigger = NULL;
	char *number;
	int ret;

	/* separate the trigger from the filter (t:n [if filter]) */
	if (param && isdigit(param[0]))
		trigger = strsep(&param, " \t");

	trigger_ops = cmd_ops->get_trigger_ops(cmd, trigger);

	ret = -ENOMEM;
	trigger_data = kzalloc(sizeof(*trigger_data), GFP_KERNEL);
	if (!trigger_data)
		goto out;

	trigger_data->count = -1;
	trigger_data->ops = trigger_ops;
	trigger_data->cmd_ops = cmd_ops;
	INIT_LIST_HEAD(&trigger_data->list);

	if (glob[0] == '!') {
		cmd_ops->unreg(glob+1, trigger_ops, trigger_data, file);
		kfree(trigger_data);
		ret = 0;
		goto out;
	}

	if (trigger) {
		number = strsep(&trigger, ":");

		ret = -EINVAL;
		if (!strlen(number))
			goto out_free;

		/*
		 * We use the callback data field (which is a pointer)
		 * as our counter.
		 */
		ret = kstrtoul(number, 0, &trigger_data->count);
		if (ret)
			goto out_free;
	}

	if (!param) /* if param is non-empty, it's supposed to be a filter */
		goto out_reg;

	if (!cmd_ops->set_filter)
		goto out_reg;

	ret = cmd_ops->set_filter(param, trigger_data, file);
	if (ret < 0)
		goto out_free;

 out_reg:
	ret = cmd_ops->reg(glob, trigger_ops, trigger_data, file);
	/*
	 * The above returns the number of triggers registered on
	 * success, but zero if it didn't register any.  Consider
	 * registering nothing a failure too.
	 */
	if (!ret) {
		ret = -ENOENT;
		goto out_free;
	} else if (ret < 0)
		goto out_free;
	ret = 0;
 out:
	return ret;

 out_free:
	if (cmd_ops->set_filter)
		cmd_ops->set_filter(NULL, trigger_data, NULL);
	kfree(trigger_data);
	goto out;
}
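/*
 * A worked example of the parsing above (an illustration, not part of
 * the original file): for a write of "traceon:5 if pid == 1",
 * trigger_process_regex() hands this callback glob = "traceon" (with
 * any leading '!' preserved), cmd = "traceon" and
 * param = "5 if pid == 1".  The leading digit makes trigger = "5" and
 * leaves param = "if pid == 1", so number parses to a count of 5 and
 * the remainder is passed on to ->set_filter().
 */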
/**
 * set_trigger_filter - Generic event_command @set_filter implementation
 * @filter_str: The filter string for the trigger, NULL to remove filter
 * @trigger_data: Trigger-specific data
 * @file: The ftrace_event_file associated with the event
 *
 * Common implementation for event command filter parsing and filter
 * instantiation.
 *
 * Usually used directly as the @set_filter method in event command
 * implementations.
 *
 * Also used to remove a filter (if filter_str = NULL).
 *
 * Return: 0 on success, errno otherwise
 */
static int set_trigger_filter(char *filter_str,
			      struct event_trigger_data *trigger_data,
			      struct ftrace_event_file *file)
{
	struct event_trigger_data *data = trigger_data;
	struct event_filter *filter = NULL, *tmp;
	int ret = -EINVAL;
	char *s;

	if (!filter_str) /* clear the current filter */
		goto assign;

	s = strsep(&filter_str, " \t");

	if (!strlen(s) || strcmp(s, "if") != 0)
		goto out;

	if (!filter_str)
		goto out;

	/* The filter is for the 'trigger' event, not the triggered event */
	ret = create_event_filter(file->event_call, filter_str, false, &filter);
	if (ret)
		goto out;
 assign:
	tmp = rcu_access_pointer(data->filter);

	rcu_assign_pointer(data->filter, filter);

	if (tmp) {
		/* Make sure the call is done with the filter */
		synchronize_sched();
		free_event_filter(tmp);
	}

	kfree(data->filter_str);
	data->filter_str = NULL;

	if (filter_str) {
		data->filter_str = kstrdup(filter_str, GFP_KERNEL);
		if (!data->filter_str) {
			free_event_filter(rcu_access_pointer(data->filter));
			data->filter = NULL;
			ret = -ENOMEM;
		}
	}
 out:
	return ret;
}
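/*
 * Example (an illustration derived from the code above): given
 * param = "if pid == 123", the first strsep() consumes the mandatory
 * "if" keyword, and the remainder, "pid == 123", is compiled with
 * create_event_filter() and published to concurrent readers via
 * rcu_assign_pointer(); any previous filter is freed only after
 * synchronize_sched().
 */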
static void
traceon_trigger(struct event_trigger_data *data)
{
	if (tracing_is_on())
		return;

	tracing_on();
}

static void
traceon_count_trigger(struct event_trigger_data *data)
{
	if (!data->count)
		return;

	if (data->count != -1)
		(data->count)--;

	traceon_trigger(data);
}

static void
traceoff_trigger(struct event_trigger_data *data)
{
	if (!tracing_is_on())
		return;

	tracing_off();
}

static void
traceoff_count_trigger(struct event_trigger_data *data)
{
	if (!data->count)
		return;

	if (data->count != -1)
		(data->count)--;

	traceoff_trigger(data);
}

static int
traceon_trigger_print(struct seq_file *m, struct event_trigger_ops *ops,
		      struct event_trigger_data *data)
{
	return event_trigger_print("traceon", m, (void *)data->count,
				   data->filter_str);
}

static int
traceoff_trigger_print(struct seq_file *m, struct event_trigger_ops *ops,
		       struct event_trigger_data *data)
{
	return event_trigger_print("traceoff", m, (void *)data->count,
				   data->filter_str);
}

static struct event_trigger_ops traceon_trigger_ops = {
	.func = traceon_trigger,
	.print = traceon_trigger_print,
	.init = event_trigger_init,
	.free = event_trigger_free,
};

static struct event_trigger_ops traceon_count_trigger_ops = {
	.func = traceon_count_trigger,
	.print = traceon_trigger_print,
	.init = event_trigger_init,
	.free = event_trigger_free,
};

static struct event_trigger_ops traceoff_trigger_ops = {
	.func = traceoff_trigger,
	.print = traceoff_trigger_print,
	.init = event_trigger_init,
	.free = event_trigger_free,
};

static struct event_trigger_ops traceoff_count_trigger_ops = {
	.func = traceoff_count_trigger,
	.print = traceoff_trigger_print,
	.init = event_trigger_init,
	.free = event_trigger_free,
};

static struct event_trigger_ops *
onoff_get_trigger_ops(char *cmd, char *param)
{
	struct event_trigger_ops *ops;

	/* we register both traceon and traceoff to this callback */
	if (strcmp(cmd, "traceon") == 0)
		ops = param ? &traceon_count_trigger_ops :
			&traceon_trigger_ops;
	else
		ops = param ? &traceoff_count_trigger_ops :
			&traceoff_trigger_ops;

	return ops;
}

static struct event_command trigger_traceon_cmd = {
	.name = "traceon",
	.trigger_type = ETT_TRACE_ONOFF,
	.func = event_trigger_callback,
	.reg = register_trigger,
	.unreg = unregister_trigger,
	.get_trigger_ops = onoff_get_trigger_ops,
	.set_filter = set_trigger_filter,
};

static struct event_command trigger_traceoff_cmd = {
	.name = "traceoff",
	.trigger_type = ETT_TRACE_ONOFF,
	.func = event_trigger_callback,
	.reg = register_trigger,
	.unreg = unregister_trigger,
	.get_trigger_ops = onoff_get_trigger_ops,
	.set_filter = set_trigger_filter,
};
#ifdef CONFIG_TRACER_SNAPSHOT
static void
snapshot_trigger(struct event_trigger_data *data)
{
	tracing_snapshot();
}

static void
snapshot_count_trigger(struct event_trigger_data *data)
{
	if (!data->count)
		return;

	if (data->count != -1)
		(data->count)--;

	snapshot_trigger(data);
}

static int
register_snapshot_trigger(char *glob, struct event_trigger_ops *ops,
			  struct event_trigger_data *data,
			  struct ftrace_event_file *file)
{
	int ret = register_trigger(glob, ops, data, file);

	if (ret > 0 && tracing_alloc_snapshot() != 0) {
		unregister_trigger(glob, ops, data, file);
		ret = 0;
	}

	return ret;
}

static int
snapshot_trigger_print(struct seq_file *m, struct event_trigger_ops *ops,
		       struct event_trigger_data *data)
{
	return event_trigger_print("snapshot", m, (void *)data->count,
				   data->filter_str);
}

static struct event_trigger_ops snapshot_trigger_ops = {
	.func = snapshot_trigger,
	.print = snapshot_trigger_print,
	.init = event_trigger_init,
	.free = event_trigger_free,
};

static struct event_trigger_ops snapshot_count_trigger_ops = {
	.func = snapshot_count_trigger,
	.print = snapshot_trigger_print,
	.init = event_trigger_init,
	.free = event_trigger_free,
};

static struct event_trigger_ops *
snapshot_get_trigger_ops(char *cmd, char *param)
{
	return param ? &snapshot_count_trigger_ops : &snapshot_trigger_ops;
}

static struct event_command trigger_snapshot_cmd = {
	.name = "snapshot",
	.trigger_type = ETT_SNAPSHOT,
	.func = event_trigger_callback,
	.reg = register_snapshot_trigger,
	.unreg = unregister_trigger,
	.get_trigger_ops = snapshot_get_trigger_ops,
	.set_filter = set_trigger_filter,
};
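/*
 * Usage sketch (the tracefs path and event are assumptions; the
 * command format follows the documented trigger syntax):
 *
 *   echo 'snapshot:1 if prev_pid == 0' > events/sched/sched_switch/trigger
 *
 * register_snapshot_trigger() above additionally allocates the
 * snapshot buffer the first time such a trigger is set.
 */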
static __init int register_trigger_snapshot_cmd(void)
{
	int ret;

	ret = register_event_command(&trigger_snapshot_cmd);
	WARN_ON(ret < 0);

	return ret;
}
#else
static __init int register_trigger_snapshot_cmd(void) { return 0; }
#endif /* CONFIG_TRACER_SNAPSHOT */

#ifdef CONFIG_STACKTRACE
/*
 * Skip 3:
 *   stacktrace_trigger()
 *   event_triggers_post_call()
 *   ftrace_raw_event_xxx()
 */
#define STACK_SKIP 3

static void
stacktrace_trigger(struct event_trigger_data *data)
{
	trace_dump_stack(STACK_SKIP);
}

static void
stacktrace_count_trigger(struct event_trigger_data *data)
{
	if (!data->count)
		return;

	if (data->count != -1)
		(data->count)--;

	stacktrace_trigger(data);
}

static int
stacktrace_trigger_print(struct seq_file *m, struct event_trigger_ops *ops,
			 struct event_trigger_data *data)
{
	return event_trigger_print("stacktrace", m, (void *)data->count,
				   data->filter_str);
}

static struct event_trigger_ops stacktrace_trigger_ops = {
	.func = stacktrace_trigger,
	.print = stacktrace_trigger_print,
	.init = event_trigger_init,
	.free = event_trigger_free,
};

static struct event_trigger_ops stacktrace_count_trigger_ops = {
	.func = stacktrace_count_trigger,
	.print = stacktrace_trigger_print,
	.init = event_trigger_init,
	.free = event_trigger_free,
};

static struct event_trigger_ops *
stacktrace_get_trigger_ops(char *cmd, char *param)
{
	return param ? &stacktrace_count_trigger_ops : &stacktrace_trigger_ops;
}

static struct event_command trigger_stacktrace_cmd = {
	.name = "stacktrace",
	.trigger_type = ETT_STACKTRACE,
	.post_trigger = true,
	.func = event_trigger_callback,
	.reg = register_trigger,
	.unreg = unregister_trigger,
	.get_trigger_ops = stacktrace_get_trigger_ops,
	.set_filter = set_trigger_filter,
};
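/*
 * Usage sketch (assumed tracefs path, per the documented trigger
 * syntax):
 *
 *   echo 'stacktrace:5 if pid == 123' > events/<system>/<event>/trigger
 *
 * dumps a stack trace for at most five matching events.  Note the
 * .post_trigger flag above: the dump is deferred until after the
 * current event is written, which is why STACK_SKIP counts
 * event_triggers_post_call() in the frames to skip.
 */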
static __init int register_trigger_stacktrace_cmd(void)
{
	int ret;

	ret = register_event_command(&trigger_stacktrace_cmd);
	WARN_ON(ret < 0);

	return ret;
}
#else
static __init int register_trigger_stacktrace_cmd(void) { return 0; }
#endif /* CONFIG_STACKTRACE */

static __init void unregister_trigger_traceon_traceoff_cmds(void)
{
	unregister_event_command(&trigger_traceon_cmd);
	unregister_event_command(&trigger_traceoff_cmd);
}

/* Avoid typos */
#define ENABLE_EVENT_STR	"enable_event"
#define DISABLE_EVENT_STR	"disable_event"

struct enable_trigger_data {
	struct ftrace_event_file *file;
	bool enable;
};
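/*
 * An illustrative example (paths and event names are assumptions; the
 * command format is the (s:e:n [if filter]) form parsed below):
 *
 *   echo 'enable_event:sched:sched_wakeup:2' > \
 *		events/syscalls/sys_enter_read/trigger
 *
 * soft-enables sched:sched_wakeup the first two times a sys_enter_read
 * event fires, by clearing that event's SOFT_DISABLED bit.
 */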
static void
event_enable_trigger(struct event_trigger_data *data)
{
	struct enable_trigger_data *enable_data = data->private_data;

	if (enable_data->enable)
		clear_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &enable_data->file->flags);
	else
		set_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &enable_data->file->flags);
}

static void
event_enable_count_trigger(struct event_trigger_data *data)
{
	struct enable_trigger_data *enable_data = data->private_data;

	if (!data->count)
		return;

	/* Skip if the event is in a state we want to switch to */
	if (enable_data->enable == !(enable_data->file->flags & FTRACE_EVENT_FL_SOFT_DISABLED))
		return;

	if (data->count != -1)
		(data->count)--;

	event_enable_trigger(data);
}

static int
event_enable_trigger_print(struct seq_file *m, struct event_trigger_ops *ops,
			   struct event_trigger_data *data)
{
	struct enable_trigger_data *enable_data = data->private_data;

	seq_printf(m, "%s:%s:%s",
		   enable_data->enable ? ENABLE_EVENT_STR : DISABLE_EVENT_STR,
		   enable_data->file->event_call->class->system,
		   enable_data->file->event_call->name);

	if (data->count == -1)
		seq_puts(m, ":unlimited");
	else
		seq_printf(m, ":count=%ld", data->count);

	if (data->filter_str)
		seq_printf(m, " if %s\n", data->filter_str);
	else
		seq_puts(m, "\n");

	return 0;
}

static void
event_enable_trigger_free(struct event_trigger_ops *ops,
			  struct event_trigger_data *data)
{
	struct enable_trigger_data *enable_data = data->private_data;

	if (WARN_ON_ONCE(data->ref <= 0))
		return;

	data->ref--;
	if (!data->ref) {
		/* Remove the SOFT_MODE flag */
		trace_event_enable_disable(enable_data->file, 0, 1);
		module_put(enable_data->file->event_call->mod);
		trigger_data_free(data);
		kfree(enable_data);
	}
}

static struct event_trigger_ops event_enable_trigger_ops = {
	.func = event_enable_trigger,
	.print = event_enable_trigger_print,
	.init = event_trigger_init,
	.free = event_enable_trigger_free,
};

static struct event_trigger_ops event_enable_count_trigger_ops = {
	.func = event_enable_count_trigger,
	.print = event_enable_trigger_print,
	.init = event_trigger_init,
	.free = event_enable_trigger_free,
};

static struct event_trigger_ops event_disable_trigger_ops = {
	.func = event_enable_trigger,
	.print = event_enable_trigger_print,
	.init = event_trigger_init,
	.free = event_enable_trigger_free,
};

static struct event_trigger_ops event_disable_count_trigger_ops = {
	.func = event_enable_count_trigger,
	.print = event_enable_trigger_print,
	.init = event_trigger_init,
	.free = event_enable_trigger_free,
};
static int
event_enable_trigger_func(struct event_command *cmd_ops,
			  struct ftrace_event_file *file,
			  char *glob, char *cmd, char *param)
{
	struct ftrace_event_file *event_enable_file;
	struct enable_trigger_data *enable_data;
	struct event_trigger_data *trigger_data;
	struct event_trigger_ops *trigger_ops;
	struct trace_array *tr = file->tr;
	const char *system;
	const char *event;
	char *trigger;
	char *number;
	bool enable;
	int ret;

	if (!param)
		return -EINVAL;

	/* separate the trigger from the filter (s:e:n [if filter]) */
	trigger = strsep(&param, " \t");
	if (!trigger)
		return -EINVAL;

	system = strsep(&trigger, ":");
	if (!trigger)
		return -EINVAL;

	event = strsep(&trigger, ":");

	ret = -EINVAL;
	event_enable_file = find_event_file(tr, system, event);
	if (!event_enable_file)
		goto out;

	enable = strcmp(cmd, ENABLE_EVENT_STR) == 0;

	trigger_ops = cmd_ops->get_trigger_ops(cmd, trigger);

	ret = -ENOMEM;
	trigger_data = kzalloc(sizeof(*trigger_data), GFP_KERNEL);
	if (!trigger_data)
		goto out;

	enable_data = kzalloc(sizeof(*enable_data), GFP_KERNEL);
	if (!enable_data) {
		kfree(trigger_data);
		goto out;
	}

	trigger_data->count = -1;
	trigger_data->ops = trigger_ops;
	trigger_data->cmd_ops = cmd_ops;
	INIT_LIST_HEAD(&trigger_data->list);
	RCU_INIT_POINTER(trigger_data->filter, NULL);

	enable_data->enable = enable;
	enable_data->file = event_enable_file;
	trigger_data->private_data = enable_data;

	if (glob[0] == '!') {
		cmd_ops->unreg(glob+1, trigger_ops, trigger_data, file);
		kfree(trigger_data);
		kfree(enable_data);
		ret = 0;
		goto out;
	}

	if (trigger) {
		number = strsep(&trigger, ":");

		ret = -EINVAL;
		if (!strlen(number))
			goto out_free;

		/*
		 * We use the callback data field (which is a pointer)
		 * as our counter.
		 */
		ret = kstrtoul(number, 0, &trigger_data->count);
		if (ret)
			goto out_free;
	}

	if (!param) /* if param is non-empty, it's supposed to be a filter */
		goto out_reg;

	if (!cmd_ops->set_filter)
		goto out_reg;

	ret = cmd_ops->set_filter(param, trigger_data, file);
	if (ret < 0)
		goto out_free;

 out_reg:
	/* Don't let event modules unload while probe registered */
	ret = try_module_get(event_enable_file->event_call->mod);
	if (!ret) {
		ret = -EBUSY;
		goto out_free;
	}

	ret = trace_event_enable_disable(event_enable_file, 1, 1);
	if (ret < 0)
		goto out_put;

	ret = cmd_ops->reg(glob, trigger_ops, trigger_data, file);
	/*
	 * The above returns the number of triggers registered on
	 * success, but zero if it didn't register any.  Consider
	 * registering nothing a failure too.
	 */
	if (!ret) {
		ret = -ENOENT;
		goto out_disable;
	} else if (ret < 0)
		goto out_disable;
	/* Just return zero, not the number of registered triggers */
	ret = 0;
 out:
	return ret;

 out_disable:
	trace_event_enable_disable(event_enable_file, 0, 1);
 out_put:
	module_put(event_enable_file->event_call->mod);
 out_free:
	if (cmd_ops->set_filter)
		cmd_ops->set_filter(NULL, trigger_data, NULL);
	kfree(trigger_data);
	kfree(enable_data);
	goto out;
}
static int event_enable_register_trigger(char *glob,
					 struct event_trigger_ops *ops,
					 struct event_trigger_data *data,
					 struct ftrace_event_file *file)
{
	struct enable_trigger_data *enable_data = data->private_data;
	struct enable_trigger_data *test_enable_data;
	struct event_trigger_data *test;
	int ret = 0;

	list_for_each_entry_rcu(test, &file->triggers, list) {
		test_enable_data = test->private_data;
		if (test_enable_data &&
		    (test_enable_data->file == enable_data->file)) {
			ret = -EEXIST;
			goto out;
		}
	}

	if (data->ops->init) {
		ret = data->ops->init(data->ops, data);
		if (ret < 0)
			goto out;
	}

	list_add_rcu(&data->list, &file->triggers);
	ret++;

	if (trace_event_trigger_enable_disable(file, 1) < 0) {
		list_del_rcu(&data->list);
		ret--;
	}
	update_cond_flag(file);
 out:
	return ret;
}

static void event_enable_unregister_trigger(char *glob,
					    struct event_trigger_ops *ops,
					    struct event_trigger_data *test,
					    struct ftrace_event_file *file)
{
	struct enable_trigger_data *test_enable_data = test->private_data;
	struct enable_trigger_data *enable_data;
	struct event_trigger_data *data;
	bool unregistered = false;

	list_for_each_entry_rcu(data, &file->triggers, list) {
		enable_data = data->private_data;
		if (enable_data &&
		    (enable_data->file == test_enable_data->file)) {
			unregistered = true;
			list_del_rcu(&data->list);
			update_cond_flag(file);
			trace_event_trigger_enable_disable(file, 0);
			break;
		}
	}

	if (unregistered && data->ops->free)
		data->ops->free(data->ops, data);
}
static struct event_trigger_ops *
event_enable_get_trigger_ops(char *cmd, char *param)
{
	struct event_trigger_ops *ops;
	bool enable;

	enable = strcmp(cmd, ENABLE_EVENT_STR) == 0;

	if (enable)
		ops = param ? &event_enable_count_trigger_ops :
			&event_enable_trigger_ops;
	else
		ops = param ? &event_disable_count_trigger_ops :
			&event_disable_trigger_ops;

	return ops;
}

static struct event_command trigger_enable_cmd = {
	.name = ENABLE_EVENT_STR,
	.trigger_type = ETT_EVENT_ENABLE,
	.func = event_enable_trigger_func,
	.reg = event_enable_register_trigger,
	.unreg = event_enable_unregister_trigger,
	.get_trigger_ops = event_enable_get_trigger_ops,
	.set_filter = set_trigger_filter,
};

static struct event_command trigger_disable_cmd = {
	.name = DISABLE_EVENT_STR,
	.trigger_type = ETT_EVENT_ENABLE,
	.func = event_enable_trigger_func,
	.reg = event_enable_register_trigger,
	.unreg = event_enable_unregister_trigger,
	.get_trigger_ops = event_enable_get_trigger_ops,
	.set_filter = set_trigger_filter,
};

static __init void unregister_trigger_enable_disable_cmds(void)
{
	unregister_event_command(&trigger_enable_cmd);
	unregister_event_command(&trigger_disable_cmd);
}

static __init int register_trigger_enable_disable_cmds(void)
{
	int ret;

	ret = register_event_command(&trigger_enable_cmd);
	if (WARN_ON(ret < 0))
		return ret;
	ret = register_event_command(&trigger_disable_cmd);
	if (WARN_ON(ret < 0))
		unregister_trigger_enable_disable_cmds();

	return ret;
}

static __init int register_trigger_traceon_traceoff_cmds(void)
{
	int ret;

	ret = register_event_command(&trigger_traceon_cmd);
	if (WARN_ON(ret < 0))
		return ret;
	ret = register_event_command(&trigger_traceoff_cmd);
	if (WARN_ON(ret < 0))
		unregister_trigger_traceon_traceoff_cmds();

	return ret;
}

__init int register_trigger_cmds(void)
{
	register_trigger_traceon_traceoff_cmds();
	register_trigger_snapshot_cmd();
	register_trigger_stacktrace_cmd();
	register_trigger_enable_disable_cmds();

	return 0;
}