/* kernel/trace/trace_events_trigger.c */
  1. /*
  2. * trace_events_trigger - trace event triggers
  3. *
  4. * This program is free software; you can redistribute it and/or modify
  5. * it under the terms of the GNU General Public License as published by
  6. * the Free Software Foundation; either version 2 of the License, or
  7. * (at your option) any later version.
  8. *
  9. * This program is distributed in the hope that it will be useful,
  10. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  11. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  12. * GNU General Public License for more details.
  13. *
  14. * You should have received a copy of the GNU General Public License
  15. * along with this program; if not, write to the Free Software
  16. * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
  17. *
  18. * Copyright (C) 2013 Tom Zanussi <tom.zanussi@linux.intel.com>
  19. */
  20. #include <linux/module.h>
  21. #include <linux/ctype.h>
  22. #include <linux/mutex.h>
  23. #include <linux/slab.h>
  24. #include "trace.h"
  25. static LIST_HEAD(trigger_commands);
  26. static DEFINE_MUTEX(trigger_cmd_mutex);
/*
 * Free an event_trigger_data object.  The trigger's filter (if any) is
 * dropped first via the command's set_filter(NULL, ...), then
 * synchronize_sched() waits for any trigger currently running under
 * rcu_read_lock_sched() to finish before the data is actually freed.
 */
static void
trigger_data_free(struct event_trigger_data *data)
{
	if (data->cmd_ops->set_filter)
		data->cmd_ops->set_filter(NULL, data, NULL);

	synchronize_sched(); /* make sure current triggers exit before free */

	kfree(data);
}
/**
 * event_triggers_call - Call triggers associated with a trace event
 * @file: The trace_event_file associated with the event
 * @rec: The trace entry for the event, NULL for unconditional invocation
 *
 * For each trigger associated with an event, invoke the trigger
 * function registered with the associated trigger command.  If rec is
 * non-NULL, it means that the trigger requires further processing and
 * shouldn't be unconditionally invoked.  If rec is non-NULL and the
 * trigger has a filter associated with it, rec will be checked against
 * the filter and if the record matches the trigger will be invoked.
 * If the trigger is a 'post_trigger', meaning it shouldn't be invoked
 * in any case until the current event is written, the trigger
 * function isn't invoked but the bit associated with the deferred
 * trigger is set in the return value.
 *
 * Called from tracepoint handlers (with rcu_read_lock_sched() held).
 *
 * Return: an enum event_trigger_type value containing a set bit for
 * any trigger that should be deferred, ETT_NONE if nothing to defer.
 */
enum event_trigger_type
event_triggers_call(struct trace_event_file *file, void *rec)
{
	struct event_trigger_data *data;
	enum event_trigger_type tt = ETT_NONE;
	struct event_filter *filter;

	if (list_empty(&file->triggers))
		return tt;

	list_for_each_entry_rcu(data, &file->triggers, list) {
		if (!rec) {
			/* No record: unconditional invocation. */
			data->ops->func(data);
			continue;
		}
		filter = rcu_dereference_sched(data->filter);
		if (filter && !filter_match_preds(filter, rec))
			continue;
		if (data->cmd_ops->post_trigger) {
			/* Defer until after the event is written. */
			tt |= data->cmd_ops->trigger_type;
			continue;
		}
		data->ops->func(data);
	}
	return tt;
}
EXPORT_SYMBOL_GPL(event_triggers_call);
/**
 * event_triggers_post_call - Call 'post_triggers' for a trace event
 * @file: The trace_event_file associated with the event
 * @tt: enum event_trigger_type containing a set bit for each trigger to invoke
 *
 * For each trigger associated with an event, invoke the trigger
 * function registered with the associated trigger command, if the
 * corresponding bit is set in the tt enum passed into this function.
 * See @event_triggers_call for details on how those bits are set.
 *
 * Called from tracepoint handlers (with rcu_read_lock_sched() held).
 */
void
event_triggers_post_call(struct trace_event_file *file,
			 enum event_trigger_type tt)
{
	struct event_trigger_data *data;

	list_for_each_entry_rcu(data, &file->triggers, list) {
		if (data->cmd_ops->trigger_type & tt)
			data->ops->func(data);
	}
}
EXPORT_SYMBOL_GPL(event_triggers_post_call);
  107. #define SHOW_AVAILABLE_TRIGGERS (void *)(1UL)
  108. static void *trigger_next(struct seq_file *m, void *t, loff_t *pos)
  109. {
  110. struct trace_event_file *event_file = event_file_data(m->private);
  111. if (t == SHOW_AVAILABLE_TRIGGERS)
  112. return NULL;
  113. return seq_list_next(t, &event_file->triggers, pos);
  114. }
/*
 * seq_file ->start(): takes event_mutex, which stays held across the
 * whole seq iteration and is dropped in trigger_stop() below.
 * Returns SHOW_AVAILABLE_TRIGGERS when the event has no triggers so
 * ->show() can print the list of available trigger commands instead.
 */
static void *trigger_start(struct seq_file *m, loff_t *pos)
{
	struct trace_event_file *event_file;

	/* ->stop() is called even if ->start() fails */
	mutex_lock(&event_mutex);
	event_file = event_file_data(m->private);
	if (unlikely(!event_file))
		return ERR_PTR(-ENODEV);

	if (list_empty(&event_file->triggers))
		return *pos == 0 ? SHOW_AVAILABLE_TRIGGERS : NULL;

	return seq_list_start(&event_file->triggers, *pos);
}

/* seq_file ->stop(): releases the mutex taken in trigger_start(). */
static void trigger_stop(struct seq_file *m, void *t)
{
	mutex_unlock(&event_mutex);
}
  131. static int trigger_show(struct seq_file *m, void *v)
  132. {
  133. struct event_trigger_data *data;
  134. struct event_command *p;
  135. if (v == SHOW_AVAILABLE_TRIGGERS) {
  136. seq_puts(m, "# Available triggers:\n");
  137. seq_putc(m, '#');
  138. mutex_lock(&trigger_cmd_mutex);
  139. list_for_each_entry_reverse(p, &trigger_commands, list)
  140. seq_printf(m, " %s", p->name);
  141. seq_putc(m, '\n');
  142. mutex_unlock(&trigger_cmd_mutex);
  143. return 0;
  144. }
  145. data = list_entry(v, struct event_trigger_data, list);
  146. data->ops->print(m, data->ops, data);
  147. return 0;
  148. }
/* seq_file operations backing the per-event "trigger" file reads. */
static const struct seq_operations event_triggers_seq_ops = {
	.start = trigger_start,
	.next = trigger_next,
	.stop = trigger_stop,
	.show = trigger_show,
};
/*
 * Open the per-event "trigger" file.  For reads, sets up the seq_file
 * iterator; the seq_file's private pointer is set to the struct file
 * itself (not the trace_event_file) — trigger_start()/trigger_next()
 * resolve it via event_file_data(m->private) on each use, under
 * event_mutex, so a concurrently-removed event is detected.
 */
static int event_trigger_regex_open(struct inode *inode, struct file *file)
{
	int ret = 0;

	mutex_lock(&event_mutex);

	if (unlikely(!event_file_data(file))) {
		mutex_unlock(&event_mutex);
		return -ENODEV;
	}

	if (file->f_mode & FMODE_READ) {
		ret = seq_open(file, &event_triggers_seq_ops);
		if (!ret) {
			struct seq_file *m = file->private_data;
			m->private = file;
		}
	}

	mutex_unlock(&event_mutex);

	return ret;
}
  173. static int trigger_process_regex(struct trace_event_file *file, char *buff)
  174. {
  175. char *command, *next = buff;
  176. struct event_command *p;
  177. int ret = -EINVAL;
  178. command = strsep(&next, ": \t");
  179. command = (command[0] != '!') ? command : command + 1;
  180. mutex_lock(&trigger_cmd_mutex);
  181. list_for_each_entry(p, &trigger_commands, list) {
  182. if (strcmp(p->name, command) == 0) {
  183. ret = p->func(p, file, buff, command, next);
  184. goto out_unlock;
  185. }
  186. }
  187. out_unlock:
  188. mutex_unlock(&trigger_cmd_mutex);
  189. return ret;
  190. }
  191. static ssize_t event_trigger_regex_write(struct file *file,
  192. const char __user *ubuf,
  193. size_t cnt, loff_t *ppos)
  194. {
  195. struct trace_event_file *event_file;
  196. ssize_t ret;
  197. char *buf;
  198. if (!cnt)
  199. return 0;
  200. if (cnt >= PAGE_SIZE)
  201. return -EINVAL;
  202. buf = memdup_user_nul(ubuf, cnt);
  203. if (IS_ERR(buf))
  204. return PTR_ERR(buf);
  205. strim(buf);
  206. mutex_lock(&event_mutex);
  207. event_file = event_file_data(file);
  208. if (unlikely(!event_file)) {
  209. mutex_unlock(&event_mutex);
  210. kfree(buf);
  211. return -ENODEV;
  212. }
  213. ret = trigger_process_regex(event_file, buf);
  214. mutex_unlock(&event_mutex);
  215. kfree(buf);
  216. if (ret < 0)
  217. goto out;
  218. *ppos += cnt;
  219. ret = cnt;
  220. out:
  221. return ret;
  222. }
/* Release the seq_file iterator set up by event_trigger_regex_open(). */
static int event_trigger_regex_release(struct inode *inode, struct file *file)
{
	mutex_lock(&event_mutex);

	if (file->f_mode & FMODE_READ)
		seq_release(inode, file);

	mutex_unlock(&event_mutex);

	return 0;
}

/* Thin file_operations wrappers around the regex open/write/release. */
static ssize_t
event_trigger_write(struct file *filp, const char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	return event_trigger_regex_write(filp, ubuf, cnt, ppos);
}

static int
event_trigger_open(struct inode *inode, struct file *filp)
{
	return event_trigger_regex_open(inode, filp);
}

static int
event_trigger_release(struct inode *inode, struct file *file)
{
	return event_trigger_regex_release(inode, file);
}

/* file_operations for the per-event "trigger" file. */
const struct file_operations event_trigger_fops = {
	.open = event_trigger_open,
	.read = seq_read,
	.write = event_trigger_write,
	.llseek = tracing_lseek,
	.release = event_trigger_release,
};
  254. /*
  255. * Currently we only register event commands from __init, so mark this
  256. * __init too.
  257. */
  258. static __init int register_event_command(struct event_command *cmd)
  259. {
  260. struct event_command *p;
  261. int ret = 0;
  262. mutex_lock(&trigger_cmd_mutex);
  263. list_for_each_entry(p, &trigger_commands, list) {
  264. if (strcmp(cmd->name, p->name) == 0) {
  265. ret = -EBUSY;
  266. goto out_unlock;
  267. }
  268. }
  269. list_add(&cmd->list, &trigger_commands);
  270. out_unlock:
  271. mutex_unlock(&trigger_cmd_mutex);
  272. return ret;
  273. }
  274. /*
  275. * Currently we only unregister event commands from __init, so mark
  276. * this __init too.
  277. */
  278. static __init int unregister_event_command(struct event_command *cmd)
  279. {
  280. struct event_command *p, *n;
  281. int ret = -ENODEV;
  282. mutex_lock(&trigger_cmd_mutex);
  283. list_for_each_entry_safe(p, n, &trigger_commands, list) {
  284. if (strcmp(cmd->name, p->name) == 0) {
  285. ret = 0;
  286. list_del_init(&p->list);
  287. goto out_unlock;
  288. }
  289. }
  290. out_unlock:
  291. mutex_unlock(&trigger_cmd_mutex);
  292. return ret;
  293. }
  294. /**
  295. * event_trigger_print - Generic event_trigger_ops @print implementation
  296. * @name: The name of the event trigger
  297. * @m: The seq_file being printed to
  298. * @data: Trigger-specific data
  299. * @filter_str: filter_str to print, if present
  300. *
  301. * Common implementation for event triggers to print themselves.
  302. *
  303. * Usually wrapped by a function that simply sets the @name of the
  304. * trigger command and then invokes this.
  305. *
  306. * Return: 0 on success, errno otherwise
  307. */
  308. static int
  309. event_trigger_print(const char *name, struct seq_file *m,
  310. void *data, char *filter_str)
  311. {
  312. long count = (long)data;
  313. seq_puts(m, name);
  314. if (count == -1)
  315. seq_puts(m, ":unlimited");
  316. else
  317. seq_printf(m, ":count=%ld", count);
  318. if (filter_str)
  319. seq_printf(m, " if %s\n", filter_str);
  320. else
  321. seq_putc(m, '\n');
  322. return 0;
  323. }
/**
 * event_trigger_init - Generic event_trigger_ops @init implementation
 * @ops: The trigger ops associated with the trigger
 * @data: Trigger-specific data
 *
 * Common implementation of event trigger initialization.
 *
 * Usually used directly as the @init method in event trigger
 * implementations.
 *
 * Return: 0 on success, errno otherwise
 */
static int
event_trigger_init(struct event_trigger_ops *ops,
		   struct event_trigger_data *data)
{
	/* Take a reference; dropped by event_trigger_free(). */
	data->ref++;
	return 0;
}

/**
 * event_trigger_free - Generic event_trigger_ops @free implementation
 * @ops: The trigger ops associated with the trigger
 * @data: Trigger-specific data
 *
 * Common implementation of event trigger de-initialization.
 *
 * Usually used directly as the @free method in event trigger
 * implementations.
 */
static void
event_trigger_free(struct event_trigger_ops *ops,
		   struct event_trigger_data *data)
{
	if (WARN_ON_ONCE(data->ref <= 0))
		return;

	data->ref--;
	/* Last reference gone: tear the trigger data down for real. */
	if (!data->ref)
		trigger_data_free(data);
}
/*
 * trace_event_trigger_enable_disable - ref-counted TRIGGER_MODE toggle
 * @file: The trace_event_file to update
 * @trigger_enable: nonzero to add a trigger reference, zero to drop one
 *
 * Only the first enable and the last disable actually flip the
 * TRIGGER_MODE bit and enable/disable the event (tm_ref counts
 * attached triggers); intermediate calls just adjust the refcount.
 * Returns the result of trace_event_enable_disable(), or 0 when no
 * transition happened.
 */
static int trace_event_trigger_enable_disable(struct trace_event_file *file,
					      int trigger_enable)
{
	int ret = 0;

	if (trigger_enable) {
		if (atomic_inc_return(&file->tm_ref) > 1)
			return ret;
		set_bit(EVENT_FILE_FL_TRIGGER_MODE_BIT, &file->flags);
		ret = trace_event_enable_disable(file, 1, 1);
	} else {
		if (atomic_dec_return(&file->tm_ref) > 0)
			return ret;
		clear_bit(EVENT_FILE_FL_TRIGGER_MODE_BIT, &file->flags);
		ret = trace_event_enable_disable(file, 0, 1);
	}

	return ret;
}
  380. /**
  381. * clear_event_triggers - Clear all triggers associated with a trace array
  382. * @tr: The trace array to clear
  383. *
  384. * For each trigger, the triggering event has its tm_ref decremented
  385. * via trace_event_trigger_enable_disable(), and any associated event
  386. * (in the case of enable/disable_event triggers) will have its sm_ref
  387. * decremented via free()->trace_event_enable_disable(). That
  388. * combination effectively reverses the soft-mode/trigger state added
  389. * by trigger registration.
  390. *
  391. * Must be called with event_mutex held.
  392. */
  393. void
  394. clear_event_triggers(struct trace_array *tr)
  395. {
  396. struct trace_event_file *file;
  397. list_for_each_entry(file, &tr->events, list) {
  398. struct event_trigger_data *data;
  399. list_for_each_entry_rcu(data, &file->triggers, list) {
  400. trace_event_trigger_enable_disable(file, 0);
  401. if (data->ops->free)
  402. data->ops->free(data->ops, data);
  403. }
  404. }
  405. }
  406. /**
  407. * update_cond_flag - Set or reset the TRIGGER_COND bit
  408. * @file: The trace_event_file associated with the event
  409. *
  410. * If an event has triggers and any of those triggers has a filter or
  411. * a post_trigger, trigger invocation needs to be deferred until after
  412. * the current event has logged its data, and the event should have
  413. * its TRIGGER_COND bit set, otherwise the TRIGGER_COND bit should be
  414. * cleared.
  415. */
  416. static void update_cond_flag(struct trace_event_file *file)
  417. {
  418. struct event_trigger_data *data;
  419. bool set_cond = false;
  420. list_for_each_entry_rcu(data, &file->triggers, list) {
  421. if (data->filter || data->cmd_ops->post_trigger) {
  422. set_cond = true;
  423. break;
  424. }
  425. }
  426. if (set_cond)
  427. set_bit(EVENT_FILE_FL_TRIGGER_COND_BIT, &file->flags);
  428. else
  429. clear_bit(EVENT_FILE_FL_TRIGGER_COND_BIT, &file->flags);
  430. }
/**
 * register_trigger - Generic event_command @reg implementation
 * @glob: The raw string used to register the trigger
 * @ops: The trigger ops associated with the trigger
 * @data: Trigger-specific data to associate with the trigger
 * @file: The trace_event_file associated with the event
 *
 * Common implementation for event trigger registration.
 *
 * Usually used directly as the @reg method in event command
 * implementations.
 *
 * Return: the number of triggers registered (0 or 1) on success,
 * negative errno otherwise.  Callers treat 0 as failure (see
 * event_trigger_callback()).
 */
static int register_trigger(char *glob, struct event_trigger_ops *ops,
			    struct event_trigger_data *data,
			    struct trace_event_file *file)
{
	struct event_trigger_data *test;
	int ret = 0;

	/* Only one trigger of a given type may be attached to an event. */
	list_for_each_entry_rcu(test, &file->triggers, list) {
		if (test->cmd_ops->trigger_type == data->cmd_ops->trigger_type) {
			ret = -EEXIST;
			goto out;
		}
	}

	if (data->ops->init) {
		ret = data->ops->init(data->ops, data);
		if (ret < 0)
			goto out;
	}

	list_add_rcu(&data->list, &file->triggers);
	ret++;	/* ret becomes 1: "one trigger registered" */

	update_cond_flag(file);
	if (trace_event_trigger_enable_disable(file, 1) < 0) {
		/* Enabling the event failed: back the registration out. */
		list_del_rcu(&data->list);
		update_cond_flag(file);
		ret--;
	}
out:
	return ret;
}
/**
 * unregister_trigger - Generic event_command @unreg implementation
 * @glob: The raw string used to register the trigger
 * @ops: The trigger ops associated with the trigger
 * @test: Trigger-specific data used to find the trigger to remove
 * @file: The trace_event_file associated with the event
 *
 * Common implementation for event trigger unregistration.  @test is a
 * throwaway lookup key; the trigger actually removed (and freed) is
 * the one already on the list with the same trigger_type.
 *
 * Usually used directly as the @unreg method in event command
 * implementations.
 */
static void unregister_trigger(char *glob, struct event_trigger_ops *ops,
			       struct event_trigger_data *test,
			       struct trace_event_file *file)
{
	struct event_trigger_data *data;
	bool unregistered = false;

	list_for_each_entry_rcu(data, &file->triggers, list) {
		if (data->cmd_ops->trigger_type == test->cmd_ops->trigger_type) {
			unregistered = true;
			list_del_rcu(&data->list);
			trace_event_trigger_enable_disable(file, 0);
			update_cond_flag(file);
			break;
		}
	}

	/* Only valid if the loop broke out early: 'data' is the match. */
	if (unregistered && data->ops->free)
		data->ops->free(data->ops, data);
}
/**
 * event_trigger_callback - Generic event_command @func implementation
 * @cmd_ops: The command ops, used for trigger registration
 * @file: The trace_event_file associated with the event
 * @glob: The raw string used to register the trigger
 * @cmd: The cmd portion of the string used to register the trigger
 * @param: The params portion of the string used to register the trigger
 *
 * Common implementation for event command parsing and trigger
 * instantiation.  Handles both registration ("cmd[:N] [if filter]")
 * and removal ("!cmd").
 *
 * Usually used directly as the @func method in event command
 * implementations.
 *
 * Return: 0 on success, errno otherwise
 */
static int
event_trigger_callback(struct event_command *cmd_ops,
		       struct trace_event_file *file,
		       char *glob, char *cmd, char *param)
{
	struct event_trigger_data *trigger_data;
	struct event_trigger_ops *trigger_ops;
	char *trigger = NULL;
	char *number;
	int ret;

	/* separate the trigger from the filter (t:n [if filter]) */
	if (param && isdigit(param[0]))
		trigger = strsep(&param, " \t");

	trigger_ops = cmd_ops->get_trigger_ops(cmd, trigger);

	ret = -ENOMEM;
	trigger_data = kzalloc(sizeof(*trigger_data), GFP_KERNEL);
	if (!trigger_data)
		goto out;

	trigger_data->count = -1;	/* -1 == unlimited invocations */
	trigger_data->ops = trigger_ops;
	trigger_data->cmd_ops = cmd_ops;
	INIT_LIST_HEAD(&trigger_data->list);

	if (glob[0] == '!') {
		/* Removal: trigger_data is only a lookup key here. */
		cmd_ops->unreg(glob+1, trigger_ops, trigger_data, file);
		kfree(trigger_data);
		ret = 0;
		goto out;
	}

	if (trigger) {
		number = strsep(&trigger, ":");

		ret = -EINVAL;
		if (!strlen(number))
			goto out_free;

		/*
		 * We use the callback data field (which is a pointer)
		 * as our counter.
		 */
		ret = kstrtoul(number, 0, &trigger_data->count);
		if (ret)
			goto out_free;
	}

	if (!param) /* if param is non-empty, it's supposed to be a filter */
		goto out_reg;

	if (!cmd_ops->set_filter)
		goto out_reg;

	ret = cmd_ops->set_filter(param, trigger_data, file);
	if (ret < 0)
		goto out_free;

 out_reg:
	ret = cmd_ops->reg(glob, trigger_ops, trigger_data, file);
	/*
	 * The above returns on success the # of functions enabled,
	 * but if it didn't find any functions it returns zero.
	 * Consider no functions a failure too.
	 */
	if (!ret) {
		ret = -ENOENT;
		goto out_free;
	} else if (ret < 0)
		goto out_free;
	ret = 0;
 out:
	return ret;

 out_free:
	/* Undo any filter set above before dropping the allocation. */
	if (cmd_ops->set_filter)
		cmd_ops->set_filter(NULL, trigger_data, NULL);
	kfree(trigger_data);
	goto out;
}
/**
 * set_trigger_filter - Generic event_command @set_filter implementation
 * @filter_str: The filter string for the trigger, NULL to remove filter
 * @trigger_data: Trigger-specific data
 * @file: The trace_event_file associated with the event
 *
 * Common implementation for event command filter parsing and filter
 * instantiation.
 *
 * Usually used directly as the @set_filter method in event command
 * implementations.
 *
 * Also used to remove a filter (if filter_str = NULL).
 *
 * Return: 0 on success, errno otherwise
 */
static int set_trigger_filter(char *filter_str,
			      struct event_trigger_data *trigger_data,
			      struct trace_event_file *file)
{
	struct event_trigger_data *data = trigger_data;
	struct event_filter *filter = NULL, *tmp;
	int ret = -EINVAL;
	char *s;

	if (!filter_str) /* clear the current filter */
		goto assign;

	/* The string must start with the literal token "if". */
	s = strsep(&filter_str, " \t");

	if (!strlen(s) || strcmp(s, "if") != 0)
		goto out;

	if (!filter_str)
		goto out;

	/* The filter is for the 'trigger' event, not the triggered event */
	ret = create_event_filter(file->event_call, filter_str, false, &filter);
	if (ret)
		goto out;
 assign:
	/*
	 * RCU pointer swap: publish the new filter, then wait for any
	 * in-flight readers of the old one before freeing it.
	 */
	tmp = rcu_access_pointer(data->filter);

	rcu_assign_pointer(data->filter, filter);

	if (tmp) {
		/* Make sure the call is done with the filter */
		synchronize_sched();
		free_event_filter(tmp);
	}

	kfree(data->filter_str);
	data->filter_str = NULL;

	if (filter_str) {
		/* Keep a copy of the string for ->print(). */
		data->filter_str = kstrdup(filter_str, GFP_KERNEL);
		if (!data->filter_str) {
			free_event_filter(rcu_access_pointer(data->filter));
			data->filter = NULL;
			ret = -ENOMEM;
		}
	}
 out:
	return ret;
}
  644. static void
  645. traceon_trigger(struct event_trigger_data *data)
  646. {
  647. if (tracing_is_on())
  648. return;
  649. tracing_on();
  650. }
  651. static void
  652. traceon_count_trigger(struct event_trigger_data *data)
  653. {
  654. if (tracing_is_on())
  655. return;
  656. if (!data->count)
  657. return;
  658. if (data->count != -1)
  659. (data->count)--;
  660. tracing_on();
  661. }
  662. static void
  663. traceoff_trigger(struct event_trigger_data *data)
  664. {
  665. if (!tracing_is_on())
  666. return;
  667. tracing_off();
  668. }
  669. static void
  670. traceoff_count_trigger(struct event_trigger_data *data)
  671. {
  672. if (!tracing_is_on())
  673. return;
  674. if (!data->count)
  675. return;
  676. if (data->count != -1)
  677. (data->count)--;
  678. tracing_off();
  679. }
/* ->print() for traceon: delegate to the generic printer. */
static int
traceon_trigger_print(struct seq_file *m, struct event_trigger_ops *ops,
		      struct event_trigger_data *data)
{
	return event_trigger_print("traceon", m, (void *)data->count,
				   data->filter_str);
}

/* ->print() for traceoff: delegate to the generic printer. */
static int
traceoff_trigger_print(struct seq_file *m, struct event_trigger_ops *ops,
		       struct event_trigger_data *data)
{
	return event_trigger_print("traceoff", m, (void *)data->count,
				   data->filter_str);
}

/* Ops tables for the plain and counted traceon/traceoff triggers. */
static struct event_trigger_ops traceon_trigger_ops = {
	.func = traceon_trigger,
	.print = traceon_trigger_print,
	.init = event_trigger_init,
	.free = event_trigger_free,
};

static struct event_trigger_ops traceon_count_trigger_ops = {
	.func = traceon_count_trigger,
	.print = traceon_trigger_print,
	.init = event_trigger_init,
	.free = event_trigger_free,
};

static struct event_trigger_ops traceoff_trigger_ops = {
	.func = traceoff_trigger,
	.print = traceoff_trigger_print,
	.init = event_trigger_init,
	.free = event_trigger_free,
};

static struct event_trigger_ops traceoff_count_trigger_ops = {
	.func = traceoff_count_trigger,
	.print = traceoff_trigger_print,
	.init = event_trigger_init,
	.free = event_trigger_free,
};
  718. static struct event_trigger_ops *
  719. onoff_get_trigger_ops(char *cmd, char *param)
  720. {
  721. struct event_trigger_ops *ops;
  722. /* we register both traceon and traceoff to this callback */
  723. if (strcmp(cmd, "traceon") == 0)
  724. ops = param ? &traceon_count_trigger_ops :
  725. &traceon_trigger_ops;
  726. else
  727. ops = param ? &traceoff_count_trigger_ops :
  728. &traceoff_trigger_ops;
  729. return ops;
  730. }
/* The "traceon" and "traceoff" trigger commands (generic plumbing). */
static struct event_command trigger_traceon_cmd = {
	.name = "traceon",
	.trigger_type = ETT_TRACE_ONOFF,
	.func = event_trigger_callback,
	.reg = register_trigger,
	.unreg = unregister_trigger,
	.get_trigger_ops = onoff_get_trigger_ops,
	.set_filter = set_trigger_filter,
};

static struct event_command trigger_traceoff_cmd = {
	.name = "traceoff",
	.trigger_type = ETT_TRACE_ONOFF,
	.func = event_trigger_callback,
	.reg = register_trigger,
	.unreg = unregister_trigger,
	.get_trigger_ops = onoff_get_trigger_ops,
	.set_filter = set_trigger_filter,
};
#ifdef CONFIG_TRACER_SNAPSHOT
/* Trigger action: take a snapshot of the tracing buffer. */
static void
snapshot_trigger(struct event_trigger_data *data)
{
	tracing_snapshot();
}

/* Counted variant: snapshot at most data->count times (-1 = unlimited). */
static void
snapshot_count_trigger(struct event_trigger_data *data)
{
	if (!data->count)
		return;

	if (data->count != -1)
		(data->count)--;

	snapshot_trigger(data);
}
/*
 * @reg for the snapshot command: register normally, then make sure the
 * snapshot buffer is allocated.  If allocation fails, the trigger is
 * backed out and 0 is returned, which event_trigger_callback() treats
 * as -ENOENT (registration failure).
 */
static int
register_snapshot_trigger(char *glob, struct event_trigger_ops *ops,
			  struct event_trigger_data *data,
			  struct trace_event_file *file)
{
	int ret = register_trigger(glob, ops, data, file);

	if (ret > 0 && tracing_alloc_snapshot() != 0) {
		unregister_trigger(glob, ops, data, file);
		ret = 0;
	}

	return ret;
}
/* ->print() for snapshot: delegate to the generic printer. */
static int
snapshot_trigger_print(struct seq_file *m, struct event_trigger_ops *ops,
		       struct event_trigger_data *data)
{
	return event_trigger_print("snapshot", m, (void *)data->count,
				   data->filter_str);
}

/* Ops tables for the plain and counted snapshot triggers. */
static struct event_trigger_ops snapshot_trigger_ops = {
	.func = snapshot_trigger,
	.print = snapshot_trigger_print,
	.init = event_trigger_init,
	.free = event_trigger_free,
};

static struct event_trigger_ops snapshot_count_trigger_ops = {
	.func = snapshot_count_trigger,
	.print = snapshot_trigger_print,
	.init = event_trigger_init,
	.free = event_trigger_free,
};
  795. static struct event_trigger_ops *
  796. snapshot_get_trigger_ops(char *cmd, char *param)
  797. {
  798. return param ? &snapshot_count_trigger_ops : &snapshot_trigger_ops;
  799. }
/*
 * "snapshot" trigger command: uses register_snapshot_trigger() for
 * registration so the snapshot buffer gets allocated up front.
 */
static struct event_command trigger_snapshot_cmd = {
	.name			= "snapshot",
	.trigger_type		= ETT_SNAPSHOT,
	.func			= event_trigger_callback,
	.reg			= register_snapshot_trigger,
	.unreg			= unregister_trigger,
	.get_trigger_ops	= snapshot_get_trigger_ops,
	.set_filter		= set_trigger_filter,
};
  809. static __init int register_trigger_snapshot_cmd(void)
  810. {
  811. int ret;
  812. ret = register_event_command(&trigger_snapshot_cmd);
  813. WARN_ON(ret < 0);
  814. return ret;
  815. }
  816. #else
/* CONFIG_TRACER_SNAPSHOT is off: registering the command is a no-op. */
static __init int register_trigger_snapshot_cmd(void) { return 0; }
  818. #endif /* CONFIG_TRACER_SNAPSHOT */
  819. #ifdef CONFIG_STACKTRACE
  820. /*
  821. * Skip 3:
  822. * stacktrace_trigger()
  823. * event_triggers_post_call()
  824. * trace_event_raw_event_xxx()
  825. */
  826. #define STACK_SKIP 3
/*
 * "stacktrace" trigger action: dump the current stack into the trace
 * buffer, skipping the trigger machinery's own frames (STACK_SKIP,
 * see the comment above its definition).
 */
static void
stacktrace_trigger(struct event_trigger_data *data)
{
	trace_dump_stack(STACK_SKIP);
}
  832. static void
  833. stacktrace_count_trigger(struct event_trigger_data *data)
  834. {
  835. if (!data->count)
  836. return;
  837. if (data->count != -1)
  838. (data->count)--;
  839. stacktrace_trigger(data);
  840. }
/*
 * Show one "stacktrace" trigger line via the generic
 * event_trigger_print() helper; the count is passed through the
 * helper's void * argument.
 */
static int
stacktrace_trigger_print(struct seq_file *m, struct event_trigger_ops *ops,
			 struct event_trigger_data *data)
{
	return event_trigger_print("stacktrace", m, (void *)data->count,
				   data->filter_str);
}
/* ops for "stacktrace" with no count (fires every time) */
static struct event_trigger_ops stacktrace_trigger_ops = {
	.func			= stacktrace_trigger,
	.print			= stacktrace_trigger_print,
	.init			= event_trigger_init,
	.free			= event_trigger_free,
};
/* ops for "stacktrace:N" (fires at most N times) */
static struct event_trigger_ops stacktrace_count_trigger_ops = {
	.func			= stacktrace_count_trigger,
	.print			= stacktrace_trigger_print,
	.init			= event_trigger_init,
	.free			= event_trigger_free,
};
  860. static struct event_trigger_ops *
  861. stacktrace_get_trigger_ops(char *cmd, char *param)
  862. {
  863. return param ? &stacktrace_count_trigger_ops : &stacktrace_trigger_ops;
  864. }
/*
 * "stacktrace" trigger command.  post_trigger is set so the stack is
 * dumped after the event is recorded (matches the frame-skip count in
 * the STACK_SKIP comment above).
 */
static struct event_command trigger_stacktrace_cmd = {
	.name			= "stacktrace",
	.trigger_type		= ETT_STACKTRACE,
	.post_trigger		= true,
	.func			= event_trigger_callback,
	.reg			= register_trigger,
	.unreg			= unregister_trigger,
	.get_trigger_ops	= stacktrace_get_trigger_ops,
	.set_filter		= set_trigger_filter,
};
  875. static __init int register_trigger_stacktrace_cmd(void)
  876. {
  877. int ret;
  878. ret = register_event_command(&trigger_stacktrace_cmd);
  879. WARN_ON(ret < 0);
  880. return ret;
  881. }
  882. #else
/* CONFIG_STACKTRACE is off: registering the command is a no-op. */
static __init int register_trigger_stacktrace_cmd(void) { return 0; }
  884. #endif /* CONFIG_STACKTRACE */
/*
 * Roll back registration of the traceon/traceoff trigger commands
 * (used when registering the second of the pair fails).
 */
static __init void unregister_trigger_traceon_traceoff_cmds(void)
{
	unregister_event_command(&trigger_traceon_cmd);
	unregister_event_command(&trigger_traceoff_cmd);
}
  890. /* Avoid typos */
  891. #define ENABLE_EVENT_STR "enable_event"
  892. #define DISABLE_EVENT_STR "disable_event"
/* Private data for the enable_event/disable_event triggers. */
struct enable_trigger_data {
	struct trace_event_file		*file;		/* event file to enable/disable */
	bool				enable;		/* true = enable_event, false = disable_event */
};
  897. static void
  898. event_enable_trigger(struct event_trigger_data *data)
  899. {
  900. struct enable_trigger_data *enable_data = data->private_data;
  901. if (enable_data->enable)
  902. clear_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &enable_data->file->flags);
  903. else
  904. set_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &enable_data->file->flags);
  905. }
/*
 * Counted enable_event/disable_event trigger action.
 *
 * Does nothing once the count reaches zero.  If the target event is
 * already in the desired state, the trigger is skipped WITHOUT
 * consuming a count, so the count only covers real state transitions.
 * A count of -1 means "unlimited".
 */
static void
event_enable_count_trigger(struct event_trigger_data *data)
{
	struct enable_trigger_data *enable_data = data->private_data;

	if (!data->count)
		return;

	/* Skip if the event is in a state we want to switch to */
	if (enable_data->enable == !(enable_data->file->flags & EVENT_FILE_FL_SOFT_DISABLED))
		return;

	if (data->count != -1)
		(data->count)--;

	event_enable_trigger(data);
}
/*
 * Show one enable_event/disable_event trigger line, e.g.:
 *   enable_event:<system>:<event>:count=N [if <filter>]
 * A count of -1 is shown as ":unlimited".  Always returns 0.
 */
static int
event_enable_trigger_print(struct seq_file *m, struct event_trigger_ops *ops,
			   struct event_trigger_data *data)
{
	struct enable_trigger_data *enable_data = data->private_data;

	seq_printf(m, "%s:%s:%s",
		   enable_data->enable ? ENABLE_EVENT_STR : DISABLE_EVENT_STR,
		   enable_data->file->event_call->class->system,
		   trace_event_name(enable_data->file->event_call));

	if (data->count == -1)
		seq_puts(m, ":unlimited");
	else
		seq_printf(m, ":count=%ld", data->count);

	if (data->filter_str)
		seq_printf(m, " if %s\n", data->filter_str);
	else
		seq_putc(m, '\n');

	return 0;
}
/*
 * Drop a reference on an enable/disable trigger.  When the last
 * reference goes away, release everything the trigger pinned at
 * registration time: the target event's SOFT_MODE, the module that
 * owns the event, the trigger data and the private enable_data.
 */
static void
event_enable_trigger_free(struct event_trigger_ops *ops,
			  struct event_trigger_data *data)
{
	struct enable_trigger_data *enable_data = data->private_data;

	if (WARN_ON_ONCE(data->ref <= 0))
		return;

	data->ref--;
	if (!data->ref) {
		/* Remove the SOFT_MODE flag */
		trace_event_enable_disable(enable_data->file, 0, 1);
		module_put(enable_data->file->event_call->mod);
		trigger_data_free(data);
		kfree(enable_data);
	}
}
/* ops for "enable_event" with no count */
static struct event_trigger_ops event_enable_trigger_ops = {
	.func			= event_enable_trigger,
	.print			= event_enable_trigger_print,
	.init			= event_trigger_init,
	.free			= event_enable_trigger_free,
};
/* ops for "enable_event:...:N" (counted) */
static struct event_trigger_ops event_enable_count_trigger_ops = {
	.func			= event_enable_count_trigger,
	.print			= event_enable_trigger_print,
	.init			= event_trigger_init,
	.free			= event_enable_trigger_free,
};
/*
 * ops for "disable_event" with no count — same callbacks as the
 * enable variant; enable_trigger_data->enable selects the direction.
 */
static struct event_trigger_ops event_disable_trigger_ops = {
	.func			= event_enable_trigger,
	.print			= event_enable_trigger_print,
	.init			= event_trigger_init,
	.free			= event_enable_trigger_free,
};
/* ops for "disable_event:...:N" (counted) */
static struct event_trigger_ops event_disable_count_trigger_ops = {
	.func			= event_enable_count_trigger,
	.print			= event_enable_trigger_print,
	.init			= event_trigger_init,
	.free			= event_enable_trigger_free,
};
/*
 * Parse and install (or remove) an enable_event/disable_event trigger.
 *
 * @param carries "system:event[:count] [if filter]"; @glob beginning
 * with '!' means "remove this trigger".  On success the target event
 * is put into SOFT_MODE via trace_event_enable_disable() and its
 * owning module is pinned so it can't unload while the trigger is
 * registered.  Returns 0 on success, -errno on failure; failure paths
 * unwind in reverse order through the goto labels at the bottom.
 */
static int
event_enable_trigger_func(struct event_command *cmd_ops,
			  struct trace_event_file *file,
			  char *glob, char *cmd, char *param)
{
	struct trace_event_file *event_enable_file;
	struct enable_trigger_data *enable_data;
	struct event_trigger_data *trigger_data;
	struct event_trigger_ops *trigger_ops;
	struct trace_array *tr = file->tr;
	const char *system;
	const char *event;
	char *trigger;
	char *number;
	bool enable;
	int ret;

	if (!param)
		return -EINVAL;

	/* separate the trigger from the filter (s:e:n [if filter]) */
	trigger = strsep(&param, " \t");
	if (!trigger)
		return -EINVAL;

	system = strsep(&trigger, ":");
	if (!trigger)
		return -EINVAL;

	event = strsep(&trigger, ":");

	ret = -EINVAL;
	event_enable_file = find_event_file(tr, system, event);
	if (!event_enable_file)
		goto out;

	/* the command name selects between enable and disable */
	enable = strcmp(cmd, ENABLE_EVENT_STR) == 0;

	trigger_ops = cmd_ops->get_trigger_ops(cmd, trigger);

	ret = -ENOMEM;
	trigger_data = kzalloc(sizeof(*trigger_data), GFP_KERNEL);
	if (!trigger_data)
		goto out;

	enable_data = kzalloc(sizeof(*enable_data), GFP_KERNEL);
	if (!enable_data) {
		kfree(trigger_data);
		goto out;
	}

	trigger_data->count = -1;	/* default: unlimited */
	trigger_data->ops = trigger_ops;
	trigger_data->cmd_ops = cmd_ops;
	INIT_LIST_HEAD(&trigger_data->list);
	RCU_INIT_POINTER(trigger_data->filter, NULL);

	enable_data->enable = enable;
	enable_data->file = event_enable_file;
	trigger_data->private_data = enable_data;

	/* '!' prefix: remove an existing trigger instead of adding one */
	if (glob[0] == '!') {
		cmd_ops->unreg(glob+1, trigger_ops, trigger_data, file);
		kfree(trigger_data);
		kfree(enable_data);
		ret = 0;
		goto out;
	}

	if (trigger) {
		number = strsep(&trigger, ":");

		ret = -EINVAL;
		if (!strlen(number))
			goto out_free;

		/*
		 * We use the callback data field (which is a pointer)
		 * as our counter.
		 */
		ret = kstrtoul(number, 0, &trigger_data->count);
		if (ret)
			goto out_free;
	}

	if (!param) /* if param is non-empty, it's supposed to be a filter */
		goto out_reg;

	if (!cmd_ops->set_filter)
		goto out_reg;

	ret = cmd_ops->set_filter(param, trigger_data, file);
	if (ret < 0)
		goto out_free;

 out_reg:
	/* Don't let event modules unload while probe registered */
	ret = try_module_get(event_enable_file->event_call->mod);
	if (!ret) {
		ret = -EBUSY;
		goto out_free;
	}

	ret = trace_event_enable_disable(event_enable_file, 1, 1);
	if (ret < 0)
		goto out_put;

	ret = cmd_ops->reg(glob, trigger_ops, trigger_data, file);
	/*
	 * The above returns on success the # of functions enabled,
	 * but if it didn't find any functions it returns zero.
	 * Consider no functions a failure too.
	 */
	if (!ret) {
		ret = -ENOENT;
		goto out_disable;
	} else if (ret < 0)
		goto out_disable;
	/* Just return zero, not the number of enabled functions */
	ret = 0;
 out:
	return ret;

 out_disable:
	trace_event_enable_disable(event_enable_file, 0, 1);
 out_put:
	module_put(event_enable_file->event_call->mod);
 out_free:
	if (cmd_ops->set_filter)
		cmd_ops->set_filter(NULL, trigger_data, NULL);
	kfree(trigger_data);
	kfree(enable_data);
	goto out;
}
/*
 * Register an enable/disable trigger on @file.
 *
 * Refuses (-EEXIST) if a trigger targeting the same event file is
 * already on @file's trigger list.  Returns the number of triggers
 * registered (1) on success, 0 if enabling the trigger on the file
 * had to be rolled back, or a negative error.
 *
 * NOTE(review): the list is walked with list_for_each_entry_rcu();
 * presumably the caller serializes registration — confirm against
 * the callers of event_command.reg().
 */
static int event_enable_register_trigger(char *glob,
					 struct event_trigger_ops *ops,
					 struct event_trigger_data *data,
					 struct trace_event_file *file)
{
	struct enable_trigger_data *enable_data = data->private_data;
	struct enable_trigger_data *test_enable_data;
	struct event_trigger_data *test;
	int ret = 0;

	list_for_each_entry_rcu(test, &file->triggers, list) {
		test_enable_data = test->private_data;
		if (test_enable_data &&
		    (test_enable_data->file == enable_data->file)) {
			ret = -EEXIST;
			goto out;
		}
	}

	if (data->ops->init) {
		ret = data->ops->init(data->ops, data);
		if (ret < 0)
			goto out;
	}

	list_add_rcu(&data->list, &file->triggers);
	ret++;

	update_cond_flag(file);
	/* back out the list insertion if the file can't take the trigger */
	if (trace_event_trigger_enable_disable(file, 1) < 0) {
		list_del_rcu(&data->list);
		update_cond_flag(file);
		ret--;
	}
 out:
	return ret;
}
/*
 * Remove from @file the enable/disable trigger that targets the same
 * event file as @test, if one is registered.  The matching entry is
 * unlinked from the trigger list, the file's trigger count dropped,
 * and the entry released through its ->free() op.
 */
static void event_enable_unregister_trigger(char *glob,
					    struct event_trigger_ops *ops,
					    struct event_trigger_data *test,
					    struct trace_event_file *file)
{
	struct enable_trigger_data *test_enable_data = test->private_data;
	struct enable_trigger_data *enable_data;
	struct event_trigger_data *data;
	bool unregistered = false;

	list_for_each_entry_rcu(data, &file->triggers, list) {
		enable_data = data->private_data;
		if (enable_data &&
		    (enable_data->file == test_enable_data->file)) {
			unregistered = true;
			list_del_rcu(&data->list);
			trace_event_trigger_enable_disable(file, 0);
			update_cond_flag(file);
			break;
		}
	}

	/* only touch 'data' here if the loop actually found a match */
	if (unregistered && data->ops->free)
		data->ops->free(data->ops, data);
}
  1146. static struct event_trigger_ops *
  1147. event_enable_get_trigger_ops(char *cmd, char *param)
  1148. {
  1149. struct event_trigger_ops *ops;
  1150. bool enable;
  1151. enable = strcmp(cmd, ENABLE_EVENT_STR) == 0;
  1152. if (enable)
  1153. ops = param ? &event_enable_count_trigger_ops :
  1154. &event_enable_trigger_ops;
  1155. else
  1156. ops = param ? &event_disable_count_trigger_ops :
  1157. &event_disable_trigger_ops;
  1158. return ops;
  1159. }
/* "enable_event" trigger command */
static struct event_command trigger_enable_cmd = {
	.name			= ENABLE_EVENT_STR,
	.trigger_type		= ETT_EVENT_ENABLE,
	.func			= event_enable_trigger_func,
	.reg			= event_enable_register_trigger,
	.unreg			= event_enable_unregister_trigger,
	.get_trigger_ops	= event_enable_get_trigger_ops,
	.set_filter		= set_trigger_filter,
};
/*
 * "disable_event" trigger command — same callbacks as
 * trigger_enable_cmd; the command name is what selects the direction.
 */
static struct event_command trigger_disable_cmd = {
	.name			= DISABLE_EVENT_STR,
	.trigger_type		= ETT_EVENT_ENABLE,
	.func			= event_enable_trigger_func,
	.reg			= event_enable_register_trigger,
	.unreg			= event_enable_unregister_trigger,
	.get_trigger_ops	= event_enable_get_trigger_ops,
	.set_filter		= set_trigger_filter,
};
/*
 * Roll back registration of the enable_event/disable_event trigger
 * commands (used when registering the second of the pair fails).
 */
static __init void unregister_trigger_enable_disable_cmds(void)
{
	unregister_event_command(&trigger_enable_cmd);
	unregister_event_command(&trigger_disable_cmd);
}
  1183. static __init int register_trigger_enable_disable_cmds(void)
  1184. {
  1185. int ret;
  1186. ret = register_event_command(&trigger_enable_cmd);
  1187. if (WARN_ON(ret < 0))
  1188. return ret;
  1189. ret = register_event_command(&trigger_disable_cmd);
  1190. if (WARN_ON(ret < 0))
  1191. unregister_trigger_enable_disable_cmds();
  1192. return ret;
  1193. }
  1194. static __init int register_trigger_traceon_traceoff_cmds(void)
  1195. {
  1196. int ret;
  1197. ret = register_event_command(&trigger_traceon_cmd);
  1198. if (WARN_ON(ret < 0))
  1199. return ret;
  1200. ret = register_event_command(&trigger_traceoff_cmd);
  1201. if (WARN_ON(ret < 0))
  1202. unregister_trigger_traceon_traceoff_cmds();
  1203. return ret;
  1204. }
/*
 * register_trigger_cmds - register all event trigger commands at boot
 *
 * Each register_* helper WARNs on its own failure, so one command
 * failing to register does not stop the others.  Always returns 0.
 */
__init int register_trigger_cmds(void)
{
	register_trigger_traceon_traceoff_cmds();
	register_trigger_snapshot_cmd();
	register_trigger_stacktrace_cmd();
	register_trigger_enable_disable_cmds();

	return 0;
}