/*
 * trace_events_trigger - trace event triggers
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) 2013 Tom Zanussi <tom.zanussi@linux.intel.com>
 */

#include <linux/module.h>
#include <linux/ctype.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/rculist.h>

#include "trace.h"

static LIST_HEAD(trigger_commands);
static DEFINE_MUTEX(trigger_cmd_mutex);

void trigger_data_free(struct event_trigger_data *data)
{
	if (data->cmd_ops->set_filter)
		data->cmd_ops->set_filter(NULL, data, NULL);

	synchronize_sched(); /* make sure current triggers exit before free */
	kfree(data);
}

/**
 * event_triggers_call - Call triggers associated with a trace event
 * @file: The trace_event_file associated with the event
 * @rec: The trace entry for the event, NULL for unconditional invocation
 * @event: The event meta data in the ring buffer
 *
 * For each trigger associated with an event, invoke the trigger
 * function registered with the associated trigger command.  If rec is
 * non-NULL, it means that the trigger requires further processing and
 * shouldn't be unconditionally invoked.  If rec is non-NULL and the
 * trigger has a filter associated with it, rec will be checked against
 * the filter, and if the record matches, the trigger will be invoked.
 * If the trigger is a 'post_trigger', meaning it shouldn't be invoked
 * in any case until the current event is written, the trigger
 * function isn't invoked but the bit associated with the deferred
 * trigger is set in the return value.
 *
 * Called from tracepoint handlers (with rcu_read_lock_sched() held).
 *
 * Return: an enum event_trigger_type value containing a set bit for
 * any trigger that should be deferred, ETT_NONE if nothing to defer.
 */
enum event_trigger_type
event_triggers_call(struct trace_event_file *file, void *rec,
		    struct ring_buffer_event *event)
{
	struct event_trigger_data *data;
	enum event_trigger_type tt = ETT_NONE;
	struct event_filter *filter;

	if (list_empty(&file->triggers))
		return tt;

	list_for_each_entry_rcu(data, &file->triggers, list) {
		if (data->paused)
			continue;
		if (!rec) {
			data->ops->func(data, rec, event);
			continue;
		}
		filter = rcu_dereference_sched(data->filter);
		if (filter && !filter_match_preds(filter, rec))
			continue;
		if (event_command_post_trigger(data->cmd_ops)) {
			tt |= data->cmd_ops->trigger_type;
			continue;
		}
		data->ops->func(data, rec, event);
	}
	return tt;
}
EXPORT_SYMBOL_GPL(event_triggers_call);

/**
 * event_triggers_post_call - Call 'post_triggers' for a trace event
 * @file: The trace_event_file associated with the event
 * @tt: enum event_trigger_type containing a set bit for each trigger to invoke
 *
 * For each trigger associated with an event, invoke the trigger
 * function registered with the associated trigger command, if the
 * corresponding bit is set in the tt enum passed into this function.
 * See @event_triggers_call for details on how those bits are set.
 *
 * Called from tracepoint handlers (with rcu_read_lock_sched() held).
 */
void
event_triggers_post_call(struct trace_event_file *file,
			 enum event_trigger_type tt)
{
	struct event_trigger_data *data;

	list_for_each_entry_rcu(data, &file->triggers, list) {
		if (data->paused)
			continue;
		if (data->cmd_ops->trigger_type & tt)
			data->ops->func(data, NULL, NULL);
	}
}
EXPORT_SYMBOL_GPL(event_triggers_post_call);
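
/*
 * Illustrative sketch of how the two entry points above pair up on the
 * tracepoint fast path (the real caller lives in the trace event commit
 * code; the names below are only a simplified outline):
 *
 *	enum event_trigger_type tt = ETT_NONE;
 *
 *	if (trace_file->flags & EVENT_FILE_FL_TRIGGER_COND)
 *		tt = event_triggers_call(trace_file, entry, event);
 *	...commit the event to the ring buffer...
 *	if (tt)
 *		event_triggers_post_call(trace_file, tt);
 */
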
#define SHOW_AVAILABLE_TRIGGERS	(void *)(1UL)

static void *trigger_next(struct seq_file *m, void *t, loff_t *pos)
{
	struct trace_event_file *event_file = event_file_data(m->private);

	if (t == SHOW_AVAILABLE_TRIGGERS)
		return NULL;

	return seq_list_next(t, &event_file->triggers, pos);
}

static void *trigger_start(struct seq_file *m, loff_t *pos)
{
	struct trace_event_file *event_file;

	/* ->stop() is called even if ->start() fails */
	mutex_lock(&event_mutex);
	event_file = event_file_data(m->private);
	if (unlikely(!event_file))
		return ERR_PTR(-ENODEV);

	if (list_empty(&event_file->triggers))
		return *pos == 0 ? SHOW_AVAILABLE_TRIGGERS : NULL;

	return seq_list_start(&event_file->triggers, *pos);
}

static void trigger_stop(struct seq_file *m, void *t)
{
	mutex_unlock(&event_mutex);
}

static int trigger_show(struct seq_file *m, void *v)
{
	struct event_trigger_data *data;
	struct event_command *p;

	if (v == SHOW_AVAILABLE_TRIGGERS) {
		seq_puts(m, "# Available triggers:\n");
		seq_putc(m, '#');
		mutex_lock(&trigger_cmd_mutex);
		list_for_each_entry_reverse(p, &trigger_commands, list)
			seq_printf(m, " %s", p->name);
		seq_putc(m, '\n');
		mutex_unlock(&trigger_cmd_mutex);
		return 0;
	}

	data = list_entry(v, struct event_trigger_data, list);
	data->ops->print(m, data->ops, data);

	return 0;
}

static const struct seq_operations event_triggers_seq_ops = {
	.start = trigger_start,
	.next = trigger_next,
	.stop = trigger_stop,
	.show = trigger_show,
};

static int event_trigger_regex_open(struct inode *inode, struct file *file)
{
	int ret = 0;

	mutex_lock(&event_mutex);

	if (unlikely(!event_file_data(file))) {
		mutex_unlock(&event_mutex);
		return -ENODEV;
	}

	if ((file->f_mode & FMODE_WRITE) &&
	    (file->f_flags & O_TRUNC)) {
		struct trace_event_file *event_file;
		struct event_command *p;

		event_file = event_file_data(file);

		list_for_each_entry(p, &trigger_commands, list) {
			if (p->unreg_all)
				p->unreg_all(event_file);
		}
	}

	if (file->f_mode & FMODE_READ) {
		ret = seq_open(file, &event_triggers_seq_ops);
		if (!ret) {
			struct seq_file *m = file->private_data;
			m->private = file;
		}
	}

	mutex_unlock(&event_mutex);

	return ret;
}

static int trigger_process_regex(struct trace_event_file *file, char *buff)
{
	char *command, *next = buff;
	struct event_command *p;
	int ret = -EINVAL;

	command = strsep(&next, ": \t");
	command = (command[0] != '!') ? command : command + 1;

	mutex_lock(&trigger_cmd_mutex);
	list_for_each_entry(p, &trigger_commands, list) {
		if (strcmp(p->name, command) == 0) {
			ret = p->func(p, file, buff, command, next);
			goto out_unlock;
		}
	}
 out_unlock:
	mutex_unlock(&trigger_cmd_mutex);

	return ret;
}
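
/*
 * Example command strings accepted above (illustrative; the canonical
 * syntax is described in Documentation/trace/events.rst):
 *
 *	traceoff				- unconditional
 *	snapshot:5				- fire at most 5 times
 *	stacktrace:3 if common_pid == 1234	- count plus filter
 *	!traceoff				- remove an existing trigger
 *
 * strsep() splits off the command name ("traceoff", "snapshot", ...),
 * and the matching event_command's @func gets the rest as @param.
 */
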
static ssize_t event_trigger_regex_write(struct file *file,
					 const char __user *ubuf,
					 size_t cnt, loff_t *ppos)
{
	struct trace_event_file *event_file;
	ssize_t ret;
	char *buf;

	if (!cnt)
		return 0;

	if (cnt >= PAGE_SIZE)
		return -EINVAL;

	buf = memdup_user_nul(ubuf, cnt);
	if (IS_ERR(buf))
		return PTR_ERR(buf);

	strim(buf);

	mutex_lock(&event_mutex);
	event_file = event_file_data(file);
	if (unlikely(!event_file)) {
		mutex_unlock(&event_mutex);
		kfree(buf);
		return -ENODEV;
	}
	ret = trigger_process_regex(event_file, buf);
	mutex_unlock(&event_mutex);

	kfree(buf);
	if (ret < 0)
		goto out;

	*ppos += cnt;
	ret = cnt;
 out:
	return ret;
}

static int event_trigger_regex_release(struct inode *inode, struct file *file)
{
	mutex_lock(&event_mutex);

	if (file->f_mode & FMODE_READ)
		seq_release(inode, file);

	mutex_unlock(&event_mutex);

	return 0;
}

static ssize_t
event_trigger_write(struct file *filp, const char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	return event_trigger_regex_write(filp, ubuf, cnt, ppos);
}

static int
event_trigger_open(struct inode *inode, struct file *filp)
{
	return event_trigger_regex_open(inode, filp);
}

static int
event_trigger_release(struct inode *inode, struct file *file)
{
	return event_trigger_regex_release(inode, file);
}

const struct file_operations event_trigger_fops = {
	.open = event_trigger_open,
	.read = seq_read,
	.write = event_trigger_write,
	.llseek = tracing_lseek,
	.release = event_trigger_release,
};

/*
 * Currently we only register event commands from __init, so mark this
 * __init too.
 */
__init int register_event_command(struct event_command *cmd)
{
	struct event_command *p;
	int ret = 0;

	mutex_lock(&trigger_cmd_mutex);
	list_for_each_entry(p, &trigger_commands, list) {
		if (strcmp(cmd->name, p->name) == 0) {
			ret = -EBUSY;
			goto out_unlock;
		}
	}
	list_add(&cmd->list, &trigger_commands);
 out_unlock:
	mutex_unlock(&trigger_cmd_mutex);

	return ret;
}

/*
 * Currently we only unregister event commands from __init, so mark
 * this __init too.
 */
__init int unregister_event_command(struct event_command *cmd)
{
	struct event_command *p, *n;
	int ret = -ENODEV;

	mutex_lock(&trigger_cmd_mutex);
	list_for_each_entry_safe(p, n, &trigger_commands, list) {
		if (strcmp(cmd->name, p->name) == 0) {
			ret = 0;
			list_del_init(&p->list);
			goto out_unlock;
		}
	}
 out_unlock:
	mutex_unlock(&trigger_cmd_mutex);

	return ret;
}

/**
 * event_trigger_print - Generic event_trigger_ops @print implementation
 * @name: The name of the event trigger
 * @m: The seq_file being printed to
 * @data: Trigger-specific data
 * @filter_str: filter_str to print, if present
 *
 * Common implementation for event triggers to print themselves.
 *
 * Usually wrapped by a function that simply sets the @name of the
 * trigger command and then invokes this.
 *
 * Return: 0 on success, errno otherwise
 */
static int
event_trigger_print(const char *name, struct seq_file *m,
		    void *data, char *filter_str)
{
	long count = (long)data;

	seq_puts(m, name);

	if (count == -1)
		seq_puts(m, ":unlimited");
	else
		seq_printf(m, ":count=%ld", count);

	if (filter_str)
		seq_printf(m, " if %s\n", filter_str);
	else
		seq_putc(m, '\n');

	return 0;
}
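
/*
 * Sample trigger-file lines produced by the function above
 * (illustrative):
 *
 *	traceoff:unlimited
 *	traceon:count=5 if common_pid == 1234
 */
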
/**
 * event_trigger_init - Generic event_trigger_ops @init implementation
 * @ops: The trigger ops associated with the trigger
 * @data: Trigger-specific data
 *
 * Common implementation of event trigger initialization.
 *
 * Usually used directly as the @init method in event trigger
 * implementations.
 *
 * Return: 0 on success, errno otherwise
 */
int event_trigger_init(struct event_trigger_ops *ops,
		       struct event_trigger_data *data)
{
	data->ref++;
	return 0;
}

/**
 * event_trigger_free - Generic event_trigger_ops @free implementation
 * @ops: The trigger ops associated with the trigger
 * @data: Trigger-specific data
 *
 * Common implementation of event trigger de-initialization.
 *
 * Usually used directly as the @free method in event trigger
 * implementations.
 */
static void
event_trigger_free(struct event_trigger_ops *ops,
		   struct event_trigger_data *data)
{
	if (WARN_ON_ONCE(data->ref <= 0))
		return;

	data->ref--;
	if (!data->ref)
		trigger_data_free(data);
}

int trace_event_trigger_enable_disable(struct trace_event_file *file,
				       int trigger_enable)
{
	int ret = 0;

	if (trigger_enable) {
		if (atomic_inc_return(&file->tm_ref) > 1)
			return ret;
		set_bit(EVENT_FILE_FL_TRIGGER_MODE_BIT, &file->flags);
		ret = trace_event_enable_disable(file, 1, 1);
	} else {
		if (atomic_dec_return(&file->tm_ref) > 0)
			return ret;
		clear_bit(EVENT_FILE_FL_TRIGGER_MODE_BIT, &file->flags);
		ret = trace_event_enable_disable(file, 0, 1);
	}

	return ret;
}

/**
 * clear_event_triggers - Clear all triggers associated with a trace array
 * @tr: The trace array to clear
 *
 * For each trigger, the triggering event has its tm_ref decremented
 * via trace_event_trigger_enable_disable(), and any associated event
 * (in the case of enable/disable_event triggers) will have its sm_ref
 * decremented via free()->trace_event_enable_disable().  That
 * combination effectively reverses the soft-mode/trigger state added
 * by trigger registration.
 *
 * Must be called with event_mutex held.
 */
void
clear_event_triggers(struct trace_array *tr)
{
	struct trace_event_file *file;

	list_for_each_entry(file, &tr->events, list) {
		struct event_trigger_data *data, *n;

		list_for_each_entry_safe(data, n, &file->triggers, list) {
			trace_event_trigger_enable_disable(file, 0);
			list_del_rcu(&data->list);
			if (data->ops->free)
				data->ops->free(data->ops, data);
		}
	}
}

/**
 * update_cond_flag - Set or reset the TRIGGER_COND bit
 * @file: The trace_event_file associated with the event
 *
 * If an event has triggers and any of those triggers has a filter or
 * a post_trigger, trigger invocation needs to be deferred until after
 * the current event has logged its data, and the event should have
 * its TRIGGER_COND bit set, otherwise the TRIGGER_COND bit should be
 * cleared.
 */
void update_cond_flag(struct trace_event_file *file)
{
	struct event_trigger_data *data;
	bool set_cond = false;

	list_for_each_entry_rcu(data, &file->triggers, list) {
		if (data->filter || event_command_post_trigger(data->cmd_ops) ||
		    event_command_needs_rec(data->cmd_ops)) {
			set_cond = true;
			break;
		}
	}

	if (set_cond)
		set_bit(EVENT_FILE_FL_TRIGGER_COND_BIT, &file->flags);
	else
		clear_bit(EVENT_FILE_FL_TRIGGER_COND_BIT, &file->flags);
}

/**
 * register_trigger - Generic event_command @reg implementation
 * @glob: The raw string used to register the trigger
 * @ops: The trigger ops associated with the trigger
 * @data: Trigger-specific data to associate with the trigger
 * @file: The trace_event_file associated with the event
 *
 * Common implementation for event trigger registration.
 *
 * Usually used directly as the @reg method in event command
 * implementations.
 *
 * Return: 0 on success, errno otherwise
 */
static int register_trigger(char *glob, struct event_trigger_ops *ops,
			    struct event_trigger_data *data,
			    struct trace_event_file *file)
{
	struct event_trigger_data *test;
	int ret = 0;

	list_for_each_entry_rcu(test, &file->triggers, list) {
		if (test->cmd_ops->trigger_type == data->cmd_ops->trigger_type) {
			ret = -EEXIST;
			goto out;
		}
	}

	if (data->ops->init) {
		ret = data->ops->init(data->ops, data);
		if (ret < 0)
			goto out;
	}

	list_add_rcu(&data->list, &file->triggers);
	ret++;

	update_cond_flag(file);
	if (trace_event_trigger_enable_disable(file, 1) < 0) {
		list_del_rcu(&data->list);
		update_cond_flag(file);
		ret--;
	}
 out:
	return ret;
}
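
/*
 * Note: on success register_trigger() returns the number of triggers
 * registered (i.e. 1), not 0.  event_trigger_callback() below relies on
 * that to distinguish a successful registration from "nothing matched".
 */
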
/**
 * unregister_trigger - Generic event_command @unreg implementation
 * @glob: The raw string used to register the trigger
 * @ops: The trigger ops associated with the trigger
 * @test: Trigger-specific data used to find the trigger to remove
 * @file: The trace_event_file associated with the event
 *
 * Common implementation for event trigger unregistration.
 *
 * Usually used directly as the @unreg method in event command
 * implementations.
 */
void unregister_trigger(char *glob, struct event_trigger_ops *ops,
			struct event_trigger_data *test,
			struct trace_event_file *file)
{
	struct event_trigger_data *data;
	bool unregistered = false;

	list_for_each_entry_rcu(data, &file->triggers, list) {
		if (data->cmd_ops->trigger_type == test->cmd_ops->trigger_type) {
			unregistered = true;
			list_del_rcu(&data->list);
			trace_event_trigger_enable_disable(file, 0);
			update_cond_flag(file);
			break;
		}
	}

	if (unregistered && data->ops->free)
		data->ops->free(data->ops, data);
}

/**
 * event_trigger_callback - Generic event_command @func implementation
 * @cmd_ops: The command ops, used for trigger registration
 * @file: The trace_event_file associated with the event
 * @glob: The raw string used to register the trigger
 * @cmd: The cmd portion of the string used to register the trigger
 * @param: The params portion of the string used to register the trigger
 *
 * Common implementation for event command parsing and trigger
 * instantiation.
 *
 * Usually used directly as the @func method in event command
 * implementations.
 *
 * Return: 0 on success, errno otherwise
 */
static int
event_trigger_callback(struct event_command *cmd_ops,
		       struct trace_event_file *file,
		       char *glob, char *cmd, char *param)
{
	struct event_trigger_data *trigger_data;
	struct event_trigger_ops *trigger_ops;
	char *trigger = NULL;
	char *number;
	int ret;

	/* separate the trigger from the filter (t:n [if filter]) */
	if (param && isdigit(param[0]))
		trigger = strsep(&param, " \t");

	trigger_ops = cmd_ops->get_trigger_ops(cmd, trigger);

	ret = -ENOMEM;
	trigger_data = kzalloc(sizeof(*trigger_data), GFP_KERNEL);
	if (!trigger_data)
		goto out;

	trigger_data->count = -1;
	trigger_data->ops = trigger_ops;
	trigger_data->cmd_ops = cmd_ops;
	trigger_data->private_data = file;
	INIT_LIST_HEAD(&trigger_data->list);
	INIT_LIST_HEAD(&trigger_data->named_list);

	if (glob[0] == '!') {
		cmd_ops->unreg(glob+1, trigger_ops, trigger_data, file);
		kfree(trigger_data);
		ret = 0;
		goto out;
	}

	if (trigger) {
		number = strsep(&trigger, ":");

		ret = -EINVAL;
		if (!strlen(number))
			goto out_free;

		/*
		 * We use the callback data field (which is a pointer)
		 * as our counter.
		 */
		ret = kstrtoul(number, 0, &trigger_data->count);
		if (ret)
			goto out_free;
	}

	if (!param) /* if param is non-empty, it's supposed to be a filter */
		goto out_reg;

	if (!cmd_ops->set_filter)
		goto out_reg;

	ret = cmd_ops->set_filter(param, trigger_data, file);
	if (ret < 0)
		goto out_free;

 out_reg:
	ret = cmd_ops->reg(glob, trigger_ops, trigger_data, file);
	/*
	 * The above returns on success the # of functions enabled,
	 * but if it didn't find any functions it returns zero.
	 * Consider no functions a failure too.
	 */
	if (!ret) {
		ret = -ENOENT;
		goto out_free;
	} else if (ret < 0)
		goto out_free;
	ret = 0;
 out:
	return ret;

 out_free:
	if (cmd_ops->set_filter)
		cmd_ops->set_filter(NULL, trigger_data, NULL);
	kfree(trigger_data);
	goto out;
}
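
/*
 * Worked example for the parse above (illustrative): echoing
 * "traceon:5 if common_pid != 0" into a trigger file reaches this
 * function with glob = cmd = "traceon" (trigger_process_regex() has
 * already split off the ':') and param = "5 if common_pid != 0".
 * strsep() then peels off "5" as the count, and the remaining
 * "if common_pid != 0" is handed to ->set_filter().
 */
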
/**
 * set_trigger_filter - Generic event_command @set_filter implementation
 * @filter_str: The filter string for the trigger, NULL to remove filter
 * @trigger_data: Trigger-specific data
 * @file: The trace_event_file associated with the event
 *
 * Common implementation for event command filter parsing and filter
 * instantiation.
 *
 * Usually used directly as the @set_filter method in event command
 * implementations.
 *
 * Also used to remove a filter (if filter_str = NULL).
 *
 * Return: 0 on success, errno otherwise
 */
int set_trigger_filter(char *filter_str,
		       struct event_trigger_data *trigger_data,
		       struct trace_event_file *file)
{
	struct event_trigger_data *data = trigger_data;
	struct event_filter *filter = NULL, *tmp;
	int ret = -EINVAL;
	char *s;

	if (!filter_str) /* clear the current filter */
		goto assign;

	s = strsep(&filter_str, " \t");

	if (!strlen(s) || strcmp(s, "if") != 0)
		goto out;

	if (!filter_str)
		goto out;

	/* The filter is for the 'trigger' event, not the triggered event */
	ret = create_event_filter(file->event_call, filter_str, false, &filter);
	if (ret)
		goto out;
 assign:
	tmp = rcu_access_pointer(data->filter);

	rcu_assign_pointer(data->filter, filter);

	if (tmp) {
		/* Make sure the call is done with the filter */
		synchronize_sched();
		free_event_filter(tmp);
	}

	kfree(data->filter_str);
	data->filter_str = NULL;

	if (filter_str) {
		data->filter_str = kstrdup(filter_str, GFP_KERNEL);
		if (!data->filter_str) {
			free_event_filter(rcu_access_pointer(data->filter));
			data->filter = NULL;
			ret = -ENOMEM;
		}
	}
 out:
	return ret;
}

static LIST_HEAD(named_triggers);

/**
 * find_named_trigger - Find the common named trigger associated with @name
 * @name: The name of the set of named triggers to find the common data for
 *
 * Named triggers are sets of triggers that share a common set of
 * trigger data.  The first named trigger registered with a given name
 * owns the common trigger data that the others subsequently
 * registered with the same name will reference.  This function
 * returns the common trigger data associated with that first
 * registered instance.
 *
 * Return: the common trigger data for the given named trigger on
 * success, NULL otherwise.
 */
struct event_trigger_data *find_named_trigger(const char *name)
{
	struct event_trigger_data *data;

	if (!name)
		return NULL;

	list_for_each_entry(data, &named_triggers, named_list) {
		if (data->named_data)
			continue;
		if (strcmp(data->name, name) == 0)
			return data;
	}

	return NULL;
}

/**
 * is_named_trigger - determine if a given trigger is a named trigger
 * @test: The trigger data to test
 *
 * Return: true if 'test' is a named trigger, false otherwise.
 */
bool is_named_trigger(struct event_trigger_data *test)
{
	struct event_trigger_data *data;

	list_for_each_entry(data, &named_triggers, named_list) {
		if (test == data)
			return true;
	}

	return false;
}

/**
 * save_named_trigger - save the trigger in the named trigger list
 * @name: The name of the named trigger set
 * @data: The trigger data to save
 *
 * Return: 0 if successful, negative error otherwise.
 */
int save_named_trigger(const char *name, struct event_trigger_data *data)
{
	data->name = kstrdup(name, GFP_KERNEL);
	if (!data->name)
		return -ENOMEM;

	list_add(&data->named_list, &named_triggers);

	return 0;
}

/**
 * del_named_trigger - delete a trigger from the named trigger list
 * @data: The trigger data to delete
 */
void del_named_trigger(struct event_trigger_data *data)
{
	kfree(data->name);
	data->name = NULL;

	list_del(&data->named_list);
}

static void __pause_named_trigger(struct event_trigger_data *data, bool pause)
{
	struct event_trigger_data *test;

	list_for_each_entry(test, &named_triggers, named_list) {
		if (strcmp(test->name, data->name) == 0) {
			if (pause) {
				test->paused_tmp = test->paused;
				test->paused = true;
			} else {
				test->paused = test->paused_tmp;
			}
		}
	}
}

/**
 * pause_named_trigger - Pause all named triggers with the same name
 * @data: The trigger data of a named trigger to pause
 *
 * Pauses a named trigger along with all other triggers having the
 * same name.  Because named triggers share a common set of data,
 * pausing only one is meaningless, so pausing one named trigger needs
 * to pause all triggers with the same name.
 */
void pause_named_trigger(struct event_trigger_data *data)
{
	__pause_named_trigger(data, true);
}

/**
 * unpause_named_trigger - Un-pause all named triggers with the same name
 * @data: The trigger data of a named trigger to unpause
 *
 * Un-pauses a named trigger along with all other triggers having the
 * same name.  Because named triggers share a common set of data,
 * unpausing only one is meaningless, so unpausing one named trigger
 * needs to unpause all triggers with the same name.
 */
void unpause_named_trigger(struct event_trigger_data *data)
{
	__pause_named_trigger(data, false);
}

/**
 * set_named_trigger_data - Associate common named trigger data
 * @data: The trigger data to associate
 * @named_data: The common named trigger data to be associated
 *
 * Named triggers are sets of triggers that share a common set of
 * trigger data.  The first named trigger registered with a given name
 * owns the common trigger data that the others subsequently
 * registered with the same name will reference.  This function
 * associates the common trigger data from the first trigger with the
 * given trigger.
 */
void set_named_trigger_data(struct event_trigger_data *data,
			    struct event_trigger_data *named_data)
{
	data->named_data = named_data;
}

struct event_trigger_data *
get_named_trigger_data(struct event_trigger_data *data)
{
	return data->named_data;
}
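
/*
 * Named triggers are currently used by hist triggers (illustrative):
 * registering 'hist:keys=call_site:name=foo' on two different events
 * makes both events feed the single histogram owned by whichever
 * trigger registered the name "foo" first.
 */
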
static void
traceon_trigger(struct event_trigger_data *data, void *rec,
		struct ring_buffer_event *event)
{
	if (tracing_is_on())
		return;

	tracing_on();
}

static void
traceon_count_trigger(struct event_trigger_data *data, void *rec,
		      struct ring_buffer_event *event)
{
	if (tracing_is_on())
		return;

	if (!data->count)
		return;

	if (data->count != -1)
		(data->count)--;

	tracing_on();
}

static void
traceoff_trigger(struct event_trigger_data *data, void *rec,
		 struct ring_buffer_event *event)
{
	if (!tracing_is_on())
		return;

	tracing_off();
}

static void
traceoff_count_trigger(struct event_trigger_data *data, void *rec,
		       struct ring_buffer_event *event)
{
	if (!tracing_is_on())
		return;

	if (!data->count)
		return;

	if (data->count != -1)
		(data->count)--;

	tracing_off();
}

static int
traceon_trigger_print(struct seq_file *m, struct event_trigger_ops *ops,
		      struct event_trigger_data *data)
{
	return event_trigger_print("traceon", m, (void *)data->count,
				   data->filter_str);
}

static int
traceoff_trigger_print(struct seq_file *m, struct event_trigger_ops *ops,
		       struct event_trigger_data *data)
{
	return event_trigger_print("traceoff", m, (void *)data->count,
				   data->filter_str);
}

static struct event_trigger_ops traceon_trigger_ops = {
	.func = traceon_trigger,
	.print = traceon_trigger_print,
	.init = event_trigger_init,
	.free = event_trigger_free,
};

static struct event_trigger_ops traceon_count_trigger_ops = {
	.func = traceon_count_trigger,
	.print = traceon_trigger_print,
	.init = event_trigger_init,
	.free = event_trigger_free,
};

static struct event_trigger_ops traceoff_trigger_ops = {
	.func = traceoff_trigger,
	.print = traceoff_trigger_print,
	.init = event_trigger_init,
	.free = event_trigger_free,
};

static struct event_trigger_ops traceoff_count_trigger_ops = {
	.func = traceoff_count_trigger,
	.print = traceoff_trigger_print,
	.init = event_trigger_init,
	.free = event_trigger_free,
};

static struct event_trigger_ops *
onoff_get_trigger_ops(char *cmd, char *param)
{
	struct event_trigger_ops *ops;

	/* we register both traceon and traceoff to this callback */
	if (strcmp(cmd, "traceon") == 0)
		ops = param ? &traceon_count_trigger_ops :
			&traceon_trigger_ops;
	else
		ops = param ? &traceoff_count_trigger_ops :
			&traceoff_trigger_ops;

	return ops;
}

static struct event_command trigger_traceon_cmd = {
	.name = "traceon",
	.trigger_type = ETT_TRACE_ONOFF,
	.func = event_trigger_callback,
	.reg = register_trigger,
	.unreg = unregister_trigger,
	.get_trigger_ops = onoff_get_trigger_ops,
	.set_filter = set_trigger_filter,
};

static struct event_command trigger_traceoff_cmd = {
	.name = "traceoff",
	.trigger_type = ETT_TRACE_ONOFF,
	.flags = EVENT_CMD_FL_POST_TRIGGER,
	.func = event_trigger_callback,
	.reg = register_trigger,
	.unreg = unregister_trigger,
	.get_trigger_ops = onoff_get_trigger_ops,
	.set_filter = set_trigger_filter,
};
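
/*
 * Example usage (illustrative):
 *
 *	# echo 'traceoff:1 if irq == 128' > \
 *	    /sys/kernel/debug/tracing/events/irq/irq_handler_entry/trigger
 *
 * stops tracing the first time an irq_handler_entry event with
 * irq == 128 fires; because traceoff is a POST_TRIGGER, the triggering
 * event itself is still recorded before tracing is turned off.
 */
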
#ifdef CONFIG_TRACER_SNAPSHOT
static void
snapshot_trigger(struct event_trigger_data *data, void *rec,
		 struct ring_buffer_event *event)
{
	struct trace_event_file *file = data->private_data;

	if (file)
		tracing_snapshot_instance(file->tr);
	else
		tracing_snapshot();
}

static void
snapshot_count_trigger(struct event_trigger_data *data, void *rec,
		       struct ring_buffer_event *event)
{
	if (!data->count)
		return;

	if (data->count != -1)
		(data->count)--;

	snapshot_trigger(data, rec, event);
}

static int
register_snapshot_trigger(char *glob, struct event_trigger_ops *ops,
			  struct event_trigger_data *data,
			  struct trace_event_file *file)
{
	int ret = register_trigger(glob, ops, data, file);

	if (ret > 0 && tracing_alloc_snapshot_instance(file->tr) != 0) {
		unregister_trigger(glob, ops, data, file);
		ret = 0;
	}

	return ret;
}

static int
snapshot_trigger_print(struct seq_file *m, struct event_trigger_ops *ops,
		       struct event_trigger_data *data)
{
	return event_trigger_print("snapshot", m, (void *)data->count,
				   data->filter_str);
}

static struct event_trigger_ops snapshot_trigger_ops = {
	.func = snapshot_trigger,
	.print = snapshot_trigger_print,
	.init = event_trigger_init,
	.free = event_trigger_free,
};

static struct event_trigger_ops snapshot_count_trigger_ops = {
	.func = snapshot_count_trigger,
	.print = snapshot_trigger_print,
	.init = event_trigger_init,
	.free = event_trigger_free,
};

static struct event_trigger_ops *
snapshot_get_trigger_ops(char *cmd, char *param)
{
	return param ? &snapshot_count_trigger_ops : &snapshot_trigger_ops;
}

static struct event_command trigger_snapshot_cmd = {
	.name = "snapshot",
	.trigger_type = ETT_SNAPSHOT,
	.func = event_trigger_callback,
	.reg = register_snapshot_trigger,
	.unreg = unregister_trigger,
	.get_trigger_ops = snapshot_get_trigger_ops,
	.set_filter = set_trigger_filter,
};

static __init int register_trigger_snapshot_cmd(void)
{
	int ret;

	ret = register_event_command(&trigger_snapshot_cmd);
	WARN_ON(ret < 0);

	return ret;
}
#else
static __init int register_trigger_snapshot_cmd(void) { return 0; }
#endif /* CONFIG_TRACER_SNAPSHOT */

#ifdef CONFIG_STACKTRACE
#ifdef CONFIG_UNWINDER_ORC
/* Skip 2:
 *   event_triggers_post_call()
 *   trace_event_raw_event_xxx()
 */
# define STACK_SKIP 2
#else
/*
 * Skip 4:
 *   stacktrace_trigger()
 *   event_triggers_post_call()
 *   trace_event_buffer_commit()
 *   trace_event_raw_event_xxx()
 */
#define STACK_SKIP 4
#endif

static void
stacktrace_trigger(struct event_trigger_data *data, void *rec,
		   struct ring_buffer_event *event)
{
	trace_dump_stack(STACK_SKIP);
}

static void
stacktrace_count_trigger(struct event_trigger_data *data, void *rec,
			 struct ring_buffer_event *event)
{
	if (!data->count)
		return;

	if (data->count != -1)
		(data->count)--;

	stacktrace_trigger(data, rec, event);
}

static int
stacktrace_trigger_print(struct seq_file *m, struct event_trigger_ops *ops,
			 struct event_trigger_data *data)
{
	return event_trigger_print("stacktrace", m, (void *)data->count,
				   data->filter_str);
}

static struct event_trigger_ops stacktrace_trigger_ops = {
	.func = stacktrace_trigger,
	.print = stacktrace_trigger_print,
	.init = event_trigger_init,
	.free = event_trigger_free,
};

static struct event_trigger_ops stacktrace_count_trigger_ops = {
	.func = stacktrace_count_trigger,
	.print = stacktrace_trigger_print,
	.init = event_trigger_init,
	.free = event_trigger_free,
};

static struct event_trigger_ops *
stacktrace_get_trigger_ops(char *cmd, char *param)
{
	return param ? &stacktrace_count_trigger_ops : &stacktrace_trigger_ops;
}

static struct event_command trigger_stacktrace_cmd = {
	.name = "stacktrace",
	.trigger_type = ETT_STACKTRACE,
	.flags = EVENT_CMD_FL_POST_TRIGGER,
	.func = event_trigger_callback,
	.reg = register_trigger,
	.unreg = unregister_trigger,
	.get_trigger_ops = stacktrace_get_trigger_ops,
	.set_filter = set_trigger_filter,
};

static __init int register_trigger_stacktrace_cmd(void)
{
	int ret;

	ret = register_event_command(&trigger_stacktrace_cmd);
	WARN_ON(ret < 0);

	return ret;
}
#else
static __init int register_trigger_stacktrace_cmd(void) { return 0; }
#endif /* CONFIG_STACKTRACE */

static __init void unregister_trigger_traceon_traceoff_cmds(void)
{
	unregister_event_command(&trigger_traceon_cmd);
	unregister_event_command(&trigger_traceoff_cmd);
}

static void
event_enable_trigger(struct event_trigger_data *data, void *rec,
		     struct ring_buffer_event *event)
{
	struct enable_trigger_data *enable_data = data->private_data;

	if (enable_data->enable)
		clear_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &enable_data->file->flags);
	else
		set_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &enable_data->file->flags);
}

static void
event_enable_count_trigger(struct event_trigger_data *data, void *rec,
			   struct ring_buffer_event *event)
{
	struct enable_trigger_data *enable_data = data->private_data;

	if (!data->count)
		return;

	/* Skip if the event is in a state we want to switch to */
	if (enable_data->enable == !(enable_data->file->flags & EVENT_FILE_FL_SOFT_DISABLED))
		return;

	if (data->count != -1)
		(data->count)--;

	event_enable_trigger(data, rec, event);
}

int event_enable_trigger_print(struct seq_file *m,
			       struct event_trigger_ops *ops,
			       struct event_trigger_data *data)
{
	struct enable_trigger_data *enable_data = data->private_data;

	seq_printf(m, "%s:%s:%s",
		   enable_data->hist ?
		   (enable_data->enable ? ENABLE_HIST_STR : DISABLE_HIST_STR) :
		   (enable_data->enable ? ENABLE_EVENT_STR : DISABLE_EVENT_STR),
		   enable_data->file->event_call->class->system,
		   trace_event_name(enable_data->file->event_call));

	if (data->count == -1)
		seq_puts(m, ":unlimited");
	else
		seq_printf(m, ":count=%ld", data->count);

	if (data->filter_str)
		seq_printf(m, " if %s\n", data->filter_str);
	else
		seq_putc(m, '\n');

	return 0;
}

void event_enable_trigger_free(struct event_trigger_ops *ops,
			       struct event_trigger_data *data)
{
	struct enable_trigger_data *enable_data = data->private_data;

	if (WARN_ON_ONCE(data->ref <= 0))
		return;

	data->ref--;
	if (!data->ref) {
		/* Remove the SOFT_MODE flag */
		trace_event_enable_disable(enable_data->file, 0, 1);
		module_put(enable_data->file->event_call->mod);
		trigger_data_free(data);
		kfree(enable_data);
	}
}

static struct event_trigger_ops event_enable_trigger_ops = {
	.func = event_enable_trigger,
	.print = event_enable_trigger_print,
	.init = event_trigger_init,
	.free = event_enable_trigger_free,
};

static struct event_trigger_ops event_enable_count_trigger_ops = {
	.func = event_enable_count_trigger,
	.print = event_enable_trigger_print,
	.init = event_trigger_init,
	.free = event_enable_trigger_free,
};

static struct event_trigger_ops event_disable_trigger_ops = {
	.func = event_enable_trigger,
	.print = event_enable_trigger_print,
	.init = event_trigger_init,
	.free = event_enable_trigger_free,
};

static struct event_trigger_ops event_disable_count_trigger_ops = {
	.func = event_enable_count_trigger,
	.print = event_enable_trigger_print,
	.init = event_trigger_init,
	.free = event_enable_trigger_free,
};

int event_enable_trigger_func(struct event_command *cmd_ops,
			      struct trace_event_file *file,
			      char *glob, char *cmd, char *param)
{
	struct trace_event_file *event_enable_file;
	struct enable_trigger_data *enable_data;
	struct event_trigger_data *trigger_data;
	struct event_trigger_ops *trigger_ops;
	struct trace_array *tr = file->tr;
	const char *system;
	const char *event;
	bool hist = false;
	char *trigger;
	char *number;
	bool enable;
	int ret;

	if (!param)
		return -EINVAL;

	/* separate the trigger from the filter (s:e:n [if filter]) */
	trigger = strsep(&param, " \t");
	if (!trigger)
		return -EINVAL;

	system = strsep(&trigger, ":");
	if (!trigger)
		return -EINVAL;

	event = strsep(&trigger, ":");

	ret = -EINVAL;
	event_enable_file = find_event_file(tr, system, event);
	if (!event_enable_file)
		goto out;

#ifdef CONFIG_HIST_TRIGGERS
	hist = ((strcmp(cmd, ENABLE_HIST_STR) == 0) ||
		(strcmp(cmd, DISABLE_HIST_STR) == 0));

	enable = ((strcmp(cmd, ENABLE_EVENT_STR) == 0) ||
		  (strcmp(cmd, ENABLE_HIST_STR) == 0));
#else
	enable = strcmp(cmd, ENABLE_EVENT_STR) == 0;
#endif
	trigger_ops = cmd_ops->get_trigger_ops(cmd, trigger);

	ret = -ENOMEM;
	trigger_data = kzalloc(sizeof(*trigger_data), GFP_KERNEL);
	if (!trigger_data)
		goto out;

	enable_data = kzalloc(sizeof(*enable_data), GFP_KERNEL);
	if (!enable_data) {
		kfree(trigger_data);
		goto out;
	}

	trigger_data->count = -1;
	trigger_data->ops = trigger_ops;
	trigger_data->cmd_ops = cmd_ops;
	INIT_LIST_HEAD(&trigger_data->list);
	RCU_INIT_POINTER(trigger_data->filter, NULL);

	enable_data->hist = hist;
	enable_data->enable = enable;
	enable_data->file = event_enable_file;
	trigger_data->private_data = enable_data;

	if (glob[0] == '!') {
		cmd_ops->unreg(glob+1, trigger_ops, trigger_data, file);
		kfree(trigger_data);
		kfree(enable_data);
		ret = 0;
		goto out;
	}

	if (trigger) {
		number = strsep(&trigger, ":");

		ret = -EINVAL;
		if (!strlen(number))
			goto out_free;

		/*
		 * We use the callback data field (which is a pointer)
		 * as our counter.
		 */
		ret = kstrtoul(number, 0, &trigger_data->count);
		if (ret)
			goto out_free;
	}

	if (!param) /* if param is non-empty, it's supposed to be a filter */
		goto out_reg;

	if (!cmd_ops->set_filter)
		goto out_reg;

	ret = cmd_ops->set_filter(param, trigger_data, file);
	if (ret < 0)
		goto out_free;

 out_reg:
	/* Don't let event modules unload while probe registered */
	ret = try_module_get(event_enable_file->event_call->mod);
	if (!ret) {
		ret = -EBUSY;
		goto out_free;
	}

	ret = trace_event_enable_disable(event_enable_file, 1, 1);
	if (ret < 0)
		goto out_put;

	ret = cmd_ops->reg(glob, trigger_ops, trigger_data, file);
	/*
	 * The above returns on success the # of functions enabled,
	 * but if it didn't find any functions it returns zero.
	 * Consider no functions a failure too.
	 */
	if (!ret) {
		ret = -ENOENT;
		goto out_disable;
	} else if (ret < 0)
		goto out_disable;
	/* Just return zero, not the number of enabled functions */
	ret = 0;
 out:
	return ret;

 out_disable:
	trace_event_enable_disable(event_enable_file, 0, 1);
 out_put:
	module_put(event_enable_file->event_call->mod);
 out_free:
	if (cmd_ops->set_filter)
		cmd_ops->set_filter(NULL, trigger_data, NULL);
	kfree(trigger_data);
	kfree(enable_data);
	goto out;
}
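
/*
 * Worked example for the parse above (illustrative): writing
 *
 *	enable_event:sched:sched_switch:5 if prev_prio < 100
 *
 * to some other event's trigger file arrives here with
 * cmd = "enable_event" and param = "sched:sched_switch:5 ...", which
 * strsep() splits into system = "sched", event = "sched_switch" and a
 * count of 5; sched:sched_switch is then put into soft mode while the
 * trigger is armed.
 */
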
int event_enable_register_trigger(char *glob,
				  struct event_trigger_ops *ops,
				  struct event_trigger_data *data,
				  struct trace_event_file *file)
{
	struct enable_trigger_data *enable_data = data->private_data;
	struct enable_trigger_data *test_enable_data;
	struct event_trigger_data *test;
	int ret = 0;

	list_for_each_entry_rcu(test, &file->triggers, list) {
		test_enable_data = test->private_data;
		if (test_enable_data &&
		    (test->cmd_ops->trigger_type ==
		     data->cmd_ops->trigger_type) &&
		    (test_enable_data->file == enable_data->file)) {
			ret = -EEXIST;
			goto out;
		}
	}

	if (data->ops->init) {
		ret = data->ops->init(data->ops, data);
		if (ret < 0)
			goto out;
	}

	list_add_rcu(&data->list, &file->triggers);
	ret++;

	update_cond_flag(file);
	if (trace_event_trigger_enable_disable(file, 1) < 0) {
		list_del_rcu(&data->list);
		update_cond_flag(file);
		ret--;
	}
 out:
	return ret;
}

void event_enable_unregister_trigger(char *glob,
				     struct event_trigger_ops *ops,
				     struct event_trigger_data *test,
				     struct trace_event_file *file)
{
	struct enable_trigger_data *test_enable_data = test->private_data;
	struct enable_trigger_data *enable_data;
	struct event_trigger_data *data;
	bool unregistered = false;

	list_for_each_entry_rcu(data, &file->triggers, list) {
		enable_data = data->private_data;
		if (enable_data &&
		    (data->cmd_ops->trigger_type ==
		     test->cmd_ops->trigger_type) &&
		    (enable_data->file == test_enable_data->file)) {
			unregistered = true;
			list_del_rcu(&data->list);
			trace_event_trigger_enable_disable(file, 0);
			update_cond_flag(file);
			break;
		}
	}

	if (unregistered && data->ops->free)
		data->ops->free(data->ops, data);
}

static struct event_trigger_ops *
event_enable_get_trigger_ops(char *cmd, char *param)
{
	struct event_trigger_ops *ops;
	bool enable;

#ifdef CONFIG_HIST_TRIGGERS
	enable = ((strcmp(cmd, ENABLE_EVENT_STR) == 0) ||
		  (strcmp(cmd, ENABLE_HIST_STR) == 0));
#else
	enable = strcmp(cmd, ENABLE_EVENT_STR) == 0;
#endif
	if (enable)
		ops = param ? &event_enable_count_trigger_ops :
			&event_enable_trigger_ops;
	else
		ops = param ? &event_disable_count_trigger_ops :
			&event_disable_trigger_ops;

	return ops;
}

static struct event_command trigger_enable_cmd = {
	.name = ENABLE_EVENT_STR,
	.trigger_type = ETT_EVENT_ENABLE,
	.func = event_enable_trigger_func,
	.reg = event_enable_register_trigger,
	.unreg = event_enable_unregister_trigger,
	.get_trigger_ops = event_enable_get_trigger_ops,
	.set_filter = set_trigger_filter,
};

static struct event_command trigger_disable_cmd = {
	.name = DISABLE_EVENT_STR,
	.trigger_type = ETT_EVENT_ENABLE,
	.func = event_enable_trigger_func,
	.reg = event_enable_register_trigger,
	.unreg = event_enable_unregister_trigger,
	.get_trigger_ops = event_enable_get_trigger_ops,
	.set_filter = set_trigger_filter,
};

static __init void unregister_trigger_enable_disable_cmds(void)
{
	unregister_event_command(&trigger_enable_cmd);
	unregister_event_command(&trigger_disable_cmd);
}

static __init int register_trigger_enable_disable_cmds(void)
{
	int ret;

	ret = register_event_command(&trigger_enable_cmd);
	if (WARN_ON(ret < 0))
		return ret;
	ret = register_event_command(&trigger_disable_cmd);
	if (WARN_ON(ret < 0))
		unregister_trigger_enable_disable_cmds();

	return ret;
}

static __init int register_trigger_traceon_traceoff_cmds(void)
{
	int ret;

	ret = register_event_command(&trigger_traceon_cmd);
	if (WARN_ON(ret < 0))
		return ret;
	ret = register_event_command(&trigger_traceoff_cmd);
	if (WARN_ON(ret < 0))
		unregister_trigger_traceon_traceoff_cmds();

	return ret;
}

__init int register_trigger_cmds(void)
{
	register_trigger_traceon_traceoff_cmds();
	register_trigger_snapshot_cmd();
	register_trigger_stacktrace_cmd();
	register_trigger_enable_disable_cmds();
	register_trigger_hist_enable_disable_cmds();
	register_trigger_hist_cmd();

	return 0;
}
  1396. }