/* kernel/tracepoint.c */
/*
 * Copyright (C) 2008 Mathieu Desnoyers
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/types.h>
#include <linux/jhash.h>
#include <linux/list.h>
#include <linux/rcupdate.h>
#include <linux/tracepoint.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/static_key.h>
/* Linker-provided bounds of the core kernel's tracepoint pointer section. */
extern struct tracepoint * const __start___tracepoints_ptrs[];
extern struct tracepoint * const __stop___tracepoints_ptrs[];

/* Set to 1 to enable tracepoint debug output */
static const int tracepoint_debug;

/*
 * Tracepoints mutex protects the builtin and module tracepoints and the hash
 * table, as well as the local module list.
 */
static DEFINE_MUTEX(tracepoints_mutex);

#ifdef CONFIG_MODULES
/* Local list of struct module */
static LIST_HEAD(tracepoint_module_list);
#endif /* CONFIG_MODULES */

/*
 * Tracepoint hash table, containing the active tracepoints.
 * Protected by tracepoints_mutex.
 */
#define TRACEPOINT_HASH_BITS 6
#define TRACEPOINT_TABLE_SIZE (1 << TRACEPOINT_HASH_BITS)
static struct hlist_head tracepoint_table[TRACEPOINT_TABLE_SIZE];
/*
 * Note about RCU :
 * It is used to delay the free of multiple probes array until a quiescent
 * state is reached.
 * Tracepoint entries modifications are protected by the tracepoints_mutex.
 */
struct tracepoint_entry {
	struct hlist_node hlist;	/* chaining in a tracepoint_table bucket */
	struct tracepoint_func *funcs;	/* NULL-func-terminated probe array */
	int refcount;	/* Number of times armed. 0 if disarmed. */
	int enabled;	/* Tracepoint enabled */
	char name[0];	/* tracepoint name, stored inline after the struct */
};

/*
 * Header placed in front of every probes array so the same allocation can
 * be queued either for RCU-delayed freeing (u.rcu) or on the deferred
 * old_probes list (u.list).
 */
struct tp_probes {
	union {
		struct rcu_head rcu;
		struct list_head list;
	} u;
	struct tracepoint_func probes[0];
};
  69. static inline void *allocate_probes(int count)
  70. {
  71. struct tp_probes *p = kmalloc(count * sizeof(struct tracepoint_func)
  72. + sizeof(struct tp_probes), GFP_KERNEL);
  73. return p == NULL ? NULL : p->probes;
  74. }
  75. static void rcu_free_old_probes(struct rcu_head *head)
  76. {
  77. kfree(container_of(head, struct tp_probes, u.rcu));
  78. }
  79. static inline void release_probes(struct tracepoint_func *old)
  80. {
  81. if (old) {
  82. struct tp_probes *tp_probes = container_of(old,
  83. struct tp_probes, probes[0]);
  84. call_rcu_sched(&tp_probes->u.rcu, rcu_free_old_probes);
  85. }
  86. }
  87. static void debug_print_probes(struct tracepoint_entry *entry)
  88. {
  89. int i;
  90. if (!tracepoint_debug || !entry->funcs)
  91. return;
  92. for (i = 0; entry->funcs[i].func; i++)
  93. printk(KERN_DEBUG "Probe %d : %p\n", i, entry->funcs[i].func);
  94. }
/*
 * Append (probe, data) to the entry's NULL-terminated probes array by
 * building a fresh, larger copy (never modifying the published array in
 * place, so concurrent RCU readers stay safe). Returns the old array for
 * the caller to release after a grace period, or ERR_PTR on error.
 * Called with tracepoints_mutex held.
 */
static struct tracepoint_func *
tracepoint_entry_add_probe(struct tracepoint_entry *entry,
		void *probe, void *data)
{
	int nr_probes = 0;
	struct tracepoint_func *old, *new;

	if (WARN_ON(!probe))
		return ERR_PTR(-EINVAL);

	debug_print_probes(entry);
	old = entry->funcs;
	if (old) {
		/* (N -> N+1), (N != 0, 1) probes */
		for (nr_probes = 0; old[nr_probes].func; nr_probes++)
			/* Reject exact duplicates of the (probe, data) pair. */
			if (old[nr_probes].func == probe &&
			    old[nr_probes].data == data)
				return ERR_PTR(-EEXIST);
	}
	/* + 2 : one for new probe, one for NULL func */
	new = allocate_probes(nr_probes + 2);
	if (new == NULL)
		return ERR_PTR(-ENOMEM);
	if (old)
		memcpy(new, old, nr_probes * sizeof(struct tracepoint_func));
	new[nr_probes].func = probe;
	new[nr_probes].data = data;
	new[nr_probes + 1].func = NULL;	/* keep the array NULL-terminated */
	entry->refcount = nr_probes + 1;
	entry->funcs = new;
	debug_print_probes(entry);
	return old;
}
  126. static void *
  127. tracepoint_entry_remove_probe(struct tracepoint_entry *entry,
  128. void *probe, void *data)
  129. {
  130. int nr_probes = 0, nr_del = 0, i;
  131. struct tracepoint_func *old, *new;
  132. old = entry->funcs;
  133. if (!old)
  134. return ERR_PTR(-ENOENT);
  135. debug_print_probes(entry);
  136. /* (N -> M), (N > 1, M >= 0) probes */
  137. if (probe) {
  138. for (nr_probes = 0; old[nr_probes].func; nr_probes++) {
  139. if (old[nr_probes].func == probe &&
  140. old[nr_probes].data == data)
  141. nr_del++;
  142. }
  143. }
  144. /*
  145. * If probe is NULL, then nr_probes = nr_del = 0, and then the
  146. * entire entry will be removed.
  147. */
  148. if (nr_probes - nr_del == 0) {
  149. /* N -> 0, (N > 1) */
  150. entry->funcs = NULL;
  151. entry->refcount = 0;
  152. debug_print_probes(entry);
  153. return old;
  154. } else {
  155. int j = 0;
  156. /* N -> M, (N > 1, M > 0) */
  157. /* + 1 for NULL */
  158. new = allocate_probes(nr_probes - nr_del + 1);
  159. if (new == NULL)
  160. return ERR_PTR(-ENOMEM);
  161. for (i = 0; old[i].func; i++)
  162. if (old[i].func != probe || old[i].data != data)
  163. new[j++] = old[i];
  164. new[nr_probes - nr_del].func = NULL;
  165. entry->refcount = nr_probes - nr_del;
  166. entry->funcs = new;
  167. }
  168. debug_print_probes(entry);
  169. return old;
  170. }
  171. /*
  172. * Get tracepoint if the tracepoint is present in the tracepoint hash table.
  173. * Must be called with tracepoints_mutex held.
  174. * Returns NULL if not present.
  175. */
  176. static struct tracepoint_entry *get_tracepoint(const char *name)
  177. {
  178. struct hlist_head *head;
  179. struct tracepoint_entry *e;
  180. u32 hash = jhash(name, strlen(name), 0);
  181. head = &tracepoint_table[hash & (TRACEPOINT_TABLE_SIZE - 1)];
  182. hlist_for_each_entry(e, head, hlist) {
  183. if (!strcmp(name, e->name))
  184. return e;
  185. }
  186. return NULL;
  187. }
  188. /*
  189. * Add the tracepoint to the tracepoint hash table. Must be called with
  190. * tracepoints_mutex held.
  191. */
  192. static struct tracepoint_entry *add_tracepoint(const char *name)
  193. {
  194. struct hlist_head *head;
  195. struct tracepoint_entry *e;
  196. size_t name_len = strlen(name) + 1;
  197. u32 hash = jhash(name, name_len-1, 0);
  198. head = &tracepoint_table[hash & (TRACEPOINT_TABLE_SIZE - 1)];
  199. hlist_for_each_entry(e, head, hlist) {
  200. if (!strcmp(name, e->name)) {
  201. printk(KERN_NOTICE
  202. "tracepoint %s busy\n", name);
  203. return ERR_PTR(-EEXIST); /* Already there */
  204. }
  205. }
  206. /*
  207. * Using kmalloc here to allocate a variable length element. Could
  208. * cause some memory fragmentation if overused.
  209. */
  210. e = kmalloc(sizeof(struct tracepoint_entry) + name_len, GFP_KERNEL);
  211. if (!e)
  212. return ERR_PTR(-ENOMEM);
  213. memcpy(&e->name[0], name, name_len);
  214. e->funcs = NULL;
  215. e->refcount = 0;
  216. e->enabled = 0;
  217. hlist_add_head(&e->hlist, head);
  218. return e;
  219. }
/*
 * Remove the tracepoint from the tracepoint hash table. Must be called with
 * mutex_lock held.
 */
static inline void remove_tracepoint(struct tracepoint_entry *e)
{
	hlist_del(&e->hlist);
	kfree(e);	/* entry and its inline name share one allocation */
}
/*
 * Sets the probe callback corresponding to one tracepoint.
 * reg/unreg hooks fire only on enabled-state transitions, detected by
 * comparing @active against the current static_key state.
 */
static void set_tracepoint(struct tracepoint_entry **entry,
	struct tracepoint *elem, int active)
{
	WARN_ON(strcmp((*entry)->name, elem->name) != 0);

	/* Run the registration hook on the disabled -> enabled transition. */
	if (elem->regfunc && !static_key_enabled(&elem->key) && active)
		elem->regfunc();
	else if (elem->unregfunc && static_key_enabled(&elem->key) && !active)
		elem->unregfunc();

	/*
	 * rcu_assign_pointer has a smp_wmb() which makes sure that the new
	 * probe callbacks array is consistent before setting a pointer to it.
	 * This array is referenced by __DO_TRACE from
	 * include/linux/tracepoints.h. A matching smp_read_barrier_depends()
	 * is used.
	 */
	rcu_assign_pointer(elem->funcs, (*entry)->funcs);

	/* Flip the static key only when the enabled state actually changes. */
	if (active && !static_key_enabled(&elem->key))
		static_key_slow_inc(&elem->key);
	else if (!active && static_key_enabled(&elem->key))
		static_key_slow_dec(&elem->key);
}
/*
 * Disable a tracepoint and its probe callback.
 * Note: only waiting an RCU period after setting elem->call to the empty
 * function insures that the original callback is not used anymore. This insured
 * by preempt_disable around the call site.
 */
static void disable_tracepoint(struct tracepoint *elem)
{
	/* Run the unregistration hook if the tracepoint was enabled. */
	if (elem->unregfunc && static_key_enabled(&elem->key))
		elem->unregfunc();

	if (static_key_enabled(&elem->key))
		static_key_slow_dec(&elem->key);
	/* Publish an empty probe array; readers see NULL after this. */
	rcu_assign_pointer(elem->funcs, NULL);
}
  267. /**
  268. * tracepoint_update_probe_range - Update a probe range
  269. * @begin: beginning of the range
  270. * @end: end of the range
  271. *
  272. * Updates the probe callback corresponding to a range of tracepoints.
  273. * Called with tracepoints_mutex held.
  274. */
  275. static void tracepoint_update_probe_range(struct tracepoint * const *begin,
  276. struct tracepoint * const *end)
  277. {
  278. struct tracepoint * const *iter;
  279. struct tracepoint_entry *mark_entry;
  280. if (!begin)
  281. return;
  282. for (iter = begin; iter < end; iter++) {
  283. mark_entry = get_tracepoint((*iter)->name);
  284. if (mark_entry) {
  285. set_tracepoint(&mark_entry, *iter,
  286. !!mark_entry->refcount);
  287. mark_entry->enabled = !!mark_entry->refcount;
  288. } else {
  289. disable_tracepoint(*iter);
  290. }
  291. }
  292. }
#ifdef CONFIG_MODULES
/*
 * Re-sync probe state for every tracepoint provided by loaded modules.
 * Called with tracepoints_mutex held.
 */
void module_update_tracepoints(void)
{
	struct tp_module *tp_mod;

	list_for_each_entry(tp_mod, &tracepoint_module_list, list)
		tracepoint_update_probe_range(tp_mod->tracepoints_ptrs,
			tp_mod->tracepoints_ptrs + tp_mod->num_tracepoints);
}
#else /* CONFIG_MODULES */
/* Without module support there are no module tracepoints to update. */
void module_update_tracepoints(void)
{
}
#endif /* CONFIG_MODULES */
/*
 * Update probes, removing the faulty probes.
 * Called with tracepoints_mutex held.
 */
static void tracepoint_update_probes(void)
{
	/* Core kernel tracepoints */
	tracepoint_update_probe_range(__start___tracepoints_ptrs,
		__stop___tracepoints_ptrs);
	/* tracepoints in modules. */
	module_update_tracepoints();
}
/*
 * Register (probe, data) on the named tracepoint entry, creating the hash
 * table entry on first use. Returns the previous probe array (for deferred
 * release) or an ERR_PTR. Called with tracepoints_mutex held.
 */
static struct tracepoint_func *
tracepoint_add_probe(const char *name, void *probe, void *data)
{
	struct tracepoint_entry *entry;
	struct tracepoint_func *old;

	entry = get_tracepoint(name);
	if (!entry) {
		entry = add_tracepoint(name);
		if (IS_ERR(entry))
			return (struct tracepoint_func *)entry;
	}
	old = tracepoint_entry_add_probe(entry, probe, data);
	/* Don't leak a freshly created, still-empty entry on failure. */
	if (IS_ERR(old) && !entry->refcount)
		remove_tracepoint(entry);
	return old;
}
/**
 * tracepoint_probe_register - Connect a probe to a tracepoint
 * @name: tracepoint name
 * @probe: probe handler
 * @data: probe private data
 *
 * Returns:
 * - 0 if the probe was successfully registered, and tracepoint
 *   callsites are currently loaded for that probe,
 * - -ENODEV if the probe was successfully registered, but no tracepoint
 *   callsite is currently loaded for that probe,
 * - other negative error value on error.
 *
 * When tracepoint_probe_register() returns either 0 or -ENODEV,
 * parameters @name, @probe, and @data may be used by the tracepoint
 * infrastructure until the probe is unregistered.
 *
 * The probe address must at least be aligned on the architecture pointer size.
 */
int tracepoint_probe_register(const char *name, void *probe, void *data)
{
	struct tracepoint_func *old;
	struct tracepoint_entry *entry;
	int ret = 0;

	mutex_lock(&tracepoints_mutex);
	old = tracepoint_add_probe(name, probe, data);
	if (IS_ERR(old)) {
		mutex_unlock(&tracepoints_mutex);
		return PTR_ERR(old);
	}
	tracepoint_update_probes();	/* may update entry */
	entry = get_tracepoint(name);
	/* Make sure the entry was enabled */
	if (!entry || !entry->enabled)
		ret = -ENODEV;
	mutex_unlock(&tracepoints_mutex);
	/* Free the superseded probe array after an RCU grace period. */
	release_probes(old);
	return ret;
}
EXPORT_SYMBOL_GPL(tracepoint_probe_register);
/*
 * Unregister (probe, data) from the named tracepoint entry, deleting the
 * entry entirely once its last probe is gone. Returns the previous probe
 * array (for deferred release) or an ERR_PTR. Called with
 * tracepoints_mutex held.
 */
static struct tracepoint_func *
tracepoint_remove_probe(const char *name, void *probe, void *data)
{
	struct tracepoint_entry *entry;
	struct tracepoint_func *old;

	entry = get_tracepoint(name);
	if (!entry)
		return ERR_PTR(-ENOENT);
	old = tracepoint_entry_remove_probe(entry, probe, data);
	if (IS_ERR(old))
		return old;
	/* Last probe removed: drop the hash table entry as well. */
	if (!entry->refcount)
		remove_tracepoint(entry);
	return old;
}
/**
 * tracepoint_probe_unregister - Disconnect a probe from a tracepoint
 * @name: tracepoint name
 * @probe: probe function pointer
 * @data: probe private data
 *
 * We do not need to call a synchronize_sched to make sure the probes have
 * finished running before doing a module unload, because the module unload
 * itself uses stop_machine(), which insures that every preempt disabled section
 * have finished.
 *
 * Returns 0 on success, negative error value otherwise.
 */
int tracepoint_probe_unregister(const char *name, void *probe, void *data)
{
	struct tracepoint_func *old;

	mutex_lock(&tracepoints_mutex);
	old = tracepoint_remove_probe(name, probe, data);
	if (IS_ERR(old)) {
		mutex_unlock(&tracepoints_mutex);
		return PTR_ERR(old);
	}
	tracepoint_update_probes();	/* may update entry */
	mutex_unlock(&tracepoints_mutex);
	/* Free the superseded probe array after an RCU grace period. */
	release_probes(old);
	return 0;
}
EXPORT_SYMBOL_GPL(tracepoint_probe_unregister);
/* Probe arrays retired by the _noupdate variants, pending deferred free. */
static LIST_HEAD(old_probes);
/* Set when a _noupdate call deferred work for tracepoint_probe_update_all(). */
static int need_update;

/*
 * Queue a retired probe array on old_probes and flag that an update pass
 * is needed. Called with tracepoints_mutex held.
 */
static void tracepoint_add_old_probes(void *old)
{
	need_update = 1;
	if (old) {
		struct tp_probes *tp_probes = container_of(old,
			struct tp_probes, probes[0]);
		list_add(&tp_probes->u.list, &old_probes);
	}
}
/**
 * tracepoint_probe_register_noupdate - register a probe but not connect
 * @name: tracepoint name
 * @probe: probe handler
 * @data: probe private data
 *
 * caller must call tracepoint_probe_update_all()
 */
int tracepoint_probe_register_noupdate(const char *name, void *probe,
		void *data)
{
	struct tracepoint_func *old;

	mutex_lock(&tracepoints_mutex);
	old = tracepoint_add_probe(name, probe, data);
	if (IS_ERR(old)) {
		mutex_unlock(&tracepoints_mutex);
		return PTR_ERR(old);
	}
	/* Defer both the tracepoint update and the old-array free. */
	tracepoint_add_old_probes(old);
	mutex_unlock(&tracepoints_mutex);
	return 0;
}
EXPORT_SYMBOL_GPL(tracepoint_probe_register_noupdate);
/**
 * tracepoint_probe_unregister_noupdate - remove a probe but not disconnect
 * @name: tracepoint name
 * @probe: probe function pointer
 * @data: probe private data
 *
 * caller must call tracepoint_probe_update_all()
 */
int tracepoint_probe_unregister_noupdate(const char *name, void *probe,
		void *data)
{
	struct tracepoint_func *old;

	mutex_lock(&tracepoints_mutex);
	old = tracepoint_remove_probe(name, probe, data);
	if (IS_ERR(old)) {
		mutex_unlock(&tracepoints_mutex);
		return PTR_ERR(old);
	}
	/* Defer both the tracepoint update and the old-array free. */
	tracepoint_add_old_probes(old);
	mutex_unlock(&tracepoints_mutex);
	return 0;
}
EXPORT_SYMBOL_GPL(tracepoint_probe_unregister_noupdate);
/**
 * tracepoint_probe_update_all - update tracepoints
 *
 * Apply all registrations/unregistrations deferred by the _noupdate
 * variants, then hand the retired probe arrays to RCU for freeing.
 */
void tracepoint_probe_update_all(void)
{
	LIST_HEAD(release_probes);
	struct tp_probes *pos, *next;

	mutex_lock(&tracepoints_mutex);
	if (!need_update) {
		mutex_unlock(&tracepoints_mutex);
		return;
	}
	/* Steal the deferred list so it can be drained outside the lock. */
	if (!list_empty(&old_probes))
		list_replace_init(&old_probes, &release_probes);
	need_update = 0;
	tracepoint_update_probes();
	mutex_unlock(&tracepoints_mutex);
	list_for_each_entry_safe(pos, next, &release_probes, u.list) {
		list_del(&pos->u.list);
		call_rcu_sched(&pos->u.rcu, rcu_free_old_probes);
	}
}
EXPORT_SYMBOL_GPL(tracepoint_probe_update_all);
  495. /**
  496. * tracepoint_get_iter_range - Get a next tracepoint iterator given a range.
  497. * @tracepoint: current tracepoints (in), next tracepoint (out)
  498. * @begin: beginning of the range
  499. * @end: end of the range
  500. *
  501. * Returns whether a next tracepoint has been found (1) or not (0).
  502. * Will return the first tracepoint in the range if the input tracepoint is
  503. * NULL.
  504. */
  505. static int tracepoint_get_iter_range(struct tracepoint * const **tracepoint,
  506. struct tracepoint * const *begin, struct tracepoint * const *end)
  507. {
  508. if (!*tracepoint && begin != end) {
  509. *tracepoint = begin;
  510. return 1;
  511. }
  512. if (*tracepoint >= begin && *tracepoint < end)
  513. return 1;
  514. return 0;
  515. }
#ifdef CONFIG_MODULES
/*
 * Advance @iter to the next valid tracepoint: first the core kernel range,
 * then each module's range following the sorted module list. Resets the
 * iterator when nothing further is found.
 */
static void tracepoint_get_iter(struct tracepoint_iter *iter)
{
	int found = 0;
	struct tp_module *iter_mod;

	/* Core kernel tracepoints */
	if (!iter->module) {
		found = tracepoint_get_iter_range(&iter->tracepoint,
				__start___tracepoints_ptrs,
				__stop___tracepoints_ptrs);
		if (found)
			goto end;
	}
	/* Tracepoints in modules */
	mutex_lock(&tracepoints_mutex);
	list_for_each_entry(iter_mod, &tracepoint_module_list, list) {
		/*
		 * Sorted module list: skip modules already fully iterated;
		 * restart the range when moving on to a later module.
		 */
		if (iter_mod < iter->module)
			continue;
		else if (iter_mod > iter->module)
			iter->tracepoint = NULL;
		found = tracepoint_get_iter_range(&iter->tracepoint,
			iter_mod->tracepoints_ptrs,
			iter_mod->tracepoints_ptrs
				+ iter_mod->num_tracepoints);
		if (found) {
			iter->module = iter_mod;
			break;
		}
	}
	mutex_unlock(&tracepoints_mutex);
end:
	if (!found)
		tracepoint_iter_reset(iter);
}
#else /* CONFIG_MODULES */
/* Module-less variant: only the core kernel range exists. */
static void tracepoint_get_iter(struct tracepoint_iter *iter)
{
	int found = 0;

	/* Core kernel tracepoints */
	found = tracepoint_get_iter_range(&iter->tracepoint,
			__start___tracepoints_ptrs,
			__stop___tracepoints_ptrs);
	if (!found)
		tracepoint_iter_reset(iter);
}
#endif /* CONFIG_MODULES */
/* Position @iter on the first tracepoint (resets it if none exist). */
void tracepoint_iter_start(struct tracepoint_iter *iter)
{
	tracepoint_get_iter(iter);
}
EXPORT_SYMBOL_GPL(tracepoint_iter_start);
/* Advance @iter to the next tracepoint (resets it at end of iteration). */
void tracepoint_iter_next(struct tracepoint_iter *iter)
{
	iter->tracepoint++;
	/*
	 * iter->tracepoint may be invalid because we blindly incremented it.
	 * Make sure it is valid by marshalling on the tracepoints, getting the
	 * tracepoints from following modules if necessary.
	 */
	tracepoint_get_iter(iter);
}
EXPORT_SYMBOL_GPL(tracepoint_iter_next);
/* Nothing to release; provided for iterator API symmetry. */
void tracepoint_iter_stop(struct tracepoint_iter *iter)
{
}
EXPORT_SYMBOL_GPL(tracepoint_iter_stop);
/* Reset @iter so the next tracepoint_get_iter() starts from the beginning. */
void tracepoint_iter_reset(struct tracepoint_iter *iter)
{
#ifdef CONFIG_MODULES
	iter->module = NULL;
#endif /* CONFIG_MODULES */
	iter->tracepoint = NULL;
}
EXPORT_SYMBOL_GPL(tracepoint_iter_reset);
  593. #ifdef CONFIG_MODULES
/*
 * Handle MODULE_STATE_COMING: record the module's tracepoint array in the
 * sorted tracepoint_module_list and connect any already-registered probes.
 * Returns 0 on success (including skipped modules) or -ENOMEM.
 */
static int tracepoint_module_coming(struct module *mod)
{
	struct tp_module *tp_mod, *iter;
	int ret = 0;

	if (!mod->num_tracepoints)
		return 0;

	/*
	 * We skip modules that taint the kernel, especially those with different
	 * module headers (for forced load), to make sure we don't cause a crash.
	 * Staging and out-of-tree GPL modules are fine.
	 */
	if (mod->taints & ~((1 << TAINT_OOT_MODULE) | (1 << TAINT_CRAP)))
		return 0;
	mutex_lock(&tracepoints_mutex);
	tp_mod = kmalloc(sizeof(struct tp_module), GFP_KERNEL);
	if (!tp_mod) {
		ret = -ENOMEM;
		goto end;
	}
	tp_mod->num_tracepoints = mod->num_tracepoints;
	tp_mod->tracepoints_ptrs = mod->tracepoints_ptrs;

	/*
	 * tracepoint_module_list is kept sorted by struct module pointer
	 * address for iteration on tracepoints from a seq_file that can release
	 * the mutex between calls.
	 */
	list_for_each_entry_reverse(iter, &tracepoint_module_list, list) {
		BUG_ON(iter == tp_mod);	/* Should never be in the list twice */
		if (iter < tp_mod) {
			/* We belong to the location right after iter. */
			list_add(&tp_mod->list, &iter->list);
			goto module_added;
		}
	}
	/* We belong to the beginning of the list */
	list_add(&tp_mod->list, &tracepoint_module_list);
module_added:
	/* Arm the new module's tracepoints per current registrations. */
	tracepoint_update_probe_range(mod->tracepoints_ptrs,
		mod->tracepoints_ptrs + mod->num_tracepoints);
end:
	mutex_unlock(&tracepoints_mutex);
	return ret;
}
/*
 * Handle MODULE_STATE_GOING: disarm the module's tracepoints and drop its
 * entry from tracepoint_module_list (if one was recorded at "coming").
 */
static int tracepoint_module_going(struct module *mod)
{
	struct tp_module *pos;

	if (!mod->num_tracepoints)
		return 0;

	mutex_lock(&tracepoints_mutex);
	tracepoint_update_probe_range(mod->tracepoints_ptrs,
		mod->tracepoints_ptrs + mod->num_tracepoints);
	list_for_each_entry(pos, &tracepoint_module_list, list) {
		if (pos->tracepoints_ptrs == mod->tracepoints_ptrs) {
			list_del(&pos->list);
			kfree(pos);
			break;
		}
	}
	/*
	 * In the case of modules that were tainted at "coming", we'll simply
	 * walk through the list without finding it. We cannot use the "tainted"
	 * flag on "going", in case a module taints the kernel only after being
	 * loaded.
	 */
	mutex_unlock(&tracepoints_mutex);
	return 0;
}
  661. int tracepoint_module_notify(struct notifier_block *self,
  662. unsigned long val, void *data)
  663. {
  664. struct module *mod = data;
  665. int ret = 0;
  666. switch (val) {
  667. case MODULE_STATE_COMING:
  668. ret = tracepoint_module_coming(mod);
  669. break;
  670. case MODULE_STATE_LIVE:
  671. break;
  672. case MODULE_STATE_GOING:
  673. ret = tracepoint_module_going(mod);
  674. break;
  675. }
  676. return ret;
  677. }
/* Notifier hooking tracepoint bookkeeping into module load/unload. */
struct notifier_block tracepoint_module_nb = {
	.notifier_call = tracepoint_module_notify,
	.priority = 0,
};

/* Register the module notifier at boot. */
static int init_tracepoints(void)
{
	return register_module_notifier(&tracepoint_module_nb);
}
__initcall(init_tracepoints);
  687. #endif /* CONFIG_MODULES */
  688. #ifdef CONFIG_HAVE_SYSCALL_TRACEPOINTS
/* NB: reg/unreg are called while guarded with the tracepoints_mutex */
static int sys_tracepoint_refcount;

/*
 * On the first syscall tracepoint registration, set
 * TIF_SYSCALL_TRACEPOINT on every user task so syscall entry/exit take
 * the tracing path. Kernel threads (no mm) are skipped.
 */
void syscall_regfunc(void)
{
	unsigned long flags;
	struct task_struct *g, *t;

	if (!sys_tracepoint_refcount) {
		read_lock_irqsave(&tasklist_lock, flags);
		do_each_thread(g, t) {
			/* Skip kernel threads. */
			if (t->mm)
				set_tsk_thread_flag(t, TIF_SYSCALL_TRACEPOINT);
		} while_each_thread(g, t);
		read_unlock_irqrestore(&tasklist_lock, flags);
	}
	sys_tracepoint_refcount++;
}
/*
 * On the last syscall tracepoint unregistration, clear
 * TIF_SYSCALL_TRACEPOINT on every task so the fast syscall path is
 * restored.
 */
void syscall_unregfunc(void)
{
	unsigned long flags;
	struct task_struct *g, *t;

	sys_tracepoint_refcount--;
	if (!sys_tracepoint_refcount) {
		read_lock_irqsave(&tasklist_lock, flags);
		do_each_thread(g, t) {
			clear_tsk_thread_flag(t, TIF_SYSCALL_TRACEPOINT);
		} while_each_thread(g, t);
		read_unlock_irqrestore(&tasklist_lock, flags);
	}
}
  719. #endif