/*
 * Infrastructure for profiling code inserted by 'gcc -pg'.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally ported from the -rt patch by:
 *   Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code in the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 William Lee Irwin III
 */

#include <linux/stop_machine.h>
#include <linux/clocksource.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/kthread.h>
#include <linux/hardirq.h>
#include <linux/ftrace.h>
#include <linux/uaccess.h>
#include <linux/sysctl.h>
#include <linux/hash.h>
#include <linux/ctype.h>
#include <linux/list.h>

#include "trace.h"

int ftrace_enabled;
static int last_ftrace_enabled;

static DEFINE_SPINLOCK(ftrace_lock);
static DEFINE_MUTEX(ftrace_sysctl_lock);

static struct ftrace_ops ftrace_list_end __read_mostly =
{
	.func = ftrace_stub,
};

static struct ftrace_ops *ftrace_list __read_mostly = &ftrace_list_end;
ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;

/* mcount is defined per arch in assembly */
EXPORT_SYMBOL(mcount);

notrace void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
{
	struct ftrace_ops *op = ftrace_list;

	/* in case someone actually ports this to alpha! */
	read_barrier_depends();

	while (op != &ftrace_list_end) {
		/* silly alpha */
		read_barrier_depends();
		op->func(ip, parent_ip);
		op = op->next;
	}
}

/**
 * clear_ftrace_function - reset the ftrace function
 *
 * This NULLs the ftrace function and in essence stops
 * tracing. There may be a lag before all callers stop
 * going through the old function.
 */
void clear_ftrace_function(void)
{
	ftrace_trace_function = ftrace_stub;
}

static int notrace __register_ftrace_function(struct ftrace_ops *ops)
{
	/* Should never be called by interrupts */
	spin_lock(&ftrace_lock);

	ops->next = ftrace_list;
	/*
	 * We are entering ops into the ftrace_list but another
	 * CPU might be walking that list. We need to make sure
	 * the ops->next pointer is valid before another CPU sees
	 * the ops pointer included into the ftrace_list.
	 */
	smp_wmb();
	ftrace_list = ops;

	if (ftrace_enabled) {
		/*
		 * For one func, simply call it directly.
		 * For more than one func, call the chain.
		 */
		if (ops->next == &ftrace_list_end)
			ftrace_trace_function = ops->func;
		else
			ftrace_trace_function = ftrace_list_func;
	}

	spin_unlock(&ftrace_lock);

	return 0;
}

static int notrace __unregister_ftrace_function(struct ftrace_ops *ops)
{
	struct ftrace_ops **p;
	int ret = 0;

	spin_lock(&ftrace_lock);

	/*
	 * If we are removing the last function, then simply point
	 * to the ftrace_stub.
	 */
	if (ftrace_list == ops && ops->next == &ftrace_list_end) {
		ftrace_trace_function = ftrace_stub;
		ftrace_list = &ftrace_list_end;
		goto out;
	}

	for (p = &ftrace_list; *p != &ftrace_list_end; p = &(*p)->next)
		if (*p == ops)
			break;

	if (*p != ops) {
		ret = -1;
		goto out;
	}

	*p = (*p)->next;

	if (ftrace_enabled) {
		/* If we only have one func left, then call that directly */
		if (ftrace_list == &ftrace_list_end ||
		    ftrace_list->next == &ftrace_list_end)
			ftrace_trace_function = ftrace_list->func;
	}

 out:
	spin_unlock(&ftrace_lock);

	return ret;
}

#ifdef CONFIG_DYNAMIC_FTRACE

static struct task_struct *ftraced_task;
static DECLARE_WAIT_QUEUE_HEAD(ftraced_waiters);
static unsigned long ftraced_iteration_counter;

enum {
	FTRACE_ENABLE_CALLS		= (1 << 0),
	FTRACE_DISABLE_CALLS		= (1 << 1),
	FTRACE_UPDATE_TRACE_FUNC	= (1 << 2),
	FTRACE_ENABLE_MCOUNT		= (1 << 3),
	FTRACE_DISABLE_MCOUNT		= (1 << 4),
};

static int ftrace_filtered;

static struct hlist_head ftrace_hash[FTRACE_HASHSIZE];

static DEFINE_PER_CPU(int, ftrace_shutdown_disable_cpu);

static DEFINE_SPINLOCK(ftrace_shutdown_lock);
static DEFINE_MUTEX(ftraced_lock);
static DEFINE_MUTEX(ftrace_filter_lock);

struct ftrace_page {
	struct ftrace_page	*next;
	int			index;
	struct dyn_ftrace	records[];
} __attribute__((packed));

#define ENTRIES_PER_PAGE \
	((PAGE_SIZE - sizeof(struct ftrace_page)) / sizeof(struct dyn_ftrace))
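/*
 * Illustrative arithmetic (assumed numbers, not taken from this file):
 * with PAGE_SIZE == 4096 and a hypothetical 32-byte struct dyn_ftrace,
 * ENTRIES_PER_PAGE works out to (4096 - sizeof(struct ftrace_page)) / 32,
 * i.e. about 127 records per page, so NR_TO_INIT below preallocates
 * enough pages for roughly 10000 records.
 */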
/* estimate from running different kernels */
#define NR_TO_INIT		10000

static struct ftrace_page	*ftrace_pages_start;
static struct ftrace_page	*ftrace_pages;

static int ftraced_trigger;
static int ftraced_suspend;

static int ftrace_record_suspend;

static struct dyn_ftrace *ftrace_free_records;

static inline int
notrace ftrace_ip_in_hash(unsigned long ip, unsigned long key)
{
	struct dyn_ftrace *p;
	struct hlist_node *t;
	int found = 0;

	hlist_for_each_entry(p, t, &ftrace_hash[key], node) {
		if (p->ip == ip) {
			found = 1;
			break;
		}
	}

	return found;
}

static inline void notrace
ftrace_add_hash(struct dyn_ftrace *node, unsigned long key)
{
	hlist_add_head(&node->node, &ftrace_hash[key]);
}

static notrace void ftrace_free_rec(struct dyn_ftrace *rec)
{
	/* no locking, only called from kstop_machine */

	rec->ip = (unsigned long)ftrace_free_records;
	ftrace_free_records = rec;
	rec->flags |= FTRACE_FL_FREE;
}

static notrace struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip)
{
	struct dyn_ftrace *rec;

	/* First check for freed records */
	if (ftrace_free_records) {
		rec = ftrace_free_records;

		/* TODO: disable tracing altogether on this warning */
		if (unlikely(!(rec->flags & FTRACE_FL_FREE))) {
			WARN_ON_ONCE(1);
			ftrace_free_records = NULL;
			return NULL;
		}

		ftrace_free_records = (void *)rec->ip;
		memset(rec, 0, sizeof(*rec));
		return rec;
	}

	if (ftrace_pages->index == ENTRIES_PER_PAGE) {
		if (!ftrace_pages->next)
			return NULL;
		ftrace_pages = ftrace_pages->next;
	}

	return &ftrace_pages->records[ftrace_pages->index++];
}

static void notrace
ftrace_record_ip(unsigned long ip)
{
	struct dyn_ftrace *node;
	unsigned long flags;
	unsigned long key;
	int resched;
	int atomic;

	if (!ftrace_enabled)
		return;

	resched = need_resched();
	preempt_disable_notrace();

	/* We simply need to protect against recursion */
	__get_cpu_var(ftrace_shutdown_disable_cpu)++;
	if (__get_cpu_var(ftrace_shutdown_disable_cpu) != 1)
		goto out;

	if (unlikely(ftrace_record_suspend))
		goto out;

	key = hash_long(ip, FTRACE_HASHBITS);

	WARN_ON_ONCE(key >= FTRACE_HASHSIZE);

	if (ftrace_ip_in_hash(ip, key))
		goto out;

	atomic = irqs_disabled();

	spin_lock_irqsave(&ftrace_shutdown_lock, flags);

	/* This ip may have hit the hash before the lock */
	if (ftrace_ip_in_hash(ip, key))
		goto out_unlock;

	/*
	 * There's a slight race that the ftraced will update the
	 * hash and reset here. If it is already converted, skip it.
	 */
	if (ftrace_ip_converted(ip))
		goto out_unlock;

	node = ftrace_alloc_dyn_node(ip);
	if (!node)
		goto out_unlock;

	node->ip = ip;

	ftrace_add_hash(node, key);

	ftraced_trigger = 1;

 out_unlock:
	spin_unlock_irqrestore(&ftrace_shutdown_lock, flags);
 out:
	__get_cpu_var(ftrace_shutdown_disable_cpu)--;

	/* prevent recursion with scheduler */
	if (resched)
		preempt_enable_no_resched_notrace();
	else
		preempt_enable_notrace();
}

#define FTRACE_ADDR ((long)(&ftrace_caller))
#define MCOUNT_ADDR ((long)(&mcount))

static void notrace
__ftrace_replace_code(struct dyn_ftrace *rec,
		      unsigned char *old, unsigned char *new, int enable)
{
	unsigned long ip;
	int failed;

	ip = rec->ip;

	if (ftrace_filtered && enable) {
		unsigned long fl;
		/*
		 * If filtering is on:
		 *
		 * If this record is set to be filtered and
		 * is enabled then do nothing.
		 *
		 * If this record is set to be filtered and
		 * it is not enabled, enable it.
		 *
		 * If this record is not set to be filtered
		 * and it is not enabled do nothing.
		 *
		 * If this record is not set to be filtered and
		 * it is enabled, disable it.
		 */
		fl = rec->flags & (FTRACE_FL_FILTER | FTRACE_FL_ENABLED);

		if ((fl == (FTRACE_FL_FILTER | FTRACE_FL_ENABLED)) ||
		    (fl == 0))
			return;

		/*
		 * If it is enabled disable it,
		 * otherwise enable it!
		 */
		if (fl == FTRACE_FL_ENABLED) {
			/* swap new and old */
			new = old;
			old = ftrace_call_replace(ip, FTRACE_ADDR);
			rec->flags &= ~FTRACE_FL_ENABLED;
		} else {
			new = ftrace_call_replace(ip, FTRACE_ADDR);
			rec->flags |= FTRACE_FL_ENABLED;
		}
	} else {
		if (enable)
			new = ftrace_call_replace(ip, FTRACE_ADDR);
		else
			old = ftrace_call_replace(ip, FTRACE_ADDR);

		if (enable) {
			if (rec->flags & FTRACE_FL_ENABLED)
				return;
			rec->flags |= FTRACE_FL_ENABLED;
		} else {
			if (!(rec->flags & FTRACE_FL_ENABLED))
				return;
			rec->flags &= ~FTRACE_FL_ENABLED;
		}
	}

	failed = ftrace_modify_code(ip, old, new);
	if (failed) {
		unsigned long key;
		/* It is possible that the function hasn't been converted yet */
		key = hash_long(ip, FTRACE_HASHBITS);
		if (!ftrace_ip_in_hash(ip, key)) {
			rec->flags |= FTRACE_FL_FAILED;
			ftrace_free_rec(rec);
		}
	}
}

static void notrace ftrace_replace_code(int enable)
{
	unsigned char *new = NULL, *old = NULL;
	struct dyn_ftrace *rec;
	struct ftrace_page *pg;
	int i;

	if (enable)
		old = ftrace_nop_replace();
	else
		new = ftrace_nop_replace();

	for (pg = ftrace_pages_start; pg; pg = pg->next) {
		for (i = 0; i < pg->index; i++) {
			rec = &pg->records[i];

			/* don't modify code that has already faulted */
			if (rec->flags & FTRACE_FL_FAILED)
				continue;

			__ftrace_replace_code(rec, old, new, enable);
		}
	}
}

static notrace void ftrace_shutdown_replenish(void)
{
	if (ftrace_pages->next)
		return;

	/* allocate another page */
	ftrace_pages->next = (void *)get_zeroed_page(GFP_KERNEL);
}

static notrace void
ftrace_code_disable(struct dyn_ftrace *rec)
{
	unsigned long ip;
	unsigned char *nop, *call;
	int failed;

	ip = rec->ip;

	nop = ftrace_nop_replace();
	call = ftrace_call_replace(ip, MCOUNT_ADDR);

	failed = ftrace_modify_code(ip, call, nop);
	if (failed) {
		rec->flags |= FTRACE_FL_FAILED;
		ftrace_free_rec(rec);
	}
}

static int notrace __ftrace_modify_code(void *data)
{
	unsigned long addr;
	int *command = data;

	if (*command & FTRACE_ENABLE_CALLS)
		ftrace_replace_code(1);
	else if (*command & FTRACE_DISABLE_CALLS)
		ftrace_replace_code(0);

	if (*command & FTRACE_UPDATE_TRACE_FUNC)
		ftrace_update_ftrace_func(ftrace_trace_function);

	if (*command & FTRACE_ENABLE_MCOUNT) {
		addr = (unsigned long)ftrace_record_ip;
		ftrace_mcount_set(&addr);
	} else if (*command & FTRACE_DISABLE_MCOUNT) {
		addr = (unsigned long)ftrace_stub;
		ftrace_mcount_set(&addr);
	}

	return 0;
}

static void notrace ftrace_run_update_code(int command)
{
	stop_machine_run(__ftrace_modify_code, &command, NR_CPUS);
}
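/*
 * Callers may OR several of the command bits into a single stop_machine
 * pass; ftrace_startup() below, for instance, can request both a call-site
 * update and a trace-function update at once:
 *
 *	ftrace_run_update_code(FTRACE_ENABLE_CALLS |
 *			       FTRACE_UPDATE_TRACE_FUNC);
 */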
static ftrace_func_t saved_ftrace_func;

static void notrace ftrace_startup(void)
{
	int command = 0;

	mutex_lock(&ftraced_lock);
	ftraced_suspend++;
	if (ftraced_suspend == 1)
		command |= FTRACE_ENABLE_CALLS;

	if (saved_ftrace_func != ftrace_trace_function) {
		saved_ftrace_func = ftrace_trace_function;
		command |= FTRACE_UPDATE_TRACE_FUNC;
	}

	if (!command || !ftrace_enabled)
		goto out;

	ftrace_run_update_code(command);
 out:
	mutex_unlock(&ftraced_lock);
}

static void notrace ftrace_shutdown(void)
{
	int command = 0;

	mutex_lock(&ftraced_lock);
	ftraced_suspend--;
	if (!ftraced_suspend)
		command |= FTRACE_DISABLE_CALLS;

	if (saved_ftrace_func != ftrace_trace_function) {
		saved_ftrace_func = ftrace_trace_function;
		command |= FTRACE_UPDATE_TRACE_FUNC;
	}

	if (!command || !ftrace_enabled)
		goto out;

	ftrace_run_update_code(command);
 out:
	mutex_unlock(&ftraced_lock);
}

static void notrace ftrace_startup_sysctl(void)
{
	int command = FTRACE_ENABLE_MCOUNT;

	mutex_lock(&ftraced_lock);
	/* Force update next time */
	saved_ftrace_func = NULL;
	/* ftraced_suspend is true if we want ftrace running */
	if (ftraced_suspend)
		command |= FTRACE_ENABLE_CALLS;

	ftrace_run_update_code(command);
	mutex_unlock(&ftraced_lock);
}

static void notrace ftrace_shutdown_sysctl(void)
{
	int command = FTRACE_DISABLE_MCOUNT;

	mutex_lock(&ftraced_lock);
	/* ftraced_suspend is true if ftrace is running */
	if (ftraced_suspend)
		command |= FTRACE_DISABLE_CALLS;

	ftrace_run_update_code(command);
	mutex_unlock(&ftraced_lock);
}

static cycle_t		ftrace_update_time;
static unsigned long	ftrace_update_cnt;
unsigned long		ftrace_update_tot_cnt;

static int notrace __ftrace_update_code(void *ignore)
{
	struct dyn_ftrace *p;
	struct hlist_head head;
	struct hlist_node *t;
	int save_ftrace_enabled;
	cycle_t start, stop;
	int i;

	/* Don't be recording funcs now */
	save_ftrace_enabled = ftrace_enabled;
	ftrace_enabled = 0;

	start = ftrace_now(raw_smp_processor_id());
	ftrace_update_cnt = 0;

	/* No locks needed, the machine is stopped! */
	for (i = 0; i < FTRACE_HASHSIZE; i++) {
		if (hlist_empty(&ftrace_hash[i]))
			continue;

		head = ftrace_hash[i];
		INIT_HLIST_HEAD(&ftrace_hash[i]);

		/* all CPUS are stopped, we are safe to modify code */
		hlist_for_each_entry(p, t, &head, node) {
			ftrace_code_disable(p);
			ftrace_update_cnt++;
		}
	}

	stop = ftrace_now(raw_smp_processor_id());
	ftrace_update_time = stop - start;
	ftrace_update_tot_cnt += ftrace_update_cnt;

	ftrace_enabled = save_ftrace_enabled;

	return 0;
}

static void notrace ftrace_update_code(void)
{
	stop_machine_run(__ftrace_update_code, NULL, NR_CPUS);
}

static int notrace ftraced(void *ignore)
{
	unsigned long usecs;

	set_current_state(TASK_INTERRUPTIBLE);

	while (!kthread_should_stop()) {

		/* check once a second */
		schedule_timeout(HZ);

		mutex_lock(&ftrace_sysctl_lock);
		mutex_lock(&ftraced_lock);
		if (ftrace_enabled && ftraced_trigger && !ftraced_suspend) {
			ftrace_record_suspend++;
			ftrace_update_code();
			usecs = nsecs_to_usecs(ftrace_update_time);
			if (ftrace_update_tot_cnt > 100000) {
				ftrace_update_tot_cnt = 0;
				pr_info("hm, dftrace overflow: %lu change%s"
					" (%lu total) in %lu usec%s\n",
					ftrace_update_cnt,
					ftrace_update_cnt != 1 ? "s" : "",
					ftrace_update_tot_cnt,
					usecs, usecs != 1 ? "s" : "");
				WARN_ON_ONCE(1);
			}
			ftraced_trigger = 0;
			ftrace_record_suspend--;
		}
		ftraced_iteration_counter++;
		mutex_unlock(&ftraced_lock);
		mutex_unlock(&ftrace_sysctl_lock);

		wake_up_interruptible(&ftraced_waiters);

		ftrace_shutdown_replenish();

		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);
	return 0;
}

static int __init ftrace_dyn_table_alloc(void)
{
	struct ftrace_page *pg;
	int cnt;
	int i;

	/* allocate a few pages */
	ftrace_pages_start = (void *)get_zeroed_page(GFP_KERNEL);
	if (!ftrace_pages_start)
		return -1;

	/*
	 * Allocate a few more pages.
	 *
	 * TODO: have some parser search vmlinux before
	 *   final linking to find all calls to ftrace.
	 *   Then we can:
	 *    a) know how many pages to allocate.
	 *     and/or
	 *    b) set up the table then.
	 *
	 *  The dynamic code is still necessary for
	 *  modules.
	 */

	pg = ftrace_pages = ftrace_pages_start;

	cnt = NR_TO_INIT / ENTRIES_PER_PAGE;

	for (i = 0; i < cnt; i++) {
		pg->next = (void *)get_zeroed_page(GFP_KERNEL);

		/* If we fail, we'll try later anyway */
		if (!pg->next)
			break;

		pg = pg->next;
	}

	return 0;
}

enum {
	FTRACE_ITER_FILTER	= (1 << 0),
	FTRACE_ITER_CONT	= (1 << 1),
};

#define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */

struct ftrace_iterator {
	loff_t			pos;
	struct ftrace_page	*pg;
	unsigned		idx;
	unsigned		flags;
	unsigned char		buffer[FTRACE_BUFF_MAX+1];
	unsigned		buffer_idx;
	unsigned		filtered;
};

static void notrace *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	struct dyn_ftrace *rec = NULL;

	(*pos)++;

 retry:
	if (iter->idx >= iter->pg->index) {
		if (iter->pg->next) {
			iter->pg = iter->pg->next;
			iter->idx = 0;
			goto retry;
		}
	} else {
		rec = &iter->pg->records[iter->idx++];
		if ((rec->flags & FTRACE_FL_FAILED) ||
		    ((iter->flags & FTRACE_ITER_FILTER) &&
		     !(rec->flags & FTRACE_FL_FILTER))) {
			rec = NULL;
			goto retry;
		}
	}

	iter->pos = *pos;

	return rec;
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	void *p = NULL;
	loff_t l = -1;

	if (*pos != iter->pos) {
		for (p = t_next(m, p, &l); p && l < *pos; p = t_next(m, p, &l))
			;
	} else {
		l = *pos;
		p = t_next(m, p, &l);
	}

	return p;
}

static void t_stop(struct seq_file *m, void *p)
{
}

static int t_show(struct seq_file *m, void *v)
{
	struct dyn_ftrace *rec = v;
	char str[KSYM_SYMBOL_LEN];

	if (!rec)
		return 0;

	kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);

	seq_printf(m, "%s\n", str);

	return 0;
}

static struct seq_operations show_ftrace_seq_ops = {
	.start = t_start,
	.next = t_next,
	.stop = t_stop,
	.show = t_show,
};

static int notrace
ftrace_avail_open(struct inode *inode, struct file *file)
{
	struct ftrace_iterator *iter;
	int ret;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return -ENOMEM;

	iter->pg = ftrace_pages_start;
	iter->pos = -1;

	ret = seq_open(file, &show_ftrace_seq_ops);
	if (!ret) {
		struct seq_file *m = file->private_data;
		m->private = iter;
	} else {
		kfree(iter);
	}

	return ret;
}

int ftrace_avail_release(struct inode *inode, struct file *file)
{
	struct seq_file *m = (struct seq_file *)file->private_data;
	struct ftrace_iterator *iter = m->private;

	seq_release(inode, file);
	kfree(iter);

	return 0;
}

static void notrace ftrace_filter_reset(void)
{
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	unsigned i;

	/* keep kstop machine from running */
	preempt_disable();
	ftrace_filtered = 0;
	pg = ftrace_pages_start;
	while (pg) {
		for (i = 0; i < pg->index; i++) {
			rec = &pg->records[i];
			if (rec->flags & FTRACE_FL_FAILED)
				continue;
			rec->flags &= ~FTRACE_FL_FILTER;
		}
		pg = pg->next;
	}
	preempt_enable();
}

static int notrace
ftrace_filter_open(struct inode *inode, struct file *file)
{
	struct ftrace_iterator *iter;
	int ret = 0;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return -ENOMEM;

	mutex_lock(&ftrace_filter_lock);
	if ((file->f_mode & FMODE_WRITE) &&
	    !(file->f_flags & O_APPEND))
		ftrace_filter_reset();

	if (file->f_mode & FMODE_READ) {
		iter->pg = ftrace_pages_start;
		iter->pos = -1;
		iter->flags = FTRACE_ITER_FILTER;

		ret = seq_open(file, &show_ftrace_seq_ops);
		if (!ret) {
			struct seq_file *m = file->private_data;
			m->private = iter;
		} else
			kfree(iter);
	} else
		file->private_data = iter;
	mutex_unlock(&ftrace_filter_lock);

	return ret;
}

static ssize_t notrace
ftrace_filter_read(struct file *file, char __user *ubuf,
		   size_t cnt, loff_t *ppos)
{
	if (file->f_mode & FMODE_READ)
		return seq_read(file, ubuf, cnt, ppos);
	else
		return -EPERM;
}

static loff_t notrace
ftrace_filter_lseek(struct file *file, loff_t offset, int origin)
{
	loff_t ret;

	if (file->f_mode & FMODE_READ)
		ret = seq_lseek(file, offset, origin);
	else
		file->f_pos = ret = 1;

	return ret;
}

enum {
	MATCH_FULL,
	MATCH_FRONT_ONLY,
	MATCH_MIDDLE_ONLY,
	MATCH_END_ONLY,
};
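/*
 * Pattern classification examples (derived from the parser in
 * ftrace_match() below; the symbol names are only illustrative):
 *
 *	"sched_switch"  -> MATCH_FULL         compare the whole symbol
 *	"sched_*"       -> MATCH_FRONT_ONLY   compare the prefix
 *	"*_switch"      -> MATCH_END_ONLY     compare the suffix
 *	"*sched*"       -> MATCH_MIDDLE_ONLY  substring search
 */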
static void notrace
ftrace_match(unsigned char *buff, int len)
{
	char str[KSYM_SYMBOL_LEN];
	char *search = NULL;
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	int type = MATCH_FULL;
	unsigned i, match = 0, search_len = 0;

	for (i = 0; i < len; i++) {
		if (buff[i] == '*') {
			if (!i) {
				search = buff + i + 1;
				type = MATCH_END_ONLY;
				search_len = len - (i + 1);
			} else {
				if (type == MATCH_END_ONLY) {
					type = MATCH_MIDDLE_ONLY;
				} else {
					match = i;
					type = MATCH_FRONT_ONLY;
				}
				buff[i] = 0;
				break;
			}
		}
	}

	/* keep kstop machine from running */
	preempt_disable();
	ftrace_filtered = 1;
	pg = ftrace_pages_start;
	while (pg) {
		for (i = 0; i < pg->index; i++) {
			int matched = 0;
			char *ptr;

			rec = &pg->records[i];
			if (rec->flags & FTRACE_FL_FAILED)
				continue;
			kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
			switch (type) {
			case MATCH_FULL:
				if (strcmp(str, buff) == 0)
					matched = 1;
				break;
			case MATCH_FRONT_ONLY:
				if (memcmp(str, buff, match) == 0)
					matched = 1;
				break;
			case MATCH_MIDDLE_ONLY:
				if (strstr(str, search))
					matched = 1;
				break;
			case MATCH_END_ONLY:
				ptr = strstr(str, search);
				if (ptr && (ptr[search_len] == 0))
					matched = 1;
				break;
			}
			if (matched)
				rec->flags |= FTRACE_FL_FILTER;
		}
		pg = pg->next;
	}
	preempt_enable();
}
static ssize_t notrace
ftrace_filter_write(struct file *file, const char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	struct ftrace_iterator *iter;
	char ch;
	size_t read = 0;
	ssize_t ret;

	if (!cnt)	/* cnt is a size_t and can never be negative */
		return 0;

	mutex_lock(&ftrace_filter_lock);

	if (file->f_mode & FMODE_READ) {
		struct seq_file *m = file->private_data;
		iter = m->private;
	} else
		iter = file->private_data;

	if (!*ppos) {
		iter->flags &= ~FTRACE_ITER_CONT;
		iter->buffer_idx = 0;
	}

	ret = get_user(ch, ubuf++);
	if (ret)
		goto out;
	read++;
	cnt--;

	/* only start a new token if we are not continuing the last one */
	if (!(iter->flags & FTRACE_ITER_CONT)) {
		/* skip white space */
		while (cnt && isspace(ch)) {
			ret = get_user(ch, ubuf++);
			if (ret)
				goto out;
			read++;
			cnt--;
		}

		if (isspace(ch)) {
			file->f_pos += read;
			ret = read;
			goto out;
		}

		iter->buffer_idx = 0;
	}

	while (cnt && !isspace(ch)) {
		if (iter->buffer_idx < FTRACE_BUFF_MAX)
			iter->buffer[iter->buffer_idx++] = ch;
		else {
			ret = -EINVAL;
			goto out;
		}
		ret = get_user(ch, ubuf++);
		if (ret)
			goto out;
		read++;
		cnt--;
	}

	if (isspace(ch)) {
		iter->filtered++;
		iter->buffer[iter->buffer_idx] = 0;
		ftrace_match(iter->buffer, iter->buffer_idx);
		iter->buffer_idx = 0;
	} else
		iter->flags |= FTRACE_ITER_CONT;

	file->f_pos += read;

	ret = read;
 out:
	mutex_unlock(&ftrace_filter_lock);

	return ret;
}

/**
 * ftrace_set_filter - set a function to filter on in ftrace
 * @buf: the string that holds the function filter text.
 * @len: the length of the string.
 * @reset: non-zero to reset all filters before applying this filter.
 *
 * Filters denote which functions should be enabled when tracing is enabled.
 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
 */
notrace void ftrace_set_filter(unsigned char *buf, int len, int reset)
{
	mutex_lock(&ftrace_filter_lock);
	if (reset)
		ftrace_filter_reset();
	if (buf)
		ftrace_match(buf, len);
	mutex_unlock(&ftrace_filter_lock);
}
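/*
 * Example (illustrative sketch, not part of the original file): trace
 * only scheduler functions, dropping any previously set filter:
 *
 *	static char buf[] = "sched_*";
 *	ftrace_set_filter(buf, strlen(buf), 1);
 *
 * Note that @buf must be writable: ftrace_match() terminates the
 * wildcard in place, so a string literal in read-only memory will not do.
 */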
static int notrace
ftrace_filter_release(struct inode *inode, struct file *file)
{
	struct seq_file *m = (struct seq_file *)file->private_data;
	struct ftrace_iterator *iter;

	mutex_lock(&ftrace_filter_lock);
	if (file->f_mode & FMODE_READ) {
		iter = m->private;

		seq_release(inode, file);
	} else
		iter = file->private_data;

	if (iter->buffer_idx) {
		iter->filtered++;
		iter->buffer[iter->buffer_idx] = 0;
		ftrace_match(iter->buffer, iter->buffer_idx);
	}

	mutex_lock(&ftrace_sysctl_lock);
	mutex_lock(&ftraced_lock);
	if (iter->filtered && ftraced_suspend && ftrace_enabled)
		ftrace_run_update_code(FTRACE_ENABLE_CALLS);
	mutex_unlock(&ftraced_lock);
	mutex_unlock(&ftrace_sysctl_lock);

	kfree(iter);
	mutex_unlock(&ftrace_filter_lock);
	return 0;
}

static struct file_operations ftrace_avail_fops = {
	.open = ftrace_avail_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = ftrace_avail_release,
};

static struct file_operations ftrace_filter_fops = {
	.open = ftrace_filter_open,
	.read = ftrace_filter_read,
	.write = ftrace_filter_write,
	.llseek = ftrace_filter_lseek,
	.release = ftrace_filter_release,
};

/**
 * ftrace_force_update - force an update to all recording ftrace functions
 *
 * The ftrace dynamic update daemon only wakes up once a second.
 * There may be cases where an update needs to be done immediately
 * for tests or internal kernel tracing to begin. This function
 * wakes the daemon to do an update and will not return until the
 * update is complete.
 */
int ftrace_force_update(void)
{
	unsigned long last_counter;
	DECLARE_WAITQUEUE(wait, current);
	int ret = 0;

	if (!ftraced_task)
		return -ENODEV;

	mutex_lock(&ftraced_lock);
	last_counter = ftraced_iteration_counter;

	set_current_state(TASK_INTERRUPTIBLE);
	add_wait_queue(&ftraced_waiters, &wait);

	do {
		mutex_unlock(&ftraced_lock);
		wake_up_process(ftraced_task);
		schedule();
		mutex_lock(&ftraced_lock);
		if (signal_pending(current)) {
			ret = -EINTR;
			break;
		}
		set_current_state(TASK_INTERRUPTIBLE);
	} while (last_counter == ftraced_iteration_counter);

	mutex_unlock(&ftraced_lock);
	remove_wait_queue(&ftraced_waiters, &wait);
	set_current_state(TASK_RUNNING);

	return ret;
}

static __init int ftrace_init_debugfs(void)
{
	struct dentry *d_tracer;
	struct dentry *entry;

	d_tracer = tracing_init_dentry();

	entry = debugfs_create_file("available_filter_functions", 0444,
				    d_tracer, NULL, &ftrace_avail_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'available_filter_functions' entry\n");

	entry = debugfs_create_file("set_ftrace_filter", 0644, d_tracer,
				    NULL, &ftrace_filter_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'set_ftrace_filter' entry\n");

	return 0;
}

fs_initcall(ftrace_init_debugfs);

static int __init notrace ftrace_dynamic_init(void)
{
	struct task_struct *p;
	unsigned long addr;
	int ret;

	addr = (unsigned long)ftrace_record_ip;
	stop_machine_run(ftrace_dyn_arch_init, &addr, NR_CPUS);

	/* ftrace_dyn_arch_init places the return code in addr */
	if (addr)
		return addr;

	ret = ftrace_dyn_table_alloc();
	if (ret)
		return ret;

	p = kthread_run(ftraced, NULL, "ftraced");
	if (IS_ERR(p))
		return -1;

	last_ftrace_enabled = ftrace_enabled = 1;

	ftraced_task = p;

	return 0;
}

core_initcall(ftrace_dynamic_init);

#else
# define ftrace_startup()		do { } while (0)
# define ftrace_shutdown()		do { } while (0)
# define ftrace_startup_sysctl()	do { } while (0)
# define ftrace_shutdown_sysctl()	do { } while (0)
#endif /* CONFIG_DYNAMIC_FTRACE */

/**
 * register_ftrace_function - register a function for profiling
 * @ops: ops structure that holds the function for profiling.
 *
 * Register a function to be called by all functions in the
 * kernel.
 *
 * Note: @ops->func and all the functions it calls must be labeled
 *       with "notrace", otherwise it will go into a
 *       recursive loop.
 */
int register_ftrace_function(struct ftrace_ops *ops)
{
	int ret;

	mutex_lock(&ftrace_sysctl_lock);
	ret = __register_ftrace_function(ops);
	ftrace_startup();
	mutex_unlock(&ftrace_sysctl_lock);

	return ret;
}
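/*
 * Usage sketch (illustrative; "my_tracer" and "my_ops" are hypothetical
 * names, not defined in this file). As the note above warns, the
 * callback and everything it calls must be notrace:
 *
 *	static notrace void my_tracer(unsigned long ip,
 *				      unsigned long parent_ip)
 *	{
 *		(record ip and parent_ip somewhere cheap)
 *	}
 *
 *	static struct ftrace_ops my_ops __read_mostly = {
 *		.func = my_tracer,
 *	};
 *
 *	register_ftrace_function(&my_ops);
 *	...
 *	unregister_ftrace_function(&my_ops);
 */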
/**
 * unregister_ftrace_function - unregister a function for profiling.
 * @ops: ops structure that holds the function to unregister
 *
 * Unregister a function that was added to be called by ftrace profiling.
 */
int unregister_ftrace_function(struct ftrace_ops *ops)
{
	int ret;

	mutex_lock(&ftrace_sysctl_lock);
	ret = __unregister_ftrace_function(ops);
	ftrace_shutdown();
	mutex_unlock(&ftrace_sysctl_lock);

	return ret;
}

notrace int
ftrace_enable_sysctl(struct ctl_table *table, int write,
		     struct file *file, void __user *buffer, size_t *lenp,
		     loff_t *ppos)
{
	int ret;

	mutex_lock(&ftrace_sysctl_lock);

	ret = proc_dointvec(table, write, file, buffer, lenp, ppos);

	if (ret || !write || (last_ftrace_enabled == ftrace_enabled))
		goto out;

	last_ftrace_enabled = ftrace_enabled;

	if (ftrace_enabled) {

		ftrace_startup_sysctl();

		/* we are starting ftrace again */
		if (ftrace_list != &ftrace_list_end) {
			if (ftrace_list->next == &ftrace_list_end)
				ftrace_trace_function = ftrace_list->func;
			else
				ftrace_trace_function = ftrace_list_func;
		}

	} else {
		/* stopping ftrace calls (just send to ftrace_stub) */
		ftrace_trace_function = ftrace_stub;

		ftrace_shutdown_sysctl();
	}

 out:
	mutex_unlock(&ftrace_sysctl_lock);
	return ret;
}