trace.c 91 KB

  1. /*
  2. * ring buffer based function tracer
  3. *
  4. * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
  5. * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
  6. *
  7. * Originally taken from the RT patch by:
  8. * Arnaldo Carvalho de Melo <acme@redhat.com>
  9. *
  10. * Based on code from the latency_tracer, that is:
  11. * Copyright (C) 2004-2006 Ingo Molnar
  12. * Copyright (C) 2004 William Lee Irwin III
  13. */
  14. #include <linux/ring_buffer.h>
  15. #include <linux/utsrelease.h>
  16. #include <linux/stacktrace.h>
  17. #include <linux/writeback.h>
  18. #include <linux/kallsyms.h>
  19. #include <linux/seq_file.h>
  20. #include <linux/notifier.h>
  21. #include <linux/irqflags.h>
  22. #include <linux/debugfs.h>
  23. #include <linux/pagemap.h>
  24. #include <linux/hardirq.h>
  25. #include <linux/linkage.h>
  26. #include <linux/uaccess.h>
  27. #include <linux/kprobes.h>
  28. #include <linux/ftrace.h>
  29. #include <linux/module.h>
  30. #include <linux/percpu.h>
  31. #include <linux/splice.h>
  32. #include <linux/kdebug.h>
  33. #include <linux/ctype.h>
  34. #include <linux/init.h>
  35. #include <linux/poll.h>
  36. #include <linux/gfp.h>
  37. #include <linux/fs.h>
  38. #include "trace.h"
  39. #include "trace_output.h"
  40. #define TRACE_BUFFER_FLAGS (RB_FL_OVERWRITE)
  41. unsigned long __read_mostly tracing_max_latency;
  42. unsigned long __read_mostly tracing_thresh;
  43. /*
  44. * We need to change this state when a selftest is running.
  45. * A selftest will look into the ring-buffer to count the
  46. * entries inserted during the selftest, although concurrent
  47. * insertions into the ring-buffer, such as trace_printk, could occur
  48. * at the same time, giving false positive or negative results.
  49. */
  50. static bool __read_mostly tracing_selftest_running;
  51. /*
  52. * If a tracer is running, we do not want to run SELFTEST.
  53. */
  54. static bool __read_mostly tracing_selftest_disabled;
  55. /* For tracers that don't implement custom flags */
  56. static struct tracer_opt dummy_tracer_opt[] = {
  57. { }
  58. };
  59. static struct tracer_flags dummy_tracer_flags = {
  60. .val = 0,
  61. .opts = dummy_tracer_opt
  62. };
  63. static int dummy_set_flag(u32 old_flags, u32 bit, int set)
  64. {
  65. return 0;
  66. }
  67. /*
  68. * Kill all tracing for good (never come back).
  69. * It is initialized to 1 but will turn to zero if the initialization
  70. * of the tracer is successful. But that is the only place that sets
  71. * this back to zero.
  72. */
  73. static int tracing_disabled = 1;
  74. static DEFINE_PER_CPU(local_t, ftrace_cpu_disabled);
  75. static inline void ftrace_disable_cpu(void)
  76. {
  77. preempt_disable();
  78. local_inc(&__get_cpu_var(ftrace_cpu_disabled));
  79. }
  80. static inline void ftrace_enable_cpu(void)
  81. {
  82. local_dec(&__get_cpu_var(ftrace_cpu_disabled));
  83. preempt_enable();
  84. }
  85. static cpumask_var_t __read_mostly tracing_buffer_mask;
  86. /* Define which cpu buffers are currently read in trace_pipe */
  87. static cpumask_var_t tracing_reader_cpumask;
  88. #define for_each_tracing_cpu(cpu) \
  89. for_each_cpu(cpu, tracing_buffer_mask)
  90. /*
  91. * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
  92. *
  93. * If there is an oops (or kernel panic) and the ftrace_dump_on_oops
  94. * is set, then ftrace_dump is called. This will output the contents
  95. * of the ftrace buffers to the console. This is very useful for
  96. * capturing traces that lead to crashes and outputting them to a
  97. * serial console.
  98. *
  99. * It is off by default, but you can enable it either by specifying
  100. * "ftrace_dump_on_oops" on the kernel command line, or by setting
  101. * /proc/sys/kernel/ftrace_dump_on_oops to true.
  102. */
  103. int ftrace_dump_on_oops;
  104. static int tracing_set_tracer(const char *buf);
  105. #define BOOTUP_TRACER_SIZE 100
  106. static char bootup_tracer_buf[BOOTUP_TRACER_SIZE] __initdata;
  107. static char *default_bootup_tracer;
  108. static int __init set_ftrace(char *str)
  109. {
  110. strncpy(bootup_tracer_buf, str, BOOTUP_TRACER_SIZE);
  111. default_bootup_tracer = bootup_tracer_buf;
  112. return 1;
  113. }
  114. __setup("ftrace=", set_ftrace);
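/*
 * Example (illustrative): booting with "ftrace=<tracer name>", e.g.
 * "ftrace=function" assuming that tracer is built in, stores the name in
 * bootup_tracer_buf; register_tracer() below then starts that tracer as
 * soon as it is registered.
 */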
  115. static int __init set_ftrace_dump_on_oops(char *str)
  116. {
  117. ftrace_dump_on_oops = 1;
  118. return 1;
  119. }
  120. __setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);
  121. long
  122. ns2usecs(cycle_t nsec)
  123. {
  124. nsec += 500;
  125. do_div(nsec, 1000);
  126. return nsec;
  127. }
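/*
 * Note: ns2usecs() rounds to the nearest microsecond; adding 500 before
 * the divide means e.g. 1499 ns -> 1 us and 1500 ns -> 2 us.
 */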
  128. cycle_t ftrace_now(int cpu)
  129. {
  130. u64 ts = ring_buffer_time_stamp(cpu);
  131. ring_buffer_normalize_time_stamp(cpu, &ts);
  132. return ts;
  133. }
  134. /*
  135. * The global_trace is the descriptor that holds the tracing
  136. * buffers for the live tracing. For each CPU, it contains
  137. * a linked list of pages that will store trace entries. The
  138. * page descriptors of the pages in memory are used to hold
  139. * the linked list by linking the lru item in the page descriptor
  140. * to each of the pages in the buffer per CPU.
  141. *
  142. * For each active CPU there is a data field that holds the
  143. * pages for the buffer for that CPU. Each CPU has the same number
  144. * of pages allocated for its buffer.
  145. */
  146. static struct trace_array global_trace;
  147. static DEFINE_PER_CPU(struct trace_array_cpu, global_trace_cpu);
  148. /*
  149. * The max_tr is used to snapshot the global_trace when a maximum
  150. * latency is reached. Some tracers will use this to store a maximum
  151. * trace while it continues examining live traces.
  152. *
  153. * The buffers for the max_tr are set up the same as the global_trace.
  154. * When a snapshot is taken, the linked list of the max_tr is swapped
  155. * with the linked list of the global_trace and the buffers are reset for
  156. * the global_trace so the tracing can continue.
  157. */
  158. static struct trace_array max_tr;
  159. static DEFINE_PER_CPU(struct trace_array_cpu, max_data);
  160. /* tracer_enabled is used to toggle activation of a tracer */
  161. static int tracer_enabled = 1;
  162. /**
  163. * tracing_is_enabled - return tracer_enabled status
  164. *
  165. * This function is used by other tracers to know the status
  166. * of the tracer_enabled flag. Tracers may use this function
  167. * to know if they should enable their features when starting
  168. * up. See irqsoff tracer for an example (start_irqsoff_tracer).
  169. */
  170. int tracing_is_enabled(void)
  171. {
  172. return tracer_enabled;
  173. }
  174. /*
  175. * trace_buf_size is the size in bytes that is allocated
  176. * for a buffer. Note, the number of bytes is always rounded
  177. * to page size.
  178. *
  179. * This number is purposely set to a low value of 16384.
  180. * If a dump on oops happens, it is much appreciated
  181. * not to have to wait for all that output. In any case, this is
  182. * configurable at both boot time and run time.
  183. */
  184. #define TRACE_BUF_SIZE_DEFAULT 1441792UL /* 16384 * 88 (sizeof(entry)) */
  185. static unsigned long trace_buf_size = TRACE_BUF_SIZE_DEFAULT;
  186. /* trace_types holds a link list of available tracers. */
  187. static struct tracer *trace_types __read_mostly;
  188. /* current_trace points to the tracer that is currently active */
  189. static struct tracer *current_trace __read_mostly;
  190. /*
  191. * max_tracer_type_len is used to simplify the allocating of
  192. * buffers to read userspace tracer names. We keep track of
  193. * the longest tracer name registered.
  194. */
  195. static int max_tracer_type_len;
  196. /*
  197. * trace_types_lock is used to protect the trace_types list.
  198. * This lock is also used to keep user access serialized.
  199. * Accesses from userspace will grab this lock while userspace
  200. * activities happen inside the kernel.
  201. */
  202. static DEFINE_MUTEX(trace_types_lock);
  203. /* trace_wait is a waitqueue for tasks blocked on trace_poll */
  204. static DECLARE_WAIT_QUEUE_HEAD(trace_wait);
  205. /* trace_flags holds trace_options default values */
  206. unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |
  207. TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO;
  208. /**
  209. * trace_wake_up - wake up tasks waiting for trace input
  210. *
  211. * Simply wakes up any task that is blocked on the trace_wait
  212. * queue. This is used with trace_poll for tasks polling the trace.
  213. */
  214. void trace_wake_up(void)
  215. {
  216. /*
  217. * The runqueue_is_locked() can fail, but this is the best we
  218. * have for now:
  219. */
  220. if (!(trace_flags & TRACE_ITER_BLOCK) && !runqueue_is_locked())
  221. wake_up(&trace_wait);
  222. }
  223. static int __init set_buf_size(char *str)
  224. {
  225. unsigned long buf_size;
  226. int ret;
  227. if (!str)
  228. return 0;
  229. ret = strict_strtoul(str, 0, &buf_size);
  230. /* nr_entries cannot be zero */
  231. if (ret < 0 || buf_size == 0)
  232. return 0;
  233. trace_buf_size = buf_size;
  234. return 1;
  235. }
  236. __setup("trace_buf_size=", set_buf_size);
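/*
 * Example (illustrative): booting with "trace_buf_size=1048576" requests
 * roughly a 1 MB per-cpu buffer; as noted above, the byte count is
 * rounded to whole pages by the ring buffer.
 */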
  237. unsigned long nsecs_to_usecs(unsigned long nsecs)
  238. {
  239. return nsecs / 1000;
  240. }
  241. /* These must match the bit positions in trace_iterator_flags */
  242. static const char *trace_options[] = {
  243. "print-parent",
  244. "sym-offset",
  245. "sym-addr",
  246. "verbose",
  247. "raw",
  248. "hex",
  249. "bin",
  250. "block",
  251. "stacktrace",
  252. "sched-tree",
  253. "trace_printk",
  254. "ftrace_preempt",
  255. "branch",
  256. "annotate",
  257. "userstacktrace",
  258. "sym-userobj",
  259. "printk-msg-only",
  260. "context-info",
  261. "latency-format",
  262. NULL
  263. };
  264. /*
  265. * ftrace_max_lock is used to protect the swapping of buffers
  266. * when taking a max snapshot. The buffers themselves are
  267. * protected by per_cpu spinlocks. But the action of the swap
  268. * needs its own lock.
  269. *
  270. * This is defined as a raw_spinlock_t in order to help
  271. * with performance when lockdep debugging is enabled.
  272. */
  273. static raw_spinlock_t ftrace_max_lock =
  274. (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
  275. /*
  276. * Copy the new maximum trace into the separate maximum-trace
  277. * structure. (this way the maximum trace is permanently saved,
  278. * for later retrieval via /debugfs/tracing/latency_trace)
  279. */
  280. static void
  281. __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
  282. {
  283. struct trace_array_cpu *data = tr->data[cpu];
  284. max_tr.cpu = cpu;
  285. max_tr.time_start = data->preempt_timestamp;
  286. data = max_tr.data[cpu];
  287. data->saved_latency = tracing_max_latency;
  288. memcpy(data->comm, tsk->comm, TASK_COMM_LEN);
  289. data->pid = tsk->pid;
  290. data->uid = task_uid(tsk);
  291. data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
  292. data->policy = tsk->policy;
  293. data->rt_priority = tsk->rt_priority;
  294. /* record this task's comm */
  295. tracing_record_cmdline(tsk);
  296. }
  297. ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf, size_t cnt)
  298. {
  299. int len;
  300. int ret;
  301. if (!cnt)
  302. return 0;
  303. if (s->len <= s->readpos)
  304. return -EBUSY;
  305. len = s->len - s->readpos;
  306. if (cnt > len)
  307. cnt = len;
  308. ret = copy_to_user(ubuf, s->buffer + s->readpos, cnt);
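/*
 * copy_to_user() returns the number of bytes that could NOT be copied:
 * ret == cnt means nothing was copied, so report -EFAULT; otherwise
 * advance readpos by the bytes that actually reached userspace.
 */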
  309. if (ret == cnt)
  310. return -EFAULT;
  311. cnt -= ret;
  312. s->readpos += cnt;
  313. return cnt;
  314. }
  315. ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
  316. {
  317. int len;
  318. void *ret;
  319. if (s->len <= s->readpos)
  320. return -EBUSY;
  321. len = s->len - s->readpos;
  322. if (cnt > len)
  323. cnt = len;
  324. ret = memcpy(buf, s->buffer + s->readpos, cnt);
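/*
 * memcpy() returns its destination pointer, so this check only triggers
 * for a NULL buf; unlike copy_to_user() above, it cannot report a
 * partial copy.
 */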
  325. if (!ret)
  326. return -EFAULT;
  327. s->readpos += cnt;
  328. return cnt;
  329. }
  330. static void
  331. trace_print_seq(struct seq_file *m, struct trace_seq *s)
  332. {
  333. int len = s->len >= PAGE_SIZE ? PAGE_SIZE - 1 : s->len;
  334. s->buffer[len] = 0;
  335. seq_puts(m, s->buffer);
  336. trace_seq_init(s);
  337. }
  338. /**
  339. * update_max_tr - snapshot all trace buffers from global_trace to max_tr
  340. * @tr: tracer
  341. * @tsk: the task with the latency
  342. * @cpu: The cpu that initiated the trace.
  343. *
  344. * Flip the buffers between the @tr and the max_tr and record information
  345. * about which task was the cause of this latency.
  346. */
  347. void
  348. update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
  349. {
  350. struct ring_buffer *buf = tr->buffer;
  351. WARN_ON_ONCE(!irqs_disabled());
  352. __raw_spin_lock(&ftrace_max_lock);
  353. tr->buffer = max_tr.buffer;
  354. max_tr.buffer = buf;
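/*
 * After the swap, tr->buffer is the old max buffer and max_tr.buffer
 * holds the just-captured live data; reset tr->buffer so live tracing
 * resumes on an empty buffer while max_tr keeps the snapshot.
 */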
  355. ftrace_disable_cpu();
  356. ring_buffer_reset(tr->buffer);
  357. ftrace_enable_cpu();
  358. __update_max_tr(tr, tsk, cpu);
  359. __raw_spin_unlock(&ftrace_max_lock);
  360. }
  361. /**
  362. * update_max_tr_single - only copy one trace over, and reset the rest
  363. * @tr - tracer
  364. * @tsk - task with the latency
  365. * @cpu - the cpu of the buffer to copy.
  366. *
  367. * Flip the trace of a single CPU buffer between the @tr and the max_tr.
  368. */
  369. void
  370. update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
  371. {
  372. int ret;
  373. WARN_ON_ONCE(!irqs_disabled());
  374. __raw_spin_lock(&ftrace_max_lock);
  375. ftrace_disable_cpu();
  376. ring_buffer_reset(max_tr.buffer);
  377. ret = ring_buffer_swap_cpu(max_tr.buffer, tr->buffer, cpu);
  378. ftrace_enable_cpu();
  379. WARN_ON_ONCE(ret && ret != -EAGAIN);
  380. __update_max_tr(tr, tsk, cpu);
  381. __raw_spin_unlock(&ftrace_max_lock);
  382. }
  383. /**
  384. * register_tracer - register a tracer with the ftrace system.
  385. * @type - the plugin for the tracer
  386. *
  387. * Register a new plugin tracer.
  388. */
  389. int register_tracer(struct tracer *type)
  390. __releases(kernel_lock)
  391. __acquires(kernel_lock)
  392. {
  393. struct tracer *t;
  394. int len;
  395. int ret = 0;
  396. if (!type->name) {
  397. pr_info("Tracer must have a name\n");
  398. return -1;
  399. }
  400. /*
  401. * When this gets called we hold the BKL which means that
  402. * preemption is disabled. Various trace selftests however
  403. * need to disable and enable preemption for successful tests.
  404. * So we drop the BKL here and grab it after the tests again.
  405. */
  406. unlock_kernel();
  407. mutex_lock(&trace_types_lock);
  408. tracing_selftest_running = true;
  409. for (t = trace_types; t; t = t->next) {
  410. if (strcmp(type->name, t->name) == 0) {
  411. /* already found */
  412. pr_info("Trace %s already registered\n",
  413. type->name);
  414. ret = -1;
  415. goto out;
  416. }
  417. }
  418. if (!type->set_flag)
  419. type->set_flag = &dummy_set_flag;
  420. if (!type->flags)
  421. type->flags = &dummy_tracer_flags;
  422. else
  423. if (!type->flags->opts)
  424. type->flags->opts = dummy_tracer_opt;
  425. if (!type->wait_pipe)
  426. type->wait_pipe = default_wait_pipe;
  427. #ifdef CONFIG_FTRACE_STARTUP_TEST
  428. if (type->selftest && !tracing_selftest_disabled) {
  429. struct tracer *saved_tracer = current_trace;
  430. struct trace_array *tr = &global_trace;
  431. int i;
  432. /*
  433. * Run a selftest on this tracer.
  434. * Here we reset the trace buffer, and set the current
  435. * tracer to be this tracer. The tracer can then run some
  436. * internal tracing to verify that everything is in order.
  437. * If we fail, we do not register this tracer.
  438. */
  439. for_each_tracing_cpu(i)
  440. tracing_reset(tr, i);
  441. current_trace = type;
  442. /* the test is responsible for initializing and enabling */
  443. pr_info("Testing tracer %s: ", type->name);
  444. ret = type->selftest(type, tr);
  445. /* the test is responsible for resetting too */
  446. current_trace = saved_tracer;
  447. if (ret) {
  448. printk(KERN_CONT "FAILED!\n");
  449. goto out;
  450. }
  451. /* Only reset on passing, to avoid touching corrupted buffers */
  452. for_each_tracing_cpu(i)
  453. tracing_reset(tr, i);
  454. printk(KERN_CONT "PASSED\n");
  455. }
  456. #endif
  457. type->next = trace_types;
  458. trace_types = type;
  459. len = strlen(type->name);
  460. if (len > max_tracer_type_len)
  461. max_tracer_type_len = len;
  462. out:
  463. tracing_selftest_running = false;
  464. mutex_unlock(&trace_types_lock);
  465. if (ret || !default_bootup_tracer)
  466. goto out_unlock;
  467. if (strncmp(default_bootup_tracer, type->name, BOOTUP_TRACER_SIZE))
  468. goto out_unlock;
  469. printk(KERN_INFO "Starting tracer '%s'\n", type->name);
  470. /* Do we want this tracer to start on bootup? */
  471. tracing_set_tracer(type->name);
  472. default_bootup_tracer = NULL;
  473. /* disable other selftests, since this will break them. */
  474. tracing_selftest_disabled = 1;
  475. #ifdef CONFIG_FTRACE_STARTUP_TEST
  476. printk(KERN_INFO "Disabling FTRACE selftests due to running tracer '%s'\n",
  477. type->name);
  478. #endif
  479. out_unlock:
  480. lock_kernel();
  481. return ret;
  482. }
  483. void unregister_tracer(struct tracer *type)
  484. {
  485. struct tracer **t;
  486. int len;
  487. mutex_lock(&trace_types_lock);
  488. for (t = &trace_types; *t; t = &(*t)->next) {
  489. if (*t == type)
  490. goto found;
  491. }
  492. pr_info("Trace %s not registered\n", type->name);
  493. goto out;
  494. found:
  495. *t = (*t)->next;
  496. if (type == current_trace && tracer_enabled) {
  497. tracer_enabled = 0;
  498. tracing_stop();
  499. if (current_trace->stop)
  500. current_trace->stop(&global_trace);
  501. current_trace = &nop_trace;
  502. }
  503. if (strlen(type->name) != max_tracer_type_len)
  504. goto out;
  505. max_tracer_type_len = 0;
  506. for (t = &trace_types; *t; t = &(*t)->next) {
  507. len = strlen((*t)->name);
  508. if (len > max_tracer_type_len)
  509. max_tracer_type_len = len;
  510. }
  511. out:
  512. mutex_unlock(&trace_types_lock);
  513. }
  514. void tracing_reset(struct trace_array *tr, int cpu)
  515. {
  516. ftrace_disable_cpu();
  517. ring_buffer_reset_cpu(tr->buffer, cpu);
  518. ftrace_enable_cpu();
  519. }
  520. void tracing_reset_online_cpus(struct trace_array *tr)
  521. {
  522. int cpu;
  523. tr->time_start = ftrace_now(tr->cpu);
  524. for_each_online_cpu(cpu)
  525. tracing_reset(tr, cpu);
  526. }
  527. #define SAVED_CMDLINES 128
  528. static unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
  529. static unsigned map_cmdline_to_pid[SAVED_CMDLINES];
  530. static char saved_cmdlines[SAVED_CMDLINES][TASK_COMM_LEN];
  531. static int cmdline_idx;
  532. static raw_spinlock_t trace_cmdline_lock = __RAW_SPIN_LOCK_UNLOCKED;
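/*
 * map_pid_to_cmdline maps a pid to a slot in saved_cmdlines (or
 * (unsigned)-1 when the pid has no slot), map_cmdline_to_pid records
 * which pid last owned each slot so its mapping can be invalidated when
 * the slot is recycled, and cmdline_idx tracks the most recently claimed
 * slot (see trace_save_cmdline() below).
 */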
  533. /* temporarily disable recording */
  534. static atomic_t trace_record_cmdline_disabled __read_mostly;
  535. static void trace_init_cmdlines(void)
  536. {
  537. memset(&map_pid_to_cmdline, -1, sizeof(map_pid_to_cmdline));
  538. memset(&map_cmdline_to_pid, -1, sizeof(map_cmdline_to_pid));
  539. cmdline_idx = 0;
  540. }
  541. static int trace_stop_count;
  542. static DEFINE_SPINLOCK(tracing_start_lock);
  543. /**
  544. * ftrace_off_permanent - disable all ftrace code permanently
  545. *
  546. * This should only be called when a serious anomaly has
  547. * been detected. This will turn off function tracing,
  548. * ring buffers, and other tracing utilities. It takes no
  549. * locks and can be called from any context.
  550. */
  551. void ftrace_off_permanent(void)
  552. {
  553. tracing_disabled = 1;
  554. ftrace_stop();
  555. tracing_off_permanent();
  556. }
  557. /**
  558. * tracing_start - quick start of the tracer
  559. *
  560. * If tracing is enabled but was stopped by tracing_stop,
  561. * this will start the tracer back up.
  562. */
  563. void tracing_start(void)
  564. {
  565. struct ring_buffer *buffer;
  566. unsigned long flags;
  567. if (tracing_disabled)
  568. return;
  569. spin_lock_irqsave(&tracing_start_lock, flags);
  570. if (--trace_stop_count) {
  571. if (trace_stop_count < 0) {
  572. /* Someone screwed up their debugging */
  573. WARN_ON_ONCE(1);
  574. trace_stop_count = 0;
  575. }
  576. goto out;
  577. }
  578. buffer = global_trace.buffer;
  579. if (buffer)
  580. ring_buffer_record_enable(buffer);
  581. buffer = max_tr.buffer;
  582. if (buffer)
  583. ring_buffer_record_enable(buffer);
  584. ftrace_start();
  585. out:
  586. spin_unlock_irqrestore(&tracing_start_lock, flags);
  587. }
  588. /**
  589. * tracing_stop - quick stop of the tracer
  590. *
  591. * Lightweight way to stop tracing. Use in conjunction with
  592. * tracing_start.
  593. */
  594. void tracing_stop(void)
  595. {
  596. struct ring_buffer *buffer;
  597. unsigned long flags;
  598. ftrace_stop();
  599. spin_lock_irqsave(&tracing_start_lock, flags);
  600. if (trace_stop_count++)
  601. goto out;
  602. buffer = global_trace.buffer;
  603. if (buffer)
  604. ring_buffer_record_disable(buffer);
  605. buffer = max_tr.buffer;
  606. if (buffer)
  607. ring_buffer_record_disable(buffer);
  608. out:
  609. spin_unlock_irqrestore(&tracing_start_lock, flags);
  610. }
  611. void trace_stop_cmdline_recording(void);
  612. static void trace_save_cmdline(struct task_struct *tsk)
  613. {
  614. unsigned map;
  615. unsigned idx;
  616. if (!tsk->pid || unlikely(tsk->pid > PID_MAX_DEFAULT))
  617. return;
  618. /*
  619. * It's not the end of the world if we don't get
  620. * the lock, but we also don't want to spin
  621. * nor do we want to disable interrupts,
  622. * so if we miss here, then better luck next time.
  623. */
  624. if (!__raw_spin_trylock(&trace_cmdline_lock))
  625. return;
  626. idx = map_pid_to_cmdline[tsk->pid];
  627. if (idx >= SAVED_CMDLINES) {
  628. idx = (cmdline_idx + 1) % SAVED_CMDLINES;
  629. map = map_cmdline_to_pid[idx];
  630. if (map <= PID_MAX_DEFAULT)
  631. map_pid_to_cmdline[map] = (unsigned)-1;
  632. map_pid_to_cmdline[tsk->pid] = idx;
  633. cmdline_idx = idx;
  634. }
  635. memcpy(&saved_cmdlines[idx], tsk->comm, TASK_COMM_LEN);
  636. __raw_spin_unlock(&trace_cmdline_lock);
  637. }
  638. char *trace_find_cmdline(int pid)
  639. {
  640. char *cmdline = "<...>";
  641. unsigned map;
  642. if (!pid)
  643. return "<idle>";
  644. if (pid > PID_MAX_DEFAULT)
  645. goto out;
  646. map = map_pid_to_cmdline[pid];
  647. if (map >= SAVED_CMDLINES)
  648. goto out;
  649. cmdline = saved_cmdlines[map];
  650. out:
  651. return cmdline;
  652. }
  653. void tracing_record_cmdline(struct task_struct *tsk)
  654. {
  655. if (atomic_read(&trace_record_cmdline_disabled))
  656. return;
  657. trace_save_cmdline(tsk);
  658. }
  659. void
  660. tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
  661. int pc)
  662. {
  663. struct task_struct *tsk = current;
  664. entry->preempt_count = pc & 0xff;
  665. entry->pid = (tsk) ? tsk->pid : 0;
  666. entry->tgid = (tsk) ? tsk->tgid : 0;
  667. entry->flags =
  668. #ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
  669. (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
  670. #else
  671. TRACE_FLAG_IRQS_NOSUPPORT |
  672. #endif
  673. ((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
  674. ((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
  675. (need_resched() ? TRACE_FLAG_NEED_RESCHED : 0);
  676. }
  677. struct ring_buffer_event *trace_buffer_lock_reserve(struct trace_array *tr,
  678. unsigned char type,
  679. unsigned long len,
  680. unsigned long flags, int pc)
  681. {
  682. struct ring_buffer_event *event;
  683. event = ring_buffer_lock_reserve(tr->buffer, len);
  684. if (event != NULL) {
  685. struct trace_entry *ent = ring_buffer_event_data(event);
  686. tracing_generic_entry_update(ent, flags, pc);
  687. ent->type = type;
  688. }
  689. return event;
  690. }
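/*
 * Typical usage (see trace_function() below): reserve an event of the
 * right size, fill in the type-specific fields via
 * ring_buffer_event_data(), then commit it with
 * ring_buffer_unlock_commit() or trace_buffer_unlock_commit().
 */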
  691. static void ftrace_trace_stack(struct trace_array *tr,
  692. unsigned long flags, int skip, int pc);
  693. static void ftrace_trace_userstack(struct trace_array *tr,
  694. unsigned long flags, int pc);
  695. void trace_buffer_unlock_commit(struct trace_array *tr,
  696. struct ring_buffer_event *event,
  697. unsigned long flags, int pc)
  698. {
  699. ring_buffer_unlock_commit(tr->buffer, event);
  700. ftrace_trace_stack(tr, flags, 6, pc);
  701. ftrace_trace_userstack(tr, flags, pc);
  702. trace_wake_up();
  703. }
  704. struct ring_buffer_event *
  705. trace_current_buffer_lock_reserve(unsigned char type, unsigned long len,
  706. unsigned long flags, int pc)
  707. {
  708. return trace_buffer_lock_reserve(&global_trace,
  709. type, len, flags, pc);
  710. }
  711. void trace_current_buffer_unlock_commit(struct ring_buffer_event *event,
  712. unsigned long flags, int pc)
  713. {
  714. return trace_buffer_unlock_commit(&global_trace, event, flags, pc);
  715. }
  716. void
  717. trace_function(struct trace_array *tr,
  718. unsigned long ip, unsigned long parent_ip, unsigned long flags,
  719. int pc)
  720. {
  721. struct ring_buffer_event *event;
  722. struct ftrace_entry *entry;
  723. /* If we are reading the ring buffer, don't trace */
  724. if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
  725. return;
  726. event = trace_buffer_lock_reserve(tr, TRACE_FN, sizeof(*entry),
  727. flags, pc);
  728. if (!event)
  729. return;
  730. entry = ring_buffer_event_data(event);
  731. entry->ip = ip;
  732. entry->parent_ip = parent_ip;
  733. ring_buffer_unlock_commit(tr->buffer, event);
  734. }
  735. #ifdef CONFIG_FUNCTION_GRAPH_TRACER
  736. static void __trace_graph_entry(struct trace_array *tr,
  737. struct ftrace_graph_ent *trace,
  738. unsigned long flags,
  739. int pc)
  740. {
  741. struct ring_buffer_event *event;
  742. struct ftrace_graph_ent_entry *entry;
  743. if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
  744. return;
  745. event = trace_buffer_lock_reserve(&global_trace, TRACE_GRAPH_ENT,
  746. sizeof(*entry), flags, pc);
  747. if (!event)
  748. return;
  749. entry = ring_buffer_event_data(event);
  750. entry->graph_ent = *trace;
  751. ring_buffer_unlock_commit(global_trace.buffer, event);
  752. }
  753. static void __trace_graph_return(struct trace_array *tr,
  754. struct ftrace_graph_ret *trace,
  755. unsigned long flags,
  756. int pc)
  757. {
  758. struct ring_buffer_event *event;
  759. struct ftrace_graph_ret_entry *entry;
  760. if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
  761. return;
  762. event = trace_buffer_lock_reserve(&global_trace, TRACE_GRAPH_RET,
  763. sizeof(*entry), flags, pc);
  764. if (!event)
  765. return;
  766. entry = ring_buffer_event_data(event);
  767. entry->ret = *trace;
  768. ring_buffer_unlock_commit(global_trace.buffer, event);
  769. }
  770. #endif
  771. void
  772. ftrace(struct trace_array *tr, struct trace_array_cpu *data,
  773. unsigned long ip, unsigned long parent_ip, unsigned long flags,
  774. int pc)
  775. {
  776. if (likely(!atomic_read(&data->disabled)))
  777. trace_function(tr, ip, parent_ip, flags, pc);
  778. }
  779. static void __ftrace_trace_stack(struct trace_array *tr,
  780. unsigned long flags,
  781. int skip, int pc)
  782. {
  783. #ifdef CONFIG_STACKTRACE
  784. struct ring_buffer_event *event;
  785. struct stack_entry *entry;
  786. struct stack_trace trace;
  787. event = trace_buffer_lock_reserve(tr, TRACE_STACK,
  788. sizeof(*entry), flags, pc);
  789. if (!event)
  790. return;
  791. entry = ring_buffer_event_data(event);
  792. memset(&entry->caller, 0, sizeof(entry->caller));
  793. trace.nr_entries = 0;
  794. trace.max_entries = FTRACE_STACK_ENTRIES;
  795. trace.skip = skip;
  796. trace.entries = entry->caller;
  797. save_stack_trace(&trace);
  798. ring_buffer_unlock_commit(tr->buffer, event);
  799. #endif
  800. }
  801. static void ftrace_trace_stack(struct trace_array *tr,
  802. unsigned long flags,
  803. int skip, int pc)
  804. {
  805. if (!(trace_flags & TRACE_ITER_STACKTRACE))
  806. return;
  807. __ftrace_trace_stack(tr, flags, skip, pc);
  808. }
  809. void __trace_stack(struct trace_array *tr,
  810. unsigned long flags,
  811. int skip, int pc)
  812. {
  813. __ftrace_trace_stack(tr, flags, skip, pc);
  814. }
  815. static void ftrace_trace_userstack(struct trace_array *tr,
  816. unsigned long flags, int pc)
  817. {
  818. #ifdef CONFIG_STACKTRACE
  819. struct ring_buffer_event *event;
  820. struct userstack_entry *entry;
  821. struct stack_trace trace;
  822. if (!(trace_flags & TRACE_ITER_USERSTACKTRACE))
  823. return;
  824. event = trace_buffer_lock_reserve(tr, TRACE_USER_STACK,
  825. sizeof(*entry), flags, pc);
  826. if (!event)
  827. return;
  828. entry = ring_buffer_event_data(event);
  829. memset(&entry->caller, 0, sizeof(entry->caller));
  830. trace.nr_entries = 0;
  831. trace.max_entries = FTRACE_STACK_ENTRIES;
  832. trace.skip = 0;
  833. trace.entries = entry->caller;
  834. save_stack_trace_user(&trace);
  835. ring_buffer_unlock_commit(tr->buffer, event);
  836. #endif
  837. }
  838. #ifdef UNUSED
  839. static void __trace_userstack(struct trace_array *tr, unsigned long flags)
  840. {
  841. ftrace_trace_userstack(tr, flags, preempt_count());
  842. }
  843. #endif /* UNUSED */
  844. static void
  845. ftrace_trace_special(void *__tr,
  846. unsigned long arg1, unsigned long arg2, unsigned long arg3,
  847. int pc)
  848. {
  849. struct ring_buffer_event *event;
  850. struct trace_array *tr = __tr;
  851. struct special_entry *entry;
  852. event = trace_buffer_lock_reserve(tr, TRACE_SPECIAL,
  853. sizeof(*entry), 0, pc);
  854. if (!event)
  855. return;
  856. entry = ring_buffer_event_data(event);
  857. entry->arg1 = arg1;
  858. entry->arg2 = arg2;
  859. entry->arg3 = arg3;
  860. trace_buffer_unlock_commit(tr, event, 0, pc);
  861. }
  862. void
  863. __trace_special(void *__tr, void *__data,
  864. unsigned long arg1, unsigned long arg2, unsigned long arg3)
  865. {
  866. ftrace_trace_special(__tr, arg1, arg2, arg3, preempt_count());
  867. }
  868. void
  869. tracing_sched_switch_trace(struct trace_array *tr,
  870. struct task_struct *prev,
  871. struct task_struct *next,
  872. unsigned long flags, int pc)
  873. {
  874. struct ring_buffer_event *event;
  875. struct ctx_switch_entry *entry;
  876. event = trace_buffer_lock_reserve(tr, TRACE_CTX,
  877. sizeof(*entry), flags, pc);
  878. if (!event)
  879. return;
  880. entry = ring_buffer_event_data(event);
  881. entry->prev_pid = prev->pid;
  882. entry->prev_prio = prev->prio;
  883. entry->prev_state = prev->state;
  884. entry->next_pid = next->pid;
  885. entry->next_prio = next->prio;
  886. entry->next_state = next->state;
  887. entry->next_cpu = task_cpu(next);
  888. trace_buffer_unlock_commit(tr, event, flags, pc);
  889. }
  890. void
  891. tracing_sched_wakeup_trace(struct trace_array *tr,
  892. struct task_struct *wakee,
  893. struct task_struct *curr,
  894. unsigned long flags, int pc)
  895. {
  896. struct ring_buffer_event *event;
  897. struct ctx_switch_entry *entry;
  898. event = trace_buffer_lock_reserve(tr, TRACE_WAKE,
  899. sizeof(*entry), flags, pc);
  900. if (!event)
  901. return;
  902. entry = ring_buffer_event_data(event);
  903. entry->prev_pid = curr->pid;
  904. entry->prev_prio = curr->prio;
  905. entry->prev_state = curr->state;
  906. entry->next_pid = wakee->pid;
  907. entry->next_prio = wakee->prio;
  908. entry->next_state = wakee->state;
  909. entry->next_cpu = task_cpu(wakee);
  910. ring_buffer_unlock_commit(tr->buffer, event);
  911. ftrace_trace_stack(tr, flags, 6, pc);
  912. ftrace_trace_userstack(tr, flags, pc);
  913. }
  914. void
  915. ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3)
  916. {
  917. struct trace_array *tr = &global_trace;
  918. struct trace_array_cpu *data;
  919. unsigned long flags;
  920. int cpu;
  921. int pc;
  922. if (tracing_disabled)
  923. return;
  924. pc = preempt_count();
  925. local_irq_save(flags);
  926. cpu = raw_smp_processor_id();
  927. data = tr->data[cpu];
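/*
 * data->disabled doubles as a per-cpu recursion/nesting counter: only
 * record the event if we are the first on this cpu to bump it.
 */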
  928. if (likely(atomic_inc_return(&data->disabled) == 1))
  929. ftrace_trace_special(tr, arg1, arg2, arg3, pc);
  930. atomic_dec(&data->disabled);
  931. local_irq_restore(flags);
  932. }
  933. #ifdef CONFIG_FUNCTION_GRAPH_TRACER
  934. int trace_graph_entry(struct ftrace_graph_ent *trace)
  935. {
  936. struct trace_array *tr = &global_trace;
  937. struct trace_array_cpu *data;
  938. unsigned long flags;
  939. long disabled;
  940. int cpu;
  941. int pc;
  942. if (!ftrace_trace_task(current))
  943. return 0;
  944. if (!ftrace_graph_addr(trace->func))
  945. return 0;
  946. local_irq_save(flags);
  947. cpu = raw_smp_processor_id();
  948. data = tr->data[cpu];
  949. disabled = atomic_inc_return(&data->disabled);
  950. if (likely(disabled == 1)) {
  951. pc = preempt_count();
  952. __trace_graph_entry(tr, trace, flags, pc);
  953. }
  954. /* Only do the atomic if it is not already set */
  955. if (!test_tsk_trace_graph(current))
  956. set_tsk_trace_graph(current);
  957. atomic_dec(&data->disabled);
  958. local_irq_restore(flags);
  959. return 1;
  960. }
  961. void trace_graph_return(struct ftrace_graph_ret *trace)
  962. {
  963. struct trace_array *tr = &global_trace;
  964. struct trace_array_cpu *data;
  965. unsigned long flags;
  966. long disabled;
  967. int cpu;
  968. int pc;
  969. local_irq_save(flags);
  970. cpu = raw_smp_processor_id();
  971. data = tr->data[cpu];
  972. disabled = atomic_inc_return(&data->disabled);
  973. if (likely(disabled == 1)) {
  974. pc = preempt_count();
  975. __trace_graph_return(tr, trace, flags, pc);
  976. }
  977. if (!trace->depth)
  978. clear_tsk_trace_graph(current);
  979. atomic_dec(&data->disabled);
  980. local_irq_restore(flags);
  981. }
  982. #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
  983. enum trace_file_type {
  984. TRACE_FILE_LAT_FMT = 1,
  985. TRACE_FILE_ANNOTATE = 2,
  986. };
  987. static void trace_iterator_increment(struct trace_iterator *iter)
  988. {
  989. /* Don't allow ftrace to trace into the ring buffers */
  990. ftrace_disable_cpu();
  991. iter->idx++;
  992. if (iter->buffer_iter[iter->cpu])
  993. ring_buffer_read(iter->buffer_iter[iter->cpu], NULL);
  994. ftrace_enable_cpu();
  995. }
  996. static struct trace_entry *
  997. peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts)
  998. {
  999. struct ring_buffer_event *event;
  1000. struct ring_buffer_iter *buf_iter = iter->buffer_iter[cpu];
  1001. /* Don't allow ftrace to trace into the ring buffers */
  1002. ftrace_disable_cpu();
  1003. if (buf_iter)
  1004. event = ring_buffer_iter_peek(buf_iter, ts);
  1005. else
  1006. event = ring_buffer_peek(iter->tr->buffer, cpu, ts);
  1007. ftrace_enable_cpu();
  1008. return event ? ring_buffer_event_data(event) : NULL;
  1009. }
  1010. static struct trace_entry *
  1011. __find_next_entry(struct trace_iterator *iter, int *ent_cpu, u64 *ent_ts)
  1012. {
  1013. struct ring_buffer *buffer = iter->tr->buffer;
  1014. struct trace_entry *ent, *next = NULL;
  1015. int cpu_file = iter->cpu_file;
  1016. u64 next_ts = 0, ts;
  1017. int next_cpu = -1;
  1018. int cpu;
  1019. /*
  1020. * If we are in a per_cpu trace file, don't bother iterating over
  1021. * all cpus; just peek at that cpu directly.
  1022. */
  1023. if (cpu_file > TRACE_PIPE_ALL_CPU) {
  1024. if (ring_buffer_empty_cpu(buffer, cpu_file))
  1025. return NULL;
  1026. ent = peek_next_entry(iter, cpu_file, ent_ts);
  1027. if (ent_cpu)
  1028. *ent_cpu = cpu_file;
  1029. return ent;
  1030. }
  1031. for_each_tracing_cpu(cpu) {
  1032. if (ring_buffer_empty_cpu(buffer, cpu))
  1033. continue;
  1034. ent = peek_next_entry(iter, cpu, &ts);
  1035. /*
  1036. * Pick the entry with the smallest timestamp:
  1037. */
  1038. if (ent && (!next || ts < next_ts)) {
  1039. next = ent;
  1040. next_cpu = cpu;
  1041. next_ts = ts;
  1042. }
  1043. }
  1044. if (ent_cpu)
  1045. *ent_cpu = next_cpu;
  1046. if (ent_ts)
  1047. *ent_ts = next_ts;
  1048. return next;
  1049. }
  1050. /* Find the next real entry, without updating the iterator itself */
  1051. struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
  1052. int *ent_cpu, u64 *ent_ts)
  1053. {
  1054. return __find_next_entry(iter, ent_cpu, ent_ts);
  1055. }
  1056. /* Find the next real entry, and increment the iterator to the next entry */
  1057. static void *find_next_entry_inc(struct trace_iterator *iter)
  1058. {
  1059. iter->ent = __find_next_entry(iter, &iter->cpu, &iter->ts);
  1060. if (iter->ent)
  1061. trace_iterator_increment(iter);
  1062. return iter->ent ? iter : NULL;
  1063. }
  1064. static void trace_consume(struct trace_iterator *iter)
  1065. {
  1066. /* Don't allow ftrace to trace into the ring buffers */
  1067. ftrace_disable_cpu();
  1068. ring_buffer_consume(iter->tr->buffer, iter->cpu, &iter->ts);
  1069. ftrace_enable_cpu();
  1070. }
  1071. static void *s_next(struct seq_file *m, void *v, loff_t *pos)
  1072. {
  1073. struct trace_iterator *iter = m->private;
  1074. int i = (int)*pos;
  1075. void *ent;
  1076. (*pos)++;
  1077. /* can't go backwards */
  1078. if (iter->idx > i)
  1079. return NULL;
  1080. if (iter->idx < 0)
  1081. ent = find_next_entry_inc(iter);
  1082. else
  1083. ent = iter;
  1084. while (ent && iter->idx < i)
  1085. ent = find_next_entry_inc(iter);
  1086. iter->pos = *pos;
  1087. return ent;
  1088. }
  1089. /*
  1090. * No locking is necessary here. The worst that can
  1091. * happen is losing events consumed at the same time
  1092. * by a trace_pipe reader.
  1093. * Other than that, we don't risk crashing the ring buffer
  1094. * because it serializes the readers.
  1095. *
  1096. * The current tracer is copied to avoid a global locking
  1097. * all around.
  1098. */
  1099. static void *s_start(struct seq_file *m, loff_t *pos)
  1100. {
  1101. struct trace_iterator *iter = m->private;
  1102. static struct tracer *old_tracer;
  1103. int cpu_file = iter->cpu_file;
  1104. void *p = NULL;
  1105. loff_t l = 0;
  1106. int cpu;
  1107. /* copy the tracer to avoid using a global lock all around */
  1108. mutex_lock(&trace_types_lock);
  1109. if (unlikely(old_tracer != current_trace && current_trace)) {
  1110. old_tracer = current_trace;
  1111. *iter->trace = *current_trace;
  1112. }
  1113. mutex_unlock(&trace_types_lock);
  1114. atomic_inc(&trace_record_cmdline_disabled);
  1115. if (*pos != iter->pos) {
  1116. iter->ent = NULL;
  1117. iter->cpu = 0;
  1118. iter->idx = -1;
  1119. ftrace_disable_cpu();
  1120. if (cpu_file == TRACE_PIPE_ALL_CPU) {
  1121. for_each_tracing_cpu(cpu)
  1122. ring_buffer_iter_reset(iter->buffer_iter[cpu]);
  1123. } else
  1124. ring_buffer_iter_reset(iter->buffer_iter[cpu_file]);
  1125. ftrace_enable_cpu();
  1126. for (p = iter; p && l < *pos; p = s_next(m, p, &l))
  1127. ;
  1128. } else {
  1129. l = *pos - 1;
  1130. p = s_next(m, p, &l);
  1131. }
  1132. return p;
  1133. }
  1134. static void s_stop(struct seq_file *m, void *p)
  1135. {
  1136. atomic_dec(&trace_record_cmdline_disabled);
  1137. }
  1138. static void print_lat_help_header(struct seq_file *m)
  1139. {
  1140. seq_puts(m, "# _------=> CPU# \n");
  1141. seq_puts(m, "# / _-----=> irqs-off \n");
  1142. seq_puts(m, "# | / _----=> need-resched \n");
  1143. seq_puts(m, "# || / _---=> hardirq/softirq \n");
  1144. seq_puts(m, "# ||| / _--=> preempt-depth \n");
  1145. seq_puts(m, "# |||| / \n");
  1146. seq_puts(m, "# ||||| delay \n");
  1147. seq_puts(m, "# cmd pid ||||| time | caller \n");
  1148. seq_puts(m, "# \\ / ||||| \\ | / \n");
  1149. }
  1150. static void print_func_help_header(struct seq_file *m)
  1151. {
  1152. seq_puts(m, "# TASK-PID CPU# TIMESTAMP FUNCTION\n");
  1153. seq_puts(m, "# | | | | |\n");
  1154. }
static void
print_trace_header(struct seq_file *m, struct trace_iterator *iter)
{
	unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
	struct trace_array *tr = iter->tr;
	struct trace_array_cpu *data = tr->data[tr->cpu];
	struct tracer *type = current_trace;
	unsigned long total;
	unsigned long entries;
	const char *name = "preemption";

	if (type)
		name = type->name;

	entries = ring_buffer_entries(iter->tr->buffer);
	total = entries +
		ring_buffer_overruns(iter->tr->buffer);

	seq_printf(m, "%s latency trace v1.1.5 on %s\n",
		   name, UTS_RELEASE);
	seq_puts(m, "-----------------------------------"
		 "---------------------------------\n");
	seq_printf(m, " latency: %lu us, #%lu/%lu, CPU#%d |"
		   " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
		   nsecs_to_usecs(data->saved_latency),
		   entries,
		   total,
		   tr->cpu,
#if defined(CONFIG_PREEMPT_NONE)
		   "server",
#elif defined(CONFIG_PREEMPT_VOLUNTARY)
		   "desktop",
#elif defined(CONFIG_PREEMPT)
		   "preempt",
#else
		   "unknown",
#endif
		   /* These are reserved for later use */
		   0, 0, 0, 0);
#ifdef CONFIG_SMP
	seq_printf(m, " #P:%d)\n", num_online_cpus());
#else
	seq_puts(m, ")\n");
#endif
	seq_puts(m, "    -----------------\n");
	seq_printf(m, "    | task: %.16s-%d "
		   "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
		   data->comm, data->pid, data->uid, data->nice,
		   data->policy, data->rt_priority);
	seq_puts(m, "    -----------------\n");

	if (data->critical_start) {
		seq_puts(m, " => started at: ");
		seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
		trace_print_seq(m, &iter->seq);
		seq_puts(m, "\n => ended at:   ");
		seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
		trace_print_seq(m, &iter->seq);
		seq_puts(m, "\n");
	}

	seq_puts(m, "\n");
}

static void test_cpu_buff_start(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;

	if (!(trace_flags & TRACE_ITER_ANNOTATE))
		return;

	if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
		return;

	if (cpumask_test_cpu(iter->cpu, iter->started))
		return;

	cpumask_set_cpu(iter->cpu, iter->started);
	trace_seq_printf(s, "##### CPU %u buffer started ####\n", iter->cpu);
}
static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;
	unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
	struct trace_entry *entry;
	struct trace_event *event;

	entry = iter->ent;

	test_cpu_buff_start(iter);

	event = ftrace_find_event(entry->type);

	if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
		if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
			if (!trace_print_lat_context(iter))
				goto partial;
		} else {
			if (!trace_print_context(iter))
				goto partial;
		}
	}

	if (event)
		return event->trace(iter, sym_flags);

	if (!trace_seq_printf(s, "Unknown type %d\n", entry->type))
		goto partial;

	return TRACE_TYPE_HANDLED;
partial:
	return TRACE_TYPE_PARTIAL_LINE;
}

static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;
	struct trace_entry *entry;
	struct trace_event *event;

	entry = iter->ent;

	if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
		if (!trace_seq_printf(s, "%d %d %llu ",
				      entry->pid, iter->cpu, iter->ts))
			goto partial;
	}

	event = ftrace_find_event(entry->type);
	if (event)
		return event->raw(iter, 0);

	if (!trace_seq_printf(s, "%d ?\n", entry->type))
		goto partial;

	return TRACE_TYPE_HANDLED;
partial:
	return TRACE_TYPE_PARTIAL_LINE;
}

static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;
	unsigned char newline = '\n';
	struct trace_entry *entry;
	struct trace_event *event;

	entry = iter->ent;

	if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
		SEQ_PUT_HEX_FIELD_RET(s, entry->pid);
		SEQ_PUT_HEX_FIELD_RET(s, iter->cpu);
		SEQ_PUT_HEX_FIELD_RET(s, iter->ts);
	}

	event = ftrace_find_event(entry->type);
	if (event) {
		enum print_line_t ret = event->hex(iter, 0);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
	}

	SEQ_PUT_FIELD_RET(s, newline);

	return TRACE_TYPE_HANDLED;
}

static enum print_line_t print_printk_msg_only(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;
	struct trace_entry *entry = iter->ent;
	struct print_entry *field;
	int ret;

	trace_assign_type(field, entry);

	ret = trace_seq_printf(s, "%s", field->buf);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;
	struct trace_entry *entry;
	struct trace_event *event;

	entry = iter->ent;

	if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
		SEQ_PUT_FIELD_RET(s, entry->pid);
		SEQ_PUT_FIELD_RET(s, iter->cpu);
		SEQ_PUT_FIELD_RET(s, iter->ts);
	}

	event = ftrace_find_event(entry->type);
	return event ? event->binary(iter, 0) : TRACE_TYPE_HANDLED;
}

static int trace_empty(struct trace_iterator *iter)
{
	int cpu;

	for_each_tracing_cpu(cpu) {
		if (iter->buffer_iter[cpu]) {
			if (!ring_buffer_iter_empty(iter->buffer_iter[cpu]))
				return 0;
		} else {
			if (!ring_buffer_empty_cpu(iter->tr->buffer, cpu))
				return 0;
		}
	}

	return 1;
}
static enum print_line_t print_trace_line(struct trace_iterator *iter)
{
	enum print_line_t ret;

	if (iter->trace && iter->trace->print_line) {
		ret = iter->trace->print_line(iter);
		if (ret != TRACE_TYPE_UNHANDLED)
			return ret;
	}

	if (iter->ent->type == TRACE_PRINT &&
	    trace_flags & TRACE_ITER_PRINTK &&
	    trace_flags & TRACE_ITER_PRINTK_MSGONLY)
		return print_printk_msg_only(iter);

	if (trace_flags & TRACE_ITER_BIN)
		return print_bin_fmt(iter);

	if (trace_flags & TRACE_ITER_HEX)
		return print_hex_fmt(iter);

	if (trace_flags & TRACE_ITER_RAW)
		return print_raw_fmt(iter);

	return print_trace_fmt(iter);
}

static int s_show(struct seq_file *m, void *v)
{
	struct trace_iterator *iter = v;

	if (iter->ent == NULL) {
		if (iter->tr) {
			seq_printf(m, "# tracer: %s\n", iter->trace->name);
			seq_puts(m, "#\n");
		}
		if (iter->trace && iter->trace->print_header)
			iter->trace->print_header(m);
		else if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
			/* print nothing if the buffers are empty */
			if (trace_empty(iter))
				return 0;
			print_trace_header(m, iter);
			if (!(trace_flags & TRACE_ITER_VERBOSE))
				print_lat_help_header(m);
		} else {
			if (!(trace_flags & TRACE_ITER_VERBOSE))
				print_func_help_header(m);
		}
	} else {
		print_trace_line(iter);
		trace_print_seq(m, &iter->seq);
	}

	return 0;
}

static struct seq_operations tracer_seq_ops = {
	.start		= s_start,
	.next		= s_next,
	.stop		= s_stop,
	.show		= s_show,
};
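
/*
 * For reference, print_trace_line() above picks the output format in
 * priority order: the tracer's own ->print_line() hook, then the
 * printk-msgonly, bin, hex and raw trace options, and finally the
 * default text format.  For example (assuming "hex" is among the
 * core option names in trace_options[]):
 *
 *	# echo hex > /debug/tracing/trace_options
 *
 * switches subsequent reads of the trace files to the hex format.
 */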
static struct trace_iterator *
__tracing_open(struct inode *inode, struct file *file)
{
	long cpu_file = (long) inode->i_private;
	void *fail_ret = ERR_PTR(-ENOMEM);
	struct trace_iterator *iter;
	struct seq_file *m;
	int cpu, ret;

	if (tracing_disabled)
		return ERR_PTR(-ENODEV);

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return ERR_PTR(-ENOMEM);

	/*
	 * We make a copy of the current tracer to avoid concurrent
	 * changes on it while we are reading.
	 */
	mutex_lock(&trace_types_lock);
	iter->trace = kzalloc(sizeof(*iter->trace), GFP_KERNEL);
	if (!iter->trace)
		goto fail;

	if (current_trace)
		*iter->trace = *current_trace;

	if (current_trace && current_trace->print_max)
		iter->tr = &max_tr;
	else
		iter->tr = &global_trace;
	iter->pos = -1;
	mutex_init(&iter->mutex);
	iter->cpu_file = cpu_file;

	/* Notify the tracer early; before we stop tracing. */
	if (iter->trace && iter->trace->open)
		iter->trace->open(iter);

	/* Annotate start of buffers if we had overruns */
	if (ring_buffer_overruns(iter->tr->buffer))
		iter->iter_flags |= TRACE_FILE_ANNOTATE;

	if (iter->cpu_file == TRACE_PIPE_ALL_CPU) {
		for_each_tracing_cpu(cpu) {
			iter->buffer_iter[cpu] =
				ring_buffer_read_start(iter->tr->buffer, cpu);
			if (!iter->buffer_iter[cpu])
				goto fail_buffer;
		}
	} else {
		cpu = iter->cpu_file;
		iter->buffer_iter[cpu] =
			ring_buffer_read_start(iter->tr->buffer, cpu);
		if (!iter->buffer_iter[cpu])
			goto fail;
	}

	/* TODO stop tracer */
	ret = seq_open(file, &tracer_seq_ops);
	if (ret < 0) {
		fail_ret = ERR_PTR(ret);
		goto fail_buffer;
	}

	m = file->private_data;
	m->private = iter;

	/* stop the trace while dumping */
	tracing_stop();

	mutex_unlock(&trace_types_lock);

	return iter;

 fail_buffer:
	for_each_tracing_cpu(cpu) {
		if (iter->buffer_iter[cpu])
			ring_buffer_read_finish(iter->buffer_iter[cpu]);
	}
 fail:
	mutex_unlock(&trace_types_lock);
	kfree(iter->trace);
	kfree(iter);

	return fail_ret;
}
int tracing_open_generic(struct inode *inode, struct file *filp)
{
	if (tracing_disabled)
		return -ENODEV;

	filp->private_data = inode->i_private;
	return 0;
}

static int tracing_release(struct inode *inode, struct file *file)
{
	struct seq_file *m = (struct seq_file *)file->private_data;
	struct trace_iterator *iter = m->private;
	int cpu;

	mutex_lock(&trace_types_lock);
	for_each_tracing_cpu(cpu) {
		if (iter->buffer_iter[cpu])
			ring_buffer_read_finish(iter->buffer_iter[cpu]);
	}

	if (iter->trace && iter->trace->close)
		iter->trace->close(iter);

	/* reenable tracing if it was previously enabled */
	tracing_start();
	mutex_unlock(&trace_types_lock);

	seq_release(inode, file);
	mutex_destroy(&iter->mutex);
	kfree(iter->trace);
	kfree(iter);
	return 0;
}

static int tracing_open(struct inode *inode, struct file *file)
{
	struct trace_iterator *iter;
	int ret = 0;

	iter = __tracing_open(inode, file);
	if (IS_ERR(iter))
		ret = PTR_ERR(iter);
	else if (trace_flags & TRACE_ITER_LATENCY_FMT)
		iter->iter_flags |= TRACE_FILE_LAT_FMT;

	return ret;
}
static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct tracer *t = m->private;

	(*pos)++;

	if (t)
		t = t->next;

	m->private = t;

	return t;
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
	struct tracer *t = m->private;
	loff_t l = 0;

	mutex_lock(&trace_types_lock);
	for (; t && l < *pos; t = t_next(m, t, &l))
		;

	return t;
}

static void t_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&trace_types_lock);
}

static int t_show(struct seq_file *m, void *v)
{
	struct tracer *t = v;

	if (!t)
		return 0;

	seq_printf(m, "%s", t->name);
	if (t->next)
		seq_putc(m, ' ');
	else
		seq_putc(m, '\n');

	return 0;
}

static struct seq_operations show_traces_seq_ops = {
	.start		= t_start,
	.next		= t_next,
	.stop		= t_stop,
	.show		= t_show,
};

static int show_traces_open(struct inode *inode, struct file *file)
{
	int ret;

	if (tracing_disabled)
		return -ENODEV;

	ret = seq_open(file, &show_traces_seq_ops);
	if (!ret) {
		struct seq_file *m = file->private_data;
		m->private = trace_types;
	}

	return ret;
}

static const struct file_operations tracing_fops = {
	.open		= tracing_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= tracing_release,
};

static const struct file_operations show_traces_fops = {
	.open		= show_traces_open,
	.read		= seq_read,
	.release	= seq_release,
};
/*
 * Only trace on a CPU if the bitmask is set:
 */
static cpumask_var_t tracing_cpumask;

/*
 * The tracer itself will not take this lock, but still we want
 * to provide a consistent cpumask to user-space:
 */
static DEFINE_MUTEX(tracing_cpumask_update_lock);

/*
 * Temporary storage for the character representation of the
 * CPU bitmask (and one more byte for the newline):
 */
static char mask_str[NR_CPUS + 1];

static ssize_t
tracing_cpumask_read(struct file *filp, char __user *ubuf,
		     size_t count, loff_t *ppos)
{
	int len;

	mutex_lock(&tracing_cpumask_update_lock);

	len = cpumask_scnprintf(mask_str, count, tracing_cpumask);
	if (count - len < 2) {
		count = -EINVAL;
		goto out_err;
	}
	len += sprintf(mask_str + len, "\n");
	count = simple_read_from_buffer(ubuf, count, ppos, mask_str, NR_CPUS+1);

out_err:
	mutex_unlock(&tracing_cpumask_update_lock);

	return count;
}
static ssize_t
tracing_cpumask_write(struct file *filp, const char __user *ubuf,
		      size_t count, loff_t *ppos)
{
	int err, cpu;
	cpumask_var_t tracing_cpumask_new;

	if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
		return -ENOMEM;

	mutex_lock(&tracing_cpumask_update_lock);
	err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
	if (err)
		goto err_unlock;

	local_irq_disable();
	__raw_spin_lock(&ftrace_max_lock);
	for_each_tracing_cpu(cpu) {
		/*
		 * Increase/decrease the disabled counter if we are
		 * about to flip a bit in the cpumask:
		 */
		if (cpumask_test_cpu(cpu, tracing_cpumask) &&
		    !cpumask_test_cpu(cpu, tracing_cpumask_new)) {
			atomic_inc(&global_trace.data[cpu]->disabled);
		}
		if (!cpumask_test_cpu(cpu, tracing_cpumask) &&
		    cpumask_test_cpu(cpu, tracing_cpumask_new)) {
			atomic_dec(&global_trace.data[cpu]->disabled);
		}
	}
	__raw_spin_unlock(&ftrace_max_lock);
	local_irq_enable();

	cpumask_copy(tracing_cpumask, tracing_cpumask_new);

	mutex_unlock(&tracing_cpumask_update_lock);
	free_cpumask_var(tracing_cpumask_new);

	return count;

err_unlock:
	mutex_unlock(&tracing_cpumask_update_lock);
	/* free the temporary mask, not the live tracing_cpumask */
	free_cpumask_var(tracing_cpumask_new);

	return err;
}

static const struct file_operations tracing_cpumask_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_cpumask_read,
	.write		= tracing_cpumask_write,
};
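
/*
 * Example usage of tracing_cpumask (hypothetical values, debugfs
 * mounted on /debug):
 *
 *	# cat /debug/tracing/tracing_cpumask
 *	f
 *	# echo 3 > /debug/tracing/tracing_cpumask
 *
 * would restrict tracing to CPUs 0 and 1 on a 4-CPU box; the mask is
 * parsed by cpumask_parse_user(), so the usual hex cpumask format
 * applies.
 */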
static ssize_t
tracing_trace_options_read(struct file *filp, char __user *ubuf,
			   size_t cnt, loff_t *ppos)
{
	struct tracer_opt *trace_opts;
	u32 tracer_flags;
	int len = 0;
	char *buf;
	int r = 0;
	int i;

	/* calculate max size */
	for (i = 0; trace_options[i]; i++) {
		len += strlen(trace_options[i]);
		len += 3; /* "no" and newline */
	}

	mutex_lock(&trace_types_lock);
	tracer_flags = current_trace->flags->val;
	trace_opts = current_trace->flags->opts;

	/*
	 * Increase the size with names of options specific
	 * to the current tracer.
	 */
	for (i = 0; trace_opts[i].name; i++) {
		len += strlen(trace_opts[i].name);
		len += 3; /* "no" and newline */
	}

	/* +2 for \n and \0 */
	buf = kmalloc(len + 2, GFP_KERNEL);
	if (!buf) {
		mutex_unlock(&trace_types_lock);
		return -ENOMEM;
	}

	for (i = 0; trace_options[i]; i++) {
		if (trace_flags & (1 << i))
			r += sprintf(buf + r, "%s\n", trace_options[i]);
		else
			r += sprintf(buf + r, "no%s\n", trace_options[i]);
	}

	for (i = 0; trace_opts[i].name; i++) {
		if (tracer_flags & trace_opts[i].bit)
			r += sprintf(buf + r, "%s\n",
				     trace_opts[i].name);
		else
			r += sprintf(buf + r, "no%s\n",
				     trace_opts[i].name);
	}
	mutex_unlock(&trace_types_lock);

	WARN_ON(r >= len + 2);

	r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);

	kfree(buf);
	return r;
}
/* Try to assign a tracer specific option */
static int set_tracer_option(struct tracer *trace, char *cmp, int neg)
{
	struct tracer_flags *trace_flags = trace->flags;
	struct tracer_opt *opts = NULL;
	int ret = 0, i = 0;
	int len;

	for (i = 0; trace_flags->opts[i].name; i++) {
		opts = &trace_flags->opts[i];
		len = strlen(opts->name);

		if (strncmp(cmp, opts->name, len) == 0) {
			ret = trace->set_flag(trace_flags->val,
					      opts->bit, !neg);
			break;
		}
	}
	/* Not found */
	if (!trace_flags->opts[i].name)
		return -EINVAL;

	/* Refused to handle */
	if (ret)
		return ret;

	if (neg)
		trace_flags->val &= ~opts->bit;
	else
		trace_flags->val |= opts->bit;

	return 0;
}

static ssize_t
tracing_trace_options_write(struct file *filp, const char __user *ubuf,
			    size_t cnt, loff_t *ppos)
{
	char buf[64];
	char *cmp = buf;
	int neg = 0;
	int ret;
	int i;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	if (strncmp(buf, "no", 2) == 0) {
		neg = 1;
		cmp += 2;
	}

	for (i = 0; trace_options[i]; i++) {
		int len = strlen(trace_options[i]);

		if (strncmp(cmp, trace_options[i], len) == 0) {
			if (neg)
				trace_flags &= ~(1 << i);
			else
				trace_flags |= (1 << i);
			break;
		}
	}

	/* If no option could be set, test the specific tracer options */
	if (!trace_options[i]) {
		mutex_lock(&trace_types_lock);
		ret = set_tracer_option(current_trace, cmp, neg);
		mutex_unlock(&trace_types_lock);
		if (ret)
			return ret;
	}

	filp->f_pos += cnt;

	return cnt;
}
static const struct file_operations tracing_iter_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_trace_options_read,
	.write		= tracing_trace_options_write,
};

static const char readme_msg[] =
	"tracing mini-HOWTO:\n\n"
	"# mkdir /debug\n"
	"# mount -t debugfs nodev /debug\n\n"
	"# cat /debug/tracing/available_tracers\n"
	"wakeup preemptirqsoff preemptoff irqsoff ftrace sched_switch none\n\n"
	"# cat /debug/tracing/current_tracer\n"
	"none\n"
	"# echo sched_switch > /debug/tracing/current_tracer\n"
	"# cat /debug/tracing/current_tracer\n"
	"sched_switch\n"
	"# cat /debug/tracing/trace_options\n"
	"noprint-parent nosym-offset nosym-addr noverbose\n"
	"# echo print-parent > /debug/tracing/trace_options\n"
	"# echo 1 > /debug/tracing/tracing_enabled\n"
	"# cat /debug/tracing/trace > /tmp/trace.txt\n"
	"# echo 0 > /debug/tracing/tracing_enabled\n"
;
static ssize_t
tracing_readme_read(struct file *filp, char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	return simple_read_from_buffer(ubuf, cnt, ppos,
				       readme_msg, strlen(readme_msg));
}

static const struct file_operations tracing_readme_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_readme_read,
};

static ssize_t
tracing_ctrl_read(struct file *filp, char __user *ubuf,
		  size_t cnt, loff_t *ppos)
{
	char buf[64];
	int r;

	r = sprintf(buf, "%u\n", tracer_enabled);
	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static ssize_t
tracing_ctrl_write(struct file *filp, const char __user *ubuf,
		   size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	char buf[64];
	unsigned long val;
	int ret;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	ret = strict_strtoul(buf, 10, &val);
	if (ret < 0)
		return ret;

	val = !!val;

	mutex_lock(&trace_types_lock);
	if (tracer_enabled ^ val) {
		if (val) {
			tracer_enabled = 1;
			if (current_trace->start)
				current_trace->start(tr);
			tracing_start();
		} else {
			tracer_enabled = 0;
			tracing_stop();
			if (current_trace->stop)
				current_trace->stop(tr);
		}
	}
	mutex_unlock(&trace_types_lock);

	filp->f_pos += cnt;

	return cnt;
}

static ssize_t
tracing_set_trace_read(struct file *filp, char __user *ubuf,
		       size_t cnt, loff_t *ppos)
{
	char buf[max_tracer_type_len+2];
	int r;

	mutex_lock(&trace_types_lock);
	if (current_trace)
		r = sprintf(buf, "%s\n", current_trace->name);
	else
		r = sprintf(buf, "\n");
	mutex_unlock(&trace_types_lock);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

int tracer_init(struct tracer *t, struct trace_array *tr)
{
	tracing_reset_online_cpus(tr);
	return t->init(tr);
}
struct trace_option_dentry;

static struct trace_option_dentry *
create_trace_option_files(struct tracer *tracer);

static void
destroy_trace_option_files(struct trace_option_dentry *topts);

static int tracing_set_tracer(const char *buf)
{
	static struct trace_option_dentry *topts;
	struct trace_array *tr = &global_trace;
	struct tracer *t;
	int ret = 0;

	mutex_lock(&trace_types_lock);
	for (t = trace_types; t; t = t->next) {
		if (strcmp(t->name, buf) == 0)
			break;
	}
	if (!t) {
		ret = -EINVAL;
		goto out;
	}
	if (t == current_trace)
		goto out;

	trace_branch_disable();
	if (current_trace && current_trace->reset)
		current_trace->reset(tr);

	destroy_trace_option_files(topts);

	current_trace = t;

	topts = create_trace_option_files(current_trace);

	if (t->init) {
		ret = tracer_init(t, tr);
		if (ret)
			goto out;
	}

	trace_branch_enable(tr);
 out:
	mutex_unlock(&trace_types_lock);

	return ret;
}

static ssize_t
tracing_set_trace_write(struct file *filp, const char __user *ubuf,
			size_t cnt, loff_t *ppos)
{
	char buf[max_tracer_type_len+1];
	int i;
	size_t ret;
	int err;

	ret = cnt;

	if (cnt > max_tracer_type_len)
		cnt = max_tracer_type_len;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	/* strip ending whitespace. */
	for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
		buf[i] = 0;

	err = tracing_set_tracer(buf);
	if (err)
		return err;

	filp->f_pos += ret;

	return ret;
}
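
/*
 * From user space, switching tracers is a single write, e.g.:
 *
 *	# echo sched_switch > /debug/tracing/current_tracer
 *
 * (see the mini-HOWTO above).  tracing_set_tracer() then resets the
 * online-cpu buffers via tracer_init() and rebuilds the per-tracer
 * files under options/.
 */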
static ssize_t
tracing_max_lat_read(struct file *filp, char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	unsigned long *ptr = filp->private_data;
	char buf[64];
	int r;

	r = snprintf(buf, sizeof(buf), "%ld\n",
		     *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
	if (r > sizeof(buf))
		r = sizeof(buf);
	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static ssize_t
tracing_max_lat_write(struct file *filp, const char __user *ubuf,
		      size_t cnt, loff_t *ppos)
{
	unsigned long *ptr = filp->private_data;
	char buf[64];
	unsigned long val;
	int ret;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	ret = strict_strtoul(buf, 10, &val);
	if (ret < 0)
		return ret;

	*ptr = val * 1000;

	return cnt;
}
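
/*
 * tracing_max_latency and tracing_thresh are exposed in microseconds
 * but stored in nanoseconds, hence the "val * 1000" above.  For
 * example (value hypothetical):
 *
 *	# echo 50 > /debug/tracing/tracing_thresh
 *
 * stores 50000ns, so latency tracers should only record sections
 * longer than 50us.
 */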
static int tracing_open_pipe(struct inode *inode, struct file *filp)
{
	long cpu_file = (long) inode->i_private;
	struct trace_iterator *iter;
	int ret = 0;

	if (tracing_disabled)
		return -ENODEV;

	mutex_lock(&trace_types_lock);

	/* We only allow one reader per cpu */
	if (cpu_file == TRACE_PIPE_ALL_CPU) {
		if (!cpumask_empty(tracing_reader_cpumask)) {
			ret = -EBUSY;
			goto out;
		}
		cpumask_setall(tracing_reader_cpumask);
	} else {
		if (!cpumask_test_cpu(cpu_file, tracing_reader_cpumask))
			cpumask_set_cpu(cpu_file, tracing_reader_cpumask);
		else {
			ret = -EBUSY;
			goto out;
		}
	}

	/* create a buffer to store the information to pass to userspace */
	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter) {
		ret = -ENOMEM;
		goto out;
	}

	/*
	 * We make a copy of the current tracer to avoid concurrent
	 * changes on it while we are reading.
	 */
	iter->trace = kmalloc(sizeof(*iter->trace), GFP_KERNEL);
	if (!iter->trace) {
		ret = -ENOMEM;
		goto fail;
	}
	if (current_trace)
		*iter->trace = *current_trace;

	if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
		ret = -ENOMEM;
		goto fail;
	}

	/* trace pipe does not show start of buffer */
	cpumask_setall(iter->started);

	iter->cpu_file = cpu_file;
	iter->tr = &global_trace;
	mutex_init(&iter->mutex);
	filp->private_data = iter;

	if (iter->trace->pipe_open)
		iter->trace->pipe_open(iter);

out:
	mutex_unlock(&trace_types_lock);
	return ret;

fail:
	kfree(iter->trace);
	kfree(iter);
	mutex_unlock(&trace_types_lock);
	return ret;
}

static int tracing_release_pipe(struct inode *inode, struct file *file)
{
	struct trace_iterator *iter = file->private_data;

	mutex_lock(&trace_types_lock);

	if (iter->cpu_file == TRACE_PIPE_ALL_CPU)
		cpumask_clear(tracing_reader_cpumask);
	else
		cpumask_clear_cpu(iter->cpu_file, tracing_reader_cpumask);

	mutex_unlock(&trace_types_lock);

	free_cpumask_var(iter->started);
	mutex_destroy(&iter->mutex);
	kfree(iter->trace);
	kfree(iter);

	return 0;
}

static unsigned int
tracing_poll_pipe(struct file *filp, poll_table *poll_table)
{
	struct trace_iterator *iter = filp->private_data;

	if (trace_flags & TRACE_ITER_BLOCK) {
		/*
		 * Always select as readable when in blocking mode
		 */
		return POLLIN | POLLRDNORM;
	} else {
		if (!trace_empty(iter))
			return POLLIN | POLLRDNORM;
		poll_wait(filp, &trace_wait, poll_table);
		if (!trace_empty(iter))
			return POLLIN | POLLRDNORM;

		return 0;
	}
}
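
/*
 * A minimal user-space consumer sketch (hypothetical, error handling
 * omitted) relying on the poll semantics above:
 *
 *	char buf[4096];
 *	int fd = open("/debug/tracing/trace_pipe", O_RDONLY);
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *
 *	while (poll(&pfd, 1, -1) > 0)
 *		read(fd, buf, sizeof(buf));
 *
 * With the "block" trace option set, poll() always reports readable
 * and the read itself blocks in tracing_wait_pipe() instead.
 */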
void default_wait_pipe(struct trace_iterator *iter)
{
	DEFINE_WAIT(wait);

	prepare_to_wait(&trace_wait, &wait, TASK_INTERRUPTIBLE);

	if (trace_empty(iter))
		schedule();

	finish_wait(&trace_wait, &wait);
}

/*
 * This is a makeshift waitqueue.
 * A tracer might use this callback on some rare cases:
 *
 *  1) the current tracer might hold the runqueue lock when it wakes up
 *     a reader, hence a deadlock (sched, function, and function graph tracers)
 *  2) the function tracers trace all functions, and we don't want
 *     the overhead of calling wake_up and friends
 *     (and tracing them too)
 *
 * Anyway, this is a really primitive wakeup.
 */
void poll_wait_pipe(struct trace_iterator *iter)
{
	set_current_state(TASK_INTERRUPTIBLE);
	/* sleep for 100 msecs, and try again. */
	schedule_timeout(HZ / 10);
}
/* Must be called with iter->mutex held; it is dropped around the wait. */
static int tracing_wait_pipe(struct file *filp)
{
	struct trace_iterator *iter = filp->private_data;

	while (trace_empty(iter)) {

		if ((filp->f_flags & O_NONBLOCK)) {
			return -EAGAIN;
		}

		mutex_unlock(&iter->mutex);

		iter->trace->wait_pipe(iter);

		mutex_lock(&iter->mutex);

		if (signal_pending(current))
			return -EINTR;

		/*
		 * We block until we read something and tracing is disabled.
		 * We still block if tracing is disabled, but we have never
		 * read anything. This allows a user to cat this file, and
		 * then enable tracing. But after we have read something,
		 * we give an EOF when tracing is again disabled.
		 *
		 * iter->pos will be 0 if we haven't read anything.
		 */
		if (!tracer_enabled && iter->pos)
			break;
	}

	return 1;
}
/*
 * Consumer reader.
 */
static ssize_t
tracing_read_pipe(struct file *filp, char __user *ubuf,
		  size_t cnt, loff_t *ppos)
{
	struct trace_iterator *iter = filp->private_data;
	static struct tracer *old_tracer;
	ssize_t sret;

	/* return any leftover data */
	sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
	if (sret != -EBUSY)
		return sret;

	trace_seq_init(&iter->seq);

	/* copy the tracer to avoid using a global lock all around */
	mutex_lock(&trace_types_lock);
	if (unlikely(old_tracer != current_trace && current_trace)) {
		old_tracer = current_trace;
		*iter->trace = *current_trace;
	}
	mutex_unlock(&trace_types_lock);

	/*
	 * Avoid more than one consumer on a single file descriptor.
	 * This is just a matter of traces coherency: the ring buffer itself
	 * is protected.
	 */
	mutex_lock(&iter->mutex);
	if (iter->trace->read) {
		sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
		if (sret)
			goto out;
	}

waitagain:
	sret = tracing_wait_pipe(filp);
	if (sret <= 0)
		goto out;

	/* stop when tracing is finished */
	if (trace_empty(iter)) {
		sret = 0;
		goto out;
	}

	if (cnt >= PAGE_SIZE)
		cnt = PAGE_SIZE - 1;

	/* reset all but tr, trace, and overruns */
	memset(&iter->seq, 0,
	       sizeof(struct trace_iterator) -
	       offsetof(struct trace_iterator, seq));
	iter->pos = -1;

	while (find_next_entry_inc(iter) != NULL) {
		enum print_line_t ret;
		int len = iter->seq.len;

		ret = print_trace_line(iter);
		if (ret == TRACE_TYPE_PARTIAL_LINE) {
			/* don't print partial lines */
			iter->seq.len = len;
			break;
		}
		if (ret != TRACE_TYPE_NO_CONSUME)
			trace_consume(iter);

		if (iter->seq.len >= cnt)
			break;
	}

	/* Now copy what we have to the user */
	sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
	if (iter->seq.readpos >= iter->seq.len)
		trace_seq_init(&iter->seq);

	/*
	 * If there was nothing to send to user, in spite of consuming trace
	 * entries, go back to wait for more entries.
	 */
	if (sret == -EBUSY)
		goto waitagain;

out:
	mutex_unlock(&iter->mutex);

	return sret;
}
static void tracing_pipe_buf_release(struct pipe_inode_info *pipe,
				     struct pipe_buffer *buf)
{
	__free_page(buf->page);
}

static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
				     unsigned int idx)
{
	__free_page(spd->pages[idx]);
}

static struct pipe_buf_operations tracing_pipe_buf_ops = {
	.can_merge		= 0,
	.map			= generic_pipe_buf_map,
	.unmap			= generic_pipe_buf_unmap,
	.confirm		= generic_pipe_buf_confirm,
	.release		= tracing_pipe_buf_release,
	.steal			= generic_pipe_buf_steal,
	.get			= generic_pipe_buf_get,
};

static size_t
tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
{
	size_t count;
	int ret;

	/* Seq buffer is page-sized, exactly what we need. */
	for (;;) {
		count = iter->seq.len;
		ret = print_trace_line(iter);
		count = iter->seq.len - count;
		if (rem < count) {
			rem = 0;
			iter->seq.len -= count;
			break;
		}

		if (ret == TRACE_TYPE_PARTIAL_LINE) {
			iter->seq.len -= count;
			break;
		}

		trace_consume(iter);
		rem -= count;
		if (!find_next_entry_inc(iter)) {
			rem = 0;
			iter->ent = NULL;
			break;
		}
	}

	return rem;
}
static ssize_t tracing_splice_read_pipe(struct file *filp,
					loff_t *ppos,
					struct pipe_inode_info *pipe,
					size_t len,
					unsigned int flags)
{
	struct page *pages[PIPE_BUFFERS];
	struct partial_page partial[PIPE_BUFFERS];
	struct trace_iterator *iter = filp->private_data;
	struct splice_pipe_desc spd = {
		.pages		= pages,
		.partial	= partial,
		.nr_pages	= 0, /* This gets updated below. */
		.flags		= flags,
		.ops		= &tracing_pipe_buf_ops,
		.spd_release	= tracing_spd_release_pipe,
	};
	static struct tracer *old_tracer;
	ssize_t ret;
	size_t rem;
	unsigned int i;

	/* copy the tracer to avoid using a global lock all around */
	mutex_lock(&trace_types_lock);
	if (unlikely(old_tracer != current_trace && current_trace)) {
		old_tracer = current_trace;
		*iter->trace = *current_trace;
	}
	mutex_unlock(&trace_types_lock);

	mutex_lock(&iter->mutex);

	if (iter->trace->splice_read) {
		ret = iter->trace->splice_read(iter, filp,
					       ppos, pipe, len, flags);
		if (ret)
			goto out_err;
	}

	ret = tracing_wait_pipe(filp);
	if (ret <= 0)
		goto out_err;

	if (!iter->ent && !find_next_entry_inc(iter)) {
		ret = -EFAULT;
		goto out_err;
	}

	/* Fill as many pages as possible. */
	for (i = 0, rem = len; i < PIPE_BUFFERS && rem; i++) {
		pages[i] = alloc_page(GFP_KERNEL);
		if (!pages[i])
			break;

		rem = tracing_fill_pipe_page(rem, iter);

		/* Copy the data into the page, so we can start over. */
		ret = trace_seq_to_buffer(&iter->seq,
					  page_address(pages[i]),
					  iter->seq.len);
		if (ret < 0) {
			__free_page(pages[i]);
			break;
		}
		partial[i].offset = 0;
		partial[i].len = iter->seq.len;

		trace_seq_init(&iter->seq);
	}

	mutex_unlock(&iter->mutex);

	spd.nr_pages = i;

	return splice_to_pipe(pipe, &spd);

out_err:
	mutex_unlock(&iter->mutex);

	return ret;
}
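
/*
 * The splice path above moves whole formatted pages into a pipe
 * without an intermediate user-space copy.  A consumer sketch
 * (hypothetical, error handling omitted):
 *
 *	int p[2];
 *
 *	pipe(p);
 *	splice(trace_fd, NULL, p[1], NULL, 4096, 0);
 *
 * where trace_fd is an open trace_pipe file descriptor.
 */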
static ssize_t
tracing_entries_read(struct file *filp, char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	char buf[64];
	int r;

	r = sprintf(buf, "%lu\n", tr->entries >> 10);
	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static ssize_t
tracing_entries_write(struct file *filp, const char __user *ubuf,
		      size_t cnt, loff_t *ppos)
{
	unsigned long val;
	char buf[64];
	int ret, cpu;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	ret = strict_strtoul(buf, 10, &val);
	if (ret < 0)
		return ret;

	/* must have at least 1 entry */
	if (!val)
		return -EINVAL;

	mutex_lock(&trace_types_lock);

	tracing_stop();

	/* disable all cpu buffers */
	for_each_tracing_cpu(cpu) {
		if (global_trace.data[cpu])
			atomic_inc(&global_trace.data[cpu]->disabled);
		if (max_tr.data[cpu])
			atomic_inc(&max_tr.data[cpu]->disabled);
	}

	/* value is in KB */
	val <<= 10;

	if (val != global_trace.entries) {
		ret = ring_buffer_resize(global_trace.buffer, val);
		if (ret < 0) {
			cnt = ret;
			goto out;
		}

		ret = ring_buffer_resize(max_tr.buffer, val);
		if (ret < 0) {
			int r;
			cnt = ret;
			r = ring_buffer_resize(global_trace.buffer,
					       global_trace.entries);
			if (r < 0) {
				/*
				 * AARGH! We are left with different
				 * size max buffer!!!!
				 */
				WARN_ON(1);
				tracing_disabled = 1;
			}
			goto out;
		}

		global_trace.entries = val;
	}

	filp->f_pos += cnt;

	/* If check pages failed, return ENOMEM */
	if (tracing_disabled)
		cnt = -ENOMEM;
 out:
	for_each_tracing_cpu(cpu) {
		if (global_trace.data[cpu])
			atomic_dec(&global_trace.data[cpu]->disabled);
		if (max_tr.data[cpu])
			atomic_dec(&max_tr.data[cpu]->disabled);
	}

	tracing_start();
	max_tr.entries = global_trace.entries;
	mutex_unlock(&trace_types_lock);

	return cnt;
}
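
/*
 * buffer_size_kb reads and writes the per-cpu buffer size in KB;
 * note the ">> 10" / "<< 10" conversions above.  For example:
 *
 *	# echo 1024 > /debug/tracing/buffer_size_kb
 *
 * asks for roughly 1MB per cpu; global_trace and max_tr are kept
 * the same size.
 */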
static int mark_printk(const char *fmt, ...)
{
	int ret;
	va_list args;
	va_start(args, fmt);
	ret = trace_vprintk(0, -1, fmt, args);
	va_end(args);
	return ret;
}

static ssize_t
tracing_mark_write(struct file *filp, const char __user *ubuf,
		   size_t cnt, loff_t *fpos)
{
	char *buf;
	char *end;

	if (tracing_disabled)
		return -EINVAL;

	if (cnt > TRACE_BUF_SIZE)
		cnt = TRACE_BUF_SIZE;

	buf = kmalloc(cnt + 1, GFP_KERNEL);
	if (buf == NULL)
		return -ENOMEM;

	if (copy_from_user(buf, ubuf, cnt)) {
		kfree(buf);
		return -EFAULT;
	}

	/* Cut from the first nil or newline. */
	buf[cnt] = '\0';
	end = strchr(buf, '\n');
	if (end)
		*end = '\0';

	cnt = mark_printk("%s\n", buf);
	kfree(buf);
	*fpos += cnt;

	return cnt;
}
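
/*
 * trace_marker lets user space inject annotations into the trace:
 *
 *	# echo hello_world > /debug/tracing/trace_marker
 *
 * The string reaches the ring buffer through trace_vprintk() (see
 * mark_printk() above) and is printed like any other print entry.
 */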
static const struct file_operations tracing_max_lat_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_max_lat_read,
	.write		= tracing_max_lat_write,
};

static const struct file_operations tracing_ctrl_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_ctrl_read,
	.write		= tracing_ctrl_write,
};

static const struct file_operations set_tracer_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_set_trace_read,
	.write		= tracing_set_trace_write,
};

static const struct file_operations tracing_pipe_fops = {
	.open		= tracing_open_pipe,
	.poll		= tracing_poll_pipe,
	.read		= tracing_read_pipe,
	.splice_read	= tracing_splice_read_pipe,
	.release	= tracing_release_pipe,
};

static const struct file_operations tracing_entries_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_entries_read,
	.write		= tracing_entries_write,
};

static const struct file_operations tracing_mark_fops = {
	.open		= tracing_open_generic,
	.write		= tracing_mark_write,
};

struct ftrace_buffer_info {
	struct trace_array	*tr;
	void			*spare;
	int			cpu;
	unsigned int		read;
};
static int tracing_buffers_open(struct inode *inode, struct file *filp)
{
	int cpu = (int)(long)inode->i_private;
	struct ftrace_buffer_info *info;

	if (tracing_disabled)
		return -ENODEV;

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	info->tr	= &global_trace;
	info->cpu	= cpu;
	info->spare	= ring_buffer_alloc_read_page(info->tr->buffer);
	/* Force reading ring buffer for first read */
	info->read	= (unsigned int)-1;
	if (!info->spare)
		goto out;

	filp->private_data = info;

	return 0;

 out:
	kfree(info);
	return -ENOMEM;
}

static ssize_t
tracing_buffers_read(struct file *filp, char __user *ubuf,
		     size_t count, loff_t *ppos)
{
	struct ftrace_buffer_info *info = filp->private_data;
	unsigned int pos;
	ssize_t ret;
	size_t size;

	if (!count)
		return 0;

	/* Do we have previous read data to read? */
	if (info->read < PAGE_SIZE)
		goto read;

	info->read = 0;

	ret = ring_buffer_read_page(info->tr->buffer,
				    &info->spare,
				    count,
				    info->cpu, 0);
	if (ret < 0)
		return 0;

	pos = ring_buffer_page_len(info->spare);

	if (pos < PAGE_SIZE)
		memset(info->spare + pos, 0, PAGE_SIZE - pos);

read:
	size = PAGE_SIZE - info->read;
	if (size > count)
		size = count;

	ret = copy_to_user(ubuf, info->spare + info->read, size);
	if (ret == size)
		return -EFAULT;
	size -= ret;

	*ppos += size;
	info->read += size;

	return size;
}

static int tracing_buffers_release(struct inode *inode, struct file *file)
{
	struct ftrace_buffer_info *info = file->private_data;

	ring_buffer_free_read_page(info->tr->buffer, info->spare);
	kfree(info);

	return 0;
}
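
/*
 * The per-cpu files created under binary_buffers/ below hand out raw
 * ring buffer pages.  A reader is expected to consume page-sized
 * chunks, e.g. (sketch, 4K pages assumed, error handling omitted):
 *
 *	char page[4096];
 *	int fd = open("/debug/tracing/binary_buffers/0", O_RDONLY);
 *	ssize_t n = read(fd, page, sizeof(page));
 *
 * Partial pages are zero-padded before being copied out (see the
 * memset() in tracing_buffers_read()).
 */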
struct buffer_ref {
	struct ring_buffer	*buffer;
	void			*page;
	int			ref;
};

static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
				    struct pipe_buffer *buf)
{
	struct buffer_ref *ref = (struct buffer_ref *)buf->private;

	if (--ref->ref)
		return;

	ring_buffer_free_read_page(ref->buffer, ref->page);
	kfree(ref);
	buf->private = 0;
}

static int buffer_pipe_buf_steal(struct pipe_inode_info *pipe,
				 struct pipe_buffer *buf)
{
	return 1;
}

static void buffer_pipe_buf_get(struct pipe_inode_info *pipe,
				struct pipe_buffer *buf)
{
	struct buffer_ref *ref = (struct buffer_ref *)buf->private;

	ref->ref++;
}

/* Pipe buffer operations for a buffer. */
static struct pipe_buf_operations buffer_pipe_buf_ops = {
	.can_merge		= 0,
	.map			= generic_pipe_buf_map,
	.unmap			= generic_pipe_buf_unmap,
	.confirm		= generic_pipe_buf_confirm,
	.release		= buffer_pipe_buf_release,
	.steal			= buffer_pipe_buf_steal,
	.get			= buffer_pipe_buf_get,
};
/*
 * Callback from splice_to_pipe(), if we need to release some pages
 * at the end of the spd in case we errored out in filling the pipe.
 */
static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
{
	struct buffer_ref *ref =
		(struct buffer_ref *)spd->partial[i].private;

	if (--ref->ref)
		return;

	ring_buffer_free_read_page(ref->buffer, ref->page);
	kfree(ref);
	spd->partial[i].private = 0;
}
static ssize_t
tracing_buffers_splice_read(struct file *file, loff_t *ppos,
			    struct pipe_inode_info *pipe, size_t len,
			    unsigned int flags)
{
	struct ftrace_buffer_info *info = file->private_data;
	struct partial_page partial[PIPE_BUFFERS];
	struct page *pages[PIPE_BUFFERS];
	struct splice_pipe_desc spd = {
		.pages		= pages,
		.partial	= partial,
		.flags		= flags,
		.ops		= &buffer_pipe_buf_ops,
		.spd_release	= buffer_spd_release,
	};
	struct buffer_ref *ref;
	int size, i;
	size_t ret;

	/*
	 * We can't seek on a buffer input
	 */
	if (unlikely(*ppos))
		return -ESPIPE;

	for (i = 0; i < PIPE_BUFFERS && len; i++, len -= size) {
		struct page *page;
		int r;

		ref = kzalloc(sizeof(*ref), GFP_KERNEL);
		if (!ref)
			break;

		ref->buffer = info->tr->buffer;
		ref->page = ring_buffer_alloc_read_page(ref->buffer);
		if (!ref->page) {
			kfree(ref);
			break;
		}

		r = ring_buffer_read_page(ref->buffer, &ref->page,
					  len, info->cpu, 0);
		if (r < 0) {
			ring_buffer_free_read_page(ref->buffer,
						   ref->page);
			kfree(ref);
			break;
		}

		/*
		 * zero out any left over data, this is going to
		 * user land.
		 */
		size = ring_buffer_page_len(ref->page);
		if (size < PAGE_SIZE)
			memset(ref->page + size, 0, PAGE_SIZE - size);

		page = virt_to_page(ref->page);

		spd.pages[i] = page;
		spd.partial[i].len = PAGE_SIZE;
		spd.partial[i].offset = 0;
		spd.partial[i].private = (unsigned long)ref;
		spd.nr_pages++;
	}

	spd.nr_pages = i;

	/* did we read anything? */
	if (!spd.nr_pages) {
		if (flags & SPLICE_F_NONBLOCK)
			ret = -EAGAIN;
		else
			ret = 0;
		/* TODO: block */
		return ret;
	}

	ret = splice_to_pipe(pipe, &spd);

	return ret;
}

static const struct file_operations tracing_buffers_fops = {
	.open		= tracing_buffers_open,
	.read		= tracing_buffers_read,
	.release	= tracing_buffers_release,
	.splice_read	= tracing_buffers_splice_read,
	.llseek		= no_llseek,
};
#ifdef CONFIG_DYNAMIC_FTRACE

int __weak ftrace_arch_read_dyn_info(char *buf, int size)
{
	return 0;
}

static ssize_t
tracing_read_dyn_info(struct file *filp, char __user *ubuf,
		      size_t cnt, loff_t *ppos)
{
	static char ftrace_dyn_info_buffer[1024];
	static DEFINE_MUTEX(dyn_info_mutex);
	unsigned long *p = filp->private_data;
	char *buf = ftrace_dyn_info_buffer;
	int size = ARRAY_SIZE(ftrace_dyn_info_buffer);
	int r;

	mutex_lock(&dyn_info_mutex);
	r = sprintf(buf, "%ld ", *p);

	r += ftrace_arch_read_dyn_info(buf+r, (size-1)-r);
	buf[r++] = '\n';

	r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);

	mutex_unlock(&dyn_info_mutex);

	return r;
}

static const struct file_operations tracing_dyn_info_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_read_dyn_info,
};
#endif
static struct dentry *d_tracer;

struct dentry *tracing_init_dentry(void)
{
	static int once;

	if (d_tracer)
		return d_tracer;

	d_tracer = debugfs_create_dir("tracing", NULL);

	if (!d_tracer && !once) {
		once = 1;
		pr_warning("Could not create debugfs directory 'tracing'\n");
		return NULL;
	}

	return d_tracer;
}

static struct dentry *d_percpu;

struct dentry *tracing_dentry_percpu(void)
{
	static int once;
	struct dentry *d_tracer;

	if (d_percpu)
		return d_percpu;

	d_tracer = tracing_init_dentry();

	if (!d_tracer)
		return NULL;

	d_percpu = debugfs_create_dir("per_cpu", d_tracer);

	if (!d_percpu && !once) {
		once = 1;
		pr_warning("Could not create debugfs directory 'per_cpu'\n");
		return NULL;
	}

	return d_percpu;
}
static void tracing_init_debugfs_percpu(long cpu)
{
	struct dentry *d_percpu = tracing_dentry_percpu();
	struct dentry *entry, *d_cpu;
	/* strlen("cpu") + MAX(log10(cpu)) + '\0' */
	char cpu_dir[7];

	if (cpu > 999 || cpu < 0)
		return;

	sprintf(cpu_dir, "cpu%ld", cpu);
	d_cpu = debugfs_create_dir(cpu_dir, d_percpu);
	if (!d_cpu) {
		pr_warning("Could not create debugfs '%s' entry\n", cpu_dir);
		return;
	}

	/* per cpu trace_pipe */
	entry = debugfs_create_file("trace_pipe", 0444, d_cpu,
				    (void *) cpu, &tracing_pipe_fops);
	if (!entry)
		pr_warning("Could not create debugfs 'trace_pipe' entry\n");

	/* per cpu trace */
	entry = debugfs_create_file("trace", 0444, d_cpu,
				    (void *) cpu, &tracing_fops);
	if (!entry)
		pr_warning("Could not create debugfs 'trace' entry\n");
}
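
/*
 * After tracing_init_debugfs_percpu() runs for each cpu, the layout
 * looks like (debugfs on /debug, cpu numbers as examples):
 *
 *	/debug/tracing/per_cpu/cpu0/trace
 *	/debug/tracing/per_cpu/cpu0/trace_pipe
 *
 * These behave like the global files but iterate a single cpu
 * buffer (cpu_file != TRACE_PIPE_ALL_CPU in the open paths above).
 */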
#ifdef CONFIG_FTRACE_SELFTEST
/* Let selftest have access to static functions in this file */
#include "trace_selftest.c"
#endif

struct trace_option_dentry {
	struct tracer_opt		*opt;
	struct tracer_flags		*flags;
	struct dentry			*entry;
};

static ssize_t
trace_options_read(struct file *filp, char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	struct trace_option_dentry *topt = filp->private_data;
	char *buf;

	if (topt->flags->val & topt->opt->bit)
		buf = "1\n";
	else
		buf = "0\n";

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
}

static ssize_t
trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
		    loff_t *ppos)
{
	struct trace_option_dentry *topt = filp->private_data;
	unsigned long val;
	char buf[64];
	int ret;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	ret = strict_strtoul(buf, 10, &val);
	if (ret < 0)
		return ret;

	ret = 0;
	switch (val) {
	case 0:
		/* do nothing if already cleared */
		if (!(topt->flags->val & topt->opt->bit))
			break;

		mutex_lock(&trace_types_lock);
		if (current_trace->set_flag)
			ret = current_trace->set_flag(topt->flags->val,
						      topt->opt->bit, 0);
		mutex_unlock(&trace_types_lock);
		if (ret)
			return ret;
		topt->flags->val &= ~topt->opt->bit;
		break;
	case 1:
		/* do nothing if already set */
		if (topt->flags->val & topt->opt->bit)
			break;

		mutex_lock(&trace_types_lock);
		if (current_trace->set_flag)
			ret = current_trace->set_flag(topt->flags->val,
						      topt->opt->bit, 1);
		mutex_unlock(&trace_types_lock);
		if (ret)
			return ret;
		topt->flags->val |= topt->opt->bit;
		break;

	default:
		return -EINVAL;
	}

	*ppos += cnt;

	return cnt;
}

static const struct file_operations trace_options_fops = {
	.open = tracing_open_generic,
	.read = trace_options_read,
	.write = trace_options_write,
};
static ssize_t
trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt,
			loff_t *ppos)
{
	long index = (long)filp->private_data;
	char *buf;

	if (trace_flags & (1 << index))
		buf = "1\n";
	else
		buf = "0\n";

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
}

static ssize_t
trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
			 loff_t *ppos)
{
	long index = (long)filp->private_data;
	char buf[64];
	unsigned long val;
	int ret;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	ret = strict_strtoul(buf, 10, &val);
	if (ret < 0)
		return ret;

	switch (val) {
	case 0:
		trace_flags &= ~(1 << index);
		break;
	case 1:
		trace_flags |= 1 << index;
		break;

	default:
		return -EINVAL;
	}

	*ppos += cnt;

	return cnt;
}

static const struct file_operations trace_options_core_fops = {
	.open = tracing_open_generic,
	.read = trace_options_core_read,
	.write = trace_options_core_write,
};
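
/*
 * Each core option is also exposed as a boolean file under options/,
 * e.g. (option name taken from trace_options[]):
 *
 *	# echo 1 > /debug/tracing/options/print-parent
 *
 * which is equivalent to "echo print-parent > trace_options".
 */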
static struct dentry *trace_options_init_dentry(void)
{
	struct dentry *d_tracer;
	static struct dentry *t_options;

	if (t_options)
		return t_options;

	d_tracer = tracing_init_dentry();
	if (!d_tracer)
		return NULL;

	t_options = debugfs_create_dir("options", d_tracer);
	if (!t_options) {
		pr_warning("Could not create debugfs directory 'options'\n");
		return NULL;
	}

	return t_options;
}

static void
create_trace_option_file(struct trace_option_dentry *topt,
			 struct tracer_flags *flags,
			 struct tracer_opt *opt)
{
	struct dentry *t_options;
	struct dentry *entry;

	t_options = trace_options_init_dentry();
	if (!t_options)
		return;

	topt->flags = flags;
	topt->opt = opt;

	entry = debugfs_create_file(opt->name, 0644, t_options, topt,
				    &trace_options_fops);

	topt->entry = entry;
}

static struct trace_option_dentry *
create_trace_option_files(struct tracer *tracer)
{
	struct trace_option_dentry *topts;
	struct tracer_flags *flags;
	struct tracer_opt *opts;
	int cnt;

	if (!tracer)
		return NULL;

	flags = tracer->flags;

	if (!flags || !flags->opts)
		return NULL;

	opts = flags->opts;

	for (cnt = 0; opts[cnt].name; cnt++)
		;

	topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL);
	if (!topts)
		return NULL;

	for (cnt = 0; opts[cnt].name; cnt++)
		create_trace_option_file(&topts[cnt], flags,
					 &opts[cnt]);

	return topts;
}

static void
destroy_trace_option_files(struct trace_option_dentry *topts)
{
	int cnt;

	if (!topts)
		return;

	for (cnt = 0; topts[cnt].opt; cnt++) {
		if (topts[cnt].entry)
			debugfs_remove(topts[cnt].entry);
	}

	kfree(topts);
}
static struct dentry *
create_trace_option_core_file(const char *option, long index)
{
	struct dentry *t_options;
	struct dentry *entry;

	t_options = trace_options_init_dentry();
	if (!t_options)
		return NULL;

	entry = debugfs_create_file(option, 0644, t_options, (void *)index,
				    &trace_options_core_fops);

	return entry;
}

static __init void create_trace_options_dir(void)
{
	struct dentry *t_options;
	struct dentry *entry;
	int i;

	t_options = trace_options_init_dentry();
	if (!t_options)
		return;

	for (i = 0; trace_options[i]; i++) {
		entry = create_trace_option_core_file(trace_options[i], i);
		if (!entry)
			pr_warning("Could not create debugfs %s entry\n",
				   trace_options[i]);
	}
}
static __init int tracer_init_debugfs(void)
{
	struct dentry *d_tracer;
	struct dentry *buffers;
	struct dentry *entry;
	int cpu;

	d_tracer = tracing_init_dentry();

	entry = debugfs_create_file("tracing_enabled", 0644, d_tracer,
				    &global_trace, &tracing_ctrl_fops);
	if (!entry)
		pr_warning("Could not create debugfs 'tracing_enabled' entry\n");

	entry = debugfs_create_file("trace_options", 0644, d_tracer,
				    NULL, &tracing_iter_fops);
	if (!entry)
		pr_warning("Could not create debugfs 'trace_options' entry\n");

	create_trace_options_dir();

	entry = debugfs_create_file("tracing_cpumask", 0644, d_tracer,
				    NULL, &tracing_cpumask_fops);
	if (!entry)
		pr_warning("Could not create debugfs 'tracing_cpumask' entry\n");

	entry = debugfs_create_file("trace", 0444, d_tracer,
				    (void *) TRACE_PIPE_ALL_CPU, &tracing_fops);
	if (!entry)
		pr_warning("Could not create debugfs 'trace' entry\n");

	entry = debugfs_create_file("available_tracers", 0444, d_tracer,
				    &global_trace, &show_traces_fops);
	if (!entry)
		pr_warning("Could not create debugfs 'available_tracers' entry\n");

	entry = debugfs_create_file("current_tracer", 0444, d_tracer,
				    &global_trace, &set_tracer_fops);
	if (!entry)
		pr_warning("Could not create debugfs 'current_tracer' entry\n");

	entry = debugfs_create_file("tracing_max_latency", 0644, d_tracer,
				    &tracing_max_latency,
				    &tracing_max_lat_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'tracing_max_latency' entry\n");

	entry = debugfs_create_file("tracing_thresh", 0644, d_tracer,
				    &tracing_thresh, &tracing_max_lat_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'tracing_thresh' entry\n");

	entry = debugfs_create_file("README", 0644, d_tracer,
				    NULL, &tracing_readme_fops);
	if (!entry)
		pr_warning("Could not create debugfs 'README' entry\n");

	entry = debugfs_create_file("trace_pipe", 0444, d_tracer,
				    (void *) TRACE_PIPE_ALL_CPU, &tracing_pipe_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'trace_pipe' entry\n");

	entry = debugfs_create_file("buffer_size_kb", 0644, d_tracer,
				    &global_trace, &tracing_entries_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'buffer_size_kb' entry\n");

	entry = debugfs_create_file("trace_marker", 0220, d_tracer,
				    NULL, &tracing_mark_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'trace_marker' entry\n");

	buffers = debugfs_create_dir("binary_buffers", d_tracer);
	if (!buffers)
		pr_warning("Could not create buffers directory\n");
	else {
		int cpu;
		char buf[64];

		for_each_tracing_cpu(cpu) {
			sprintf(buf, "%d", cpu);

			entry = debugfs_create_file(buf, 0444, buffers,
						    (void *)(long)cpu,
						    &tracing_buffers_fops);
			if (!entry)
				pr_warning("Could not create debugfs buffers "
					   "'%s' entry\n", buf);
		}
	}

#ifdef CONFIG_DYNAMIC_FTRACE
	entry = debugfs_create_file("dyn_ftrace_total_info", 0444, d_tracer,
				    &ftrace_update_tot_cnt,
				    &tracing_dyn_info_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'dyn_ftrace_total_info' entry\n");
#endif
#ifdef CONFIG_SYSPROF_TRACER
	init_tracer_sysprof_debugfs(d_tracer);
#endif

	for_each_tracing_cpu(cpu)
		tracing_init_debugfs_percpu(cpu);

	return 0;
}
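
/*
 * trace_vprintk - write a formatted string into the trace buffer
 *
 * The message is formatted into a static buffer under trace_buf_lock (with
 * IRQs disabled and graph tracing paused), then copied into a TRACE_PRINT
 * ring buffer event.  Returns the length of the formatted string, or 0 if
 * tracing is disabled.
 */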
int trace_vprintk(unsigned long ip, int depth, const char *fmt, va_list args)
{
	static raw_spinlock_t trace_buf_lock = __RAW_SPIN_LOCK_UNLOCKED;
	static char trace_buf[TRACE_BUF_SIZE];

	struct ring_buffer_event *event;
	struct trace_array *tr = &global_trace;
	struct trace_array_cpu *data;
	int cpu, len = 0, size, pc;
	struct print_entry *entry;
	unsigned long irq_flags;

	if (tracing_disabled || tracing_selftest_running)
		return 0;

	pc = preempt_count();
	preempt_disable_notrace();
	cpu = raw_smp_processor_id();
	data = tr->data[cpu];

	if (unlikely(atomic_read(&data->disabled)))
		goto out;

	pause_graph_tracing();
	raw_local_irq_save(irq_flags);
	__raw_spin_lock(&trace_buf_lock);
	len = vsnprintf(trace_buf, TRACE_BUF_SIZE, fmt, args);

	len = min(len, TRACE_BUF_SIZE-1);
	trace_buf[len] = 0;

	size = sizeof(*entry) + len + 1;
	event = trace_buffer_lock_reserve(tr, TRACE_PRINT, size, irq_flags, pc);
	if (!event)
		goto out_unlock;
	entry = ring_buffer_event_data(event);
	entry->ip = ip;
	entry->depth = depth;

	memcpy(&entry->buf, trace_buf, len);
	entry->buf[len] = 0;
	ring_buffer_unlock_commit(tr->buffer, event);

 out_unlock:
	__raw_spin_unlock(&trace_buf_lock);
	raw_local_irq_restore(irq_flags);
	unpause_graph_tracing();
 out:
	preempt_enable_notrace();

	return len;
}
EXPORT_SYMBOL_GPL(trace_vprintk);
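
/*
 * __trace_printk - printf-style entry point for trace_vprintk()
 *
 * Illustrative only: callers normally reach this through a wrapper macro
 * that supplies the instruction pointer, for example (assuming such a
 * wrapper exists in the caller's tree):
 *
 *	__trace_printk(_THIS_IP_, "irq %d handled in %llu ns\n", irq, delta);
 *
 * Nothing is recorded unless the "printk" trace option is set.
 */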
int __trace_printk(unsigned long ip, const char *fmt, ...)
{
	int ret;
	va_list ap;

	if (!(trace_flags & TRACE_ITER_PRINTK))
		return 0;

	va_start(ap, fmt);
	ret = trace_vprintk(ip, task_curr_ret_stack(current), fmt, ap);
	va_end(ap);
	return ret;
}
EXPORT_SYMBOL_GPL(__trace_printk);
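
/*
 * __ftrace_vprintk - va_list variant of __trace_printk(); also gated on the
 * "printk" trace option.
 */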
int __ftrace_vprintk(unsigned long ip, const char *fmt, va_list ap)
{
	if (!(trace_flags & TRACE_ITER_PRINTK))
		return 0;

	return trace_vprintk(ip, task_curr_ret_stack(current), fmt, ap);
}
EXPORT_SYMBOL_GPL(__ftrace_vprintk);

/**
 * trace_vbprintk - write binary msg to tracing buffer
 *
 * Caller must ensure that @fmt remains valid while the msg is in the
 * tracing buffer; only the binary arguments are copied, @fmt itself is
 * recorded by pointer.
 */
int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
{
	static DEFINE_SPINLOCK(trace_buf_lock);
	static u32 trace_buf[TRACE_BUF_SIZE];

	struct ring_buffer_event *event;
	struct trace_array *tr = &global_trace;
	struct trace_array_cpu *data;
	struct bprintk_entry *entry;
	unsigned long flags;
	int resched;
	int cpu, len = 0, size, pc;

	if (tracing_disabled || !trace_bprintk_enable)
		return 0;

	pc = preempt_count();
	resched = ftrace_preempt_disable();
	cpu = raw_smp_processor_id();
	data = tr->data[cpu];

	if (unlikely(atomic_read(&data->disabled)))
		goto out;

	spin_lock_irqsave(&trace_buf_lock, flags);
	len = vbin_printf(trace_buf, TRACE_BUF_SIZE, fmt, args);

	if (len > TRACE_BUF_SIZE || len < 0)
		goto out_unlock;

	size = sizeof(*entry) + sizeof(u32) * len;
	event = trace_buffer_lock_reserve(tr, TRACE_BPRINTK, size, flags, pc);
	if (!event)
		goto out_unlock;
	entry = ring_buffer_event_data(event);
	entry->ip = ip;
	entry->fmt = fmt;

	memcpy(entry->buf, trace_buf, sizeof(u32) * len);
	ring_buffer_unlock_commit(tr->buffer, event);

out_unlock:
	spin_unlock_irqrestore(&trace_buf_lock, flags);

out:
	ftrace_preempt_enable(resched);

	return len;
}
EXPORT_SYMBOL_GPL(trace_vbprintk);
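
/*
 * __trace_bprintk - printf-style entry point for trace_vbprintk()
 *
 * Unlike __trace_printk(), only the binary arguments are stored in the ring
 * buffer; the format string is kept as a pointer and decoded at read time.
 */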
int __trace_bprintk(unsigned long ip, const char *fmt, ...)
{
	int ret;
	va_list ap;

	if (!fmt)
		return 0;

	va_start(ap, fmt);
	ret = trace_vbprintk(ip, fmt, ap);
	va_end(ap);
	return ret;
}
EXPORT_SYMBOL_GPL(__trace_bprintk);
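
/*
 * Panic/die notifiers: when ftrace_dump_on_oops is set, dump the trace
 * buffers to the console from the panic and die notifier chains.
 */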
static int trace_panic_handler(struct notifier_block *this,
			       unsigned long event, void *unused)
{
	if (ftrace_dump_on_oops)
		ftrace_dump();
	return NOTIFY_OK;
}

static struct notifier_block trace_panic_notifier = {
	.notifier_call = trace_panic_handler,
	.next = NULL,
	.priority = 150 /* priority: INT_MAX >= x >= 0 */
};

static int trace_die_handler(struct notifier_block *self,
			     unsigned long val,
			     void *data)
{
	switch (val) {
	case DIE_OOPS:
		if (ftrace_dump_on_oops)
			ftrace_dump();
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block trace_die_notifier = {
	.notifier_call = trace_die_handler,
	.priority = 200
};

/*
 * printk is limited to a maximum of 1024 bytes; we really don't need it
 * that big here.  Nothing should be printing 1000 characters anyway.
 */
#define TRACE_MAX_PRINT		1000

/*
 * Define here KERN_TRACE so that we have one place to modify
 * it if we decide to change what log level the ftrace dump
 * should be at.
 */
#define KERN_TRACE		KERN_EMERG
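
/*
 * trace_printk_seq - print a trace_seq to the console at KERN_TRACE level
 * and reset it for reuse.  The length is clamped so each printk() stays
 * within a sane size.
 */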
static void
trace_printk_seq(struct trace_seq *s)
{
	/* Probably should print a warning here. */
	if (s->len >= 1000)
		s->len = 1000;

	/* should be zero ended, but we are paranoid. */
	s->buffer[s->len] = 0;

	printk(KERN_TRACE "%s", s->buffer);

	trace_seq_init(s);
}
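
/*
 * ftrace_dump - dump the contents of the ftrace ring buffers to the console
 *
 * Used from the panic/die notifiers above.  Tracing is switched off and
 * ftrace is killed first, so this is a one-way, one-shot operation
 * (dump_ran guards against reentry).
 */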
void ftrace_dump(void)
{
	static DEFINE_SPINLOCK(ftrace_dump_lock);
	/* use static because iter can be a bit big for the stack */
	static struct trace_iterator iter;
	static int dump_ran;
	unsigned long flags;
	int cnt = 0, cpu;

	/* only one dump */
	spin_lock_irqsave(&ftrace_dump_lock, flags);
	if (dump_ran)
		goto out;

	dump_ran = 1;

	/* No turning back! */
	tracing_off();
	ftrace_kill();

	for_each_tracing_cpu(cpu) {
		atomic_inc(&global_trace.data[cpu]->disabled);
	}

	/* don't look at user memory in panic mode */
	trace_flags &= ~TRACE_ITER_SYM_USEROBJ;

	printk(KERN_TRACE "Dumping ftrace buffer:\n");

	/* Simulate the iterator */
	iter.tr = &global_trace;
	iter.trace = current_trace;
	iter.cpu_file = TRACE_PIPE_ALL_CPU;

	/*
	 * We need to stop all tracing on all CPUS to read
	 * the next buffer.  This is a bit expensive, but is
	 * not done often.  We fill all that we can read,
	 * and then release the locks again.
	 */

	while (!trace_empty(&iter)) {

		if (!cnt)
			printk(KERN_TRACE "---------------------------------\n");

		cnt++;

		/* reset all but tr, trace, and overruns */
		memset(&iter.seq, 0,
		       sizeof(struct trace_iterator) -
		       offsetof(struct trace_iterator, seq));
		iter.iter_flags |= TRACE_FILE_LAT_FMT;
		iter.pos = -1;

		if (find_next_entry_inc(&iter) != NULL) {
			print_trace_line(&iter);
			trace_consume(&iter);
		}

		trace_printk_seq(&iter.seq);
	}

	if (!cnt)
		printk(KERN_TRACE " (ftrace buffer empty)\n");
	else
		printk(KERN_TRACE "---------------------------------\n");

 out:
	spin_unlock_irqrestore(&ftrace_dump_lock, flags);
}
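
/*
 * tracer_alloc_buffers - early allocation of the tracing cpumasks and ring
 * buffers, registration of the nop tracer and of the panic/die notifiers.
 * Runs as an early_initcall(), before tracer_init_debugfs().
 */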
__init static int tracer_alloc_buffers(void)
{
	struct trace_array_cpu *data;
	int i;
	int ret = -ENOMEM;

	if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
		goto out;

	if (!alloc_cpumask_var(&tracing_cpumask, GFP_KERNEL))
		goto out_free_buffer_mask;

	if (!alloc_cpumask_var(&tracing_reader_cpumask, GFP_KERNEL))
		goto out_free_tracing_cpumask;

	cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
	cpumask_copy(tracing_cpumask, cpu_all_mask);
	cpumask_clear(tracing_reader_cpumask);

	/* TODO: make the number of buffers hot pluggable with CPUS */
	global_trace.buffer = ring_buffer_alloc(trace_buf_size,
						TRACE_BUFFER_FLAGS);
	if (!global_trace.buffer) {
		printk(KERN_ERR "tracer: failed to allocate ring buffer!\n");
		WARN_ON(1);
		goto out_free_cpumask;
	}
	global_trace.entries = ring_buffer_size(global_trace.buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	max_tr.buffer = ring_buffer_alloc(trace_buf_size,
					  TRACE_BUFFER_FLAGS);
	if (!max_tr.buffer) {
		printk(KERN_ERR "tracer: failed to allocate max ring buffer!\n");
		WARN_ON(1);
		ring_buffer_free(global_trace.buffer);
		goto out_free_cpumask;
	}
	max_tr.entries = ring_buffer_size(max_tr.buffer);
	WARN_ON(max_tr.entries != global_trace.entries);
#endif

	/* Allocate the first page for all buffers */
	for_each_tracing_cpu(i) {
		data = global_trace.data[i] = &per_cpu(global_trace_cpu, i);
		max_tr.data[i] = &per_cpu(max_data, i);
	}

	trace_init_cmdlines();

	register_tracer(&nop_trace);
	current_trace = &nop_trace;
#ifdef CONFIG_BOOT_TRACER
	register_tracer(&boot_tracer);
#endif
	/* All seems OK, enable tracing */
	tracing_disabled = 0;

	atomic_notifier_chain_register(&panic_notifier_list,
				       &trace_panic_notifier);

	register_die_notifier(&trace_die_notifier);

	/*
	 * Success: return before the error unwinding below, so the
	 * cpumasks allocated above are not freed on the success path.
	 */
	return 0;

out_free_cpumask:
	free_cpumask_var(tracing_reader_cpumask);
out_free_tracing_cpumask:
	free_cpumask_var(tracing_cpumask);
out_free_buffer_mask:
	free_cpumask_var(tracing_buffer_mask);
out:
	return ret;
}

__init static int clear_boot_tracer(void)
{
	/*
	 * The default bootup tracer name points into an init-section
	 * buffer.  This function is called as a late_initcall: if the
	 * requested boot tracer was never registered, clear the pointer
	 * now, to prevent a later registration from accessing a buffer
	 * that is about to be freed.
	 */
	if (!default_bootup_tracer)
		return 0;

	printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
	       default_bootup_tracer);

	default_bootup_tracer = NULL;

	return 0;
}

early_initcall(tracer_alloc_buffers);
fs_initcall(tracer_init_debugfs);
late_initcall(clear_boot_tracer);